Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * lock.c
4 : : * POSTGRES primary lock mechanism
5 : : *
6 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/storage/lmgr/lock.c
12 : : *
13 : : * NOTES
14 : : * A lock table is a shared memory hash table. When
15 : : * a process tries to acquire a lock of a type that conflicts
16 : : * with existing locks, it is put to sleep using the routines
17 : : * in storage/lmgr/proc.c.
18 : : *
19 : : * For the most part, this code should be invoked via lmgr.c
20 : : * or another lock-management module, not directly.
21 : : *
22 : : * Interface:
23 : : *
24 : : * LockManagerShmemInit(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 : : * LockAcquire(), LockRelease(), LockReleaseAll(),
26 : : * LockCheckConflicts(), GrantLock()
27 : : *
28 : : *-------------------------------------------------------------------------
29 : : */
30 : : #include "postgres.h"
31 : :
32 : : #include <signal.h>
33 : : #include <unistd.h>
34 : :
35 : : #include "access/transam.h"
36 : : #include "access/twophase.h"
37 : : #include "access/twophase_rmgr.h"
38 : : #include "access/xlog.h"
39 : : #include "access/xlogutils.h"
40 : : #include "miscadmin.h"
41 : : #include "pg_trace.h"
42 : : #include "pgstat.h"
43 : : #include "storage/lmgr.h"
44 : : #include "storage/proc.h"
45 : : #include "storage/procarray.h"
46 : : #include "storage/shmem.h"
47 : : #include "storage/spin.h"
48 : : #include "storage/standby.h"
49 : : #include "storage/subsystems.h"
50 : : #include "utils/memutils.h"
51 : : #include "utils/ps_status.h"
52 : : #include "utils/resowner.h"
53 : :
54 : :
55 : : /* GUC variables */
56 : : int max_locks_per_xact; /* used to set the lock table size */
57 : : bool log_lock_failures = false;
58 : :
59 : : #define NLOCKENTS() \
60 : : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
61 : :
62 : :
63 : : /*
64 : : * Data structures defining the semantics of the standard lock methods.
65 : : *
66 : : * The conflict table defines the semantics of the various lock modes.
67 : : */
68 : : static const LOCKMASK LockConflicts[] = {
69 : : 0,
70 : :
71 : : /* AccessShareLock */
72 : : LOCKBIT_ON(AccessExclusiveLock),
73 : :
74 : : /* RowShareLock */
75 : : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
76 : :
77 : : /* RowExclusiveLock */
78 : : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
79 : : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
80 : :
81 : : /* ShareUpdateExclusiveLock */
82 : : LOCKBIT_ON(ShareUpdateExclusiveLock) |
83 : : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
84 : : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
85 : :
86 : : /* ShareLock */
87 : : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
88 : : LOCKBIT_ON(ShareRowExclusiveLock) |
89 : : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
90 : :
91 : : /* ShareRowExclusiveLock */
92 : : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
93 : : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
94 : : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
95 : :
96 : : /* ExclusiveLock */
97 : : LOCKBIT_ON(RowShareLock) |
98 : : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
99 : : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
100 : : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
101 : :
102 : : /* AccessExclusiveLock */
103 : : LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
104 : : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
105 : : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
106 : : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
107 : :
108 : : };
109 : :
110 : : /* Names of lock modes, for debug printouts */
111 : : static const char *const lock_mode_names[] =
112 : : {
113 : : "INVALID",
114 : : "AccessShareLock",
115 : : "RowShareLock",
116 : : "RowExclusiveLock",
117 : : "ShareUpdateExclusiveLock",
118 : : "ShareLock",
119 : : "ShareRowExclusiveLock",
120 : : "ExclusiveLock",
121 : : "AccessExclusiveLock"
122 : : };
123 : :
124 : : #ifndef LOCK_DEBUG
125 : : static bool Dummy_trace = false;
126 : : #endif
127 : :
128 : : static const LockMethodData default_lockmethod = {
129 : : MaxLockMode,
130 : : LockConflicts,
131 : : lock_mode_names,
132 : : #ifdef LOCK_DEBUG
133 : : &Trace_locks
134 : : #else
135 : : &Dummy_trace
136 : : #endif
137 : : };
138 : :
139 : : static const LockMethodData user_lockmethod = {
140 : : MaxLockMode,
141 : : LockConflicts,
142 : : lock_mode_names,
143 : : #ifdef LOCK_DEBUG
144 : : &Trace_userlocks
145 : : #else
146 : : &Dummy_trace
147 : : #endif
148 : : };
149 : :
150 : : /*
151 : : * map from lock method id to the lock table data structures
152 : : */
153 : : static const LockMethod LockMethods[] = {
154 : : NULL,
155 : : &default_lockmethod,
156 : : &user_lockmethod
157 : : };
158 : :
159 : :
160 : : /* Record that's written to 2PC state file when a lock is persisted */
161 : : typedef struct TwoPhaseLockRecord
162 : : {
163 : : LOCKTAG locktag;
164 : : LOCKMODE lockmode;
165 : : } TwoPhaseLockRecord;
166 : :
167 : :
168 : : /*
169 : : * Count of the number of fast path lock slots we believe to be used. This
170 : : * might be higher than the real number if another backend has transferred
171 : : * our locks to the primary lock table, but it can never be lower than the
172 : : * real value, since only we can acquire locks on our own behalf.
173 : : *
174 : : * XXX Allocate a static array of the maximum size. We could use a pointer
175 : : * and then allocate just the right size to save a couple kB, but then we
176 : : * would have to initialize that, while for the static array that happens
177 : : * automatically. Doesn't seem worth the extra complexity.
178 : : */
179 : : static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX];
180 : :
181 : : /*
182 : : * Flag to indicate if the relation extension lock is held by this backend.
183 : : * This flag is used to ensure that while holding the relation extension lock
184 : : * we don't try to acquire a heavyweight lock on any other object. This
185 : : * restriction implies that the relation extension lock won't ever participate
186 : : * in the deadlock cycle because we can never wait for any other heavyweight
187 : : * lock after acquiring this lock.
188 : : *
189 : : * Such a restriction is okay for relation extension locks as unlike other
190 : : * heavyweight locks these are not held till the transaction end. These are
191 : : * taken for a short duration to extend a particular relation and then
192 : : * released.
193 : : */
194 : : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
195 : :
196 : : /*
197 : : * Number of fast-path locks per backend - size of the arrays in PGPROC.
198 : : * This is set only once during start, before initializing shared memory,
199 : : * and remains constant after that.
200 : : *
201 : : * We set the limit based on max_locks_per_transaction GUC, because that's
202 : : * the best information about expected number of locks per backend we have.
203 : : * See InitializeFastPathLocks() for details.
204 : : */
205 : : int FastPathLockGroupsPerBackend = 0;
206 : :
207 : : /*
208 : : * Macros to calculate the fast-path group and index for a relation.
209 : : *
210 : : * The formula is a simple hash function, designed to spread the OIDs a bit,
211 : : * so that even contiguous values end up in different groups. In most cases
212 : : * there will be gaps anyway, but the multiplication should help a bit.
213 : : *
214 : : * The selected constant (49157) is a prime not too close to 2^k, and it's
215 : : * small enough to not cause overflows (in 64-bit).
216 : : *
217 : : * We can assume that FastPathLockGroupsPerBackend is a power-of-two per
218 : : * InitializeFastPathLocks().
219 : : */
220 : : #define FAST_PATH_REL_GROUP(rel) \
221 : : (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
222 : :
223 : : /*
224 : : * Given the group/slot indexes, calculate the slot index in the whole array
225 : : * of fast-path lock slots.
226 : : */
227 : : #define FAST_PATH_SLOT(group, index) \
228 : : (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
229 : : AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
230 : : ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
231 : :
232 : : /*
233 : : * Given a slot index (into the whole per-backend array), calculated using
234 : : * the FAST_PATH_SLOT macro, split it into group and index (in the group).
235 : : */
236 : : #define FAST_PATH_GROUP(index) \
237 : : (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
238 : : ((index) / FP_LOCK_SLOTS_PER_GROUP))
239 : : #define FAST_PATH_INDEX(index) \
240 : : (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
241 : : ((index) % FP_LOCK_SLOTS_PER_GROUP))
242 : :
243 : : /* Macros for manipulating proc->fpLockBits */
244 : : #define FAST_PATH_BITS_PER_SLOT 3
245 : : #define FAST_PATH_LOCKNUMBER_OFFSET 1
246 : : #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
247 : : #define FAST_PATH_BITS(proc, n) (proc)->fpLockBits[FAST_PATH_GROUP(n)]
248 : : #define FAST_PATH_GET_BITS(proc, n) \
249 : : ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
250 : : #define FAST_PATH_BIT_POSITION(n, l) \
251 : : (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
252 : : AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
253 : : AssertMacro((n) < FastPathLockSlotsPerBackend()), \
254 : : ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
255 : : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
256 : : FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
257 : : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
258 : : FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
259 : : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
260 : : (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
261 : :
262 : : /*
263 : : * The fast-path lock mechanism is concerned only with relation locks on
264 : : * unshared relations by backends bound to a database. The fast-path
265 : : * mechanism exists mostly to accelerate acquisition and release of locks
266 : : * that rarely conflict. Because ShareUpdateExclusiveLock is
267 : : * self-conflicting, it can't use the fast-path mechanism; but it also does
268 : : * not conflict with any of the locks that do, so we can ignore it completely.
269 : : */
270 : : #define EligibleForRelationFastPath(locktag, mode) \
271 : : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
272 : : (locktag)->locktag_type == LOCKTAG_RELATION && \
273 : : (locktag)->locktag_field1 == MyDatabaseId && \
274 : : MyDatabaseId != InvalidOid && \
275 : : (mode) < ShareUpdateExclusiveLock)
276 : : #define ConflictsWithRelationFastPath(locktag, mode) \
277 : : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
278 : : (locktag)->locktag_type == LOCKTAG_RELATION && \
279 : : (locktag)->locktag_field1 != InvalidOid && \
280 : : (mode) > ShareUpdateExclusiveLock)
281 : :
282 : : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
283 : : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
284 : : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
285 : : const LOCKTAG *locktag, uint32 hashcode);
286 : : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
287 : :
288 : : /*
289 : : * To make the fast-path lock mechanism work, we must have some way of
290 : : * preventing the use of the fast-path when a conflicting lock might be present.
291 : : * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
292 : : * and maintain an integer count of the number of "strong" lockers
293 : : * in each partition. When any "strong" lockers are present (which is
294 : : * hopefully not very often), the fast-path mechanism can't be used, and we
295 : : * must fall back to the slower method of pushing matching locks directly
296 : : * into the main lock tables.
297 : : *
298 : : * The deadlock detector does not know anything about the fast path mechanism,
299 : : * so any locks that might be involved in a deadlock must be transferred from
300 : : * the fast-path queues to the main lock table.
301 : : */
302 : :
303 : : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
304 : : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
305 : : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
306 : : #define FastPathStrongLockHashPartition(hashcode) \
307 : : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
308 : :
309 : : typedef struct
310 : : {
311 : : slock_t mutex;
312 : : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
313 : : } FastPathStrongRelationLockData;
314 : :
315 : : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
316 : :
317 : : static void LockManagerShmemRequest(void *arg);
318 : : static void LockManagerShmemInit(void *arg);
319 : :
320 : : const ShmemCallbacks LockManagerShmemCallbacks = {
321 : : .request_fn = LockManagerShmemRequest,
322 : : .init_fn = LockManagerShmemInit,
323 : : };
324 : :
325 : :
326 : : /*
327 : : * Pointers to hash tables containing lock state
328 : : *
329 : : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
330 : : * shared memory; LockMethodLocalHash is local to each backend.
331 : : */
332 : : static HTAB *LockMethodLockHash;
333 : : static HTAB *LockMethodProcLockHash;
334 : : static HTAB *LockMethodLocalHash;
335 : :
336 : :
337 : : /* private state for error cleanup */
338 : : static LOCALLOCK *StrongLockInProgress;
339 : : static LOCALLOCK *awaitedLock;
340 : : static ResourceOwner awaitedOwner;
341 : :
342 : :
343 : : #ifdef LOCK_DEBUG
344 : :
345 : : /*------
346 : : * The following configuration options are available for lock debugging:
347 : : *
348 : : * TRACE_LOCKS -- give a bunch of output what's going on in this file
349 : : * TRACE_USERLOCKS -- same but for user locks
350 : : * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
351 : : * (use to avoid output on system tables)
352 : : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
353 : : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
354 : : *
355 : : * Furthermore, but in storage/lmgr/lwlock.c:
356 : : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
357 : : *
358 : : * Define LOCK_DEBUG at compile time to get all these enabled.
359 : : * --------
360 : : */
361 : :
362 : : int Trace_lock_oidmin = FirstNormalObjectId;
363 : : bool Trace_locks = false;
364 : : bool Trace_userlocks = false;
365 : : int Trace_lock_table = 0;
366 : : bool Debug_deadlocks = false;
367 : :
368 : :
369 : : inline static bool
370 : : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
371 : : {
372 : : return
373 : : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
374 : : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
375 : : || (Trace_lock_table &&
376 : : (tag->locktag_field2 == Trace_lock_table));
377 : : }
378 : :
379 : :
380 : : inline static void
381 : : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
382 : : {
383 : : if (LOCK_DEBUG_ENABLED(&lock->tag))
384 : : elog(LOG,
385 : : "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
386 : : "req(%d,%d,%d,%d,%d,%d,%d)=%d "
387 : : "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
388 : : where, lock,
389 : : lock->tag.locktag_field1, lock->tag.locktag_field2,
390 : : lock->tag.locktag_field3, lock->tag.locktag_field4,
391 : : lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
392 : : lock->grantMask,
393 : : lock->requested[1], lock->requested[2], lock->requested[3],
394 : : lock->requested[4], lock->requested[5], lock->requested[6],
395 : : lock->requested[7], lock->nRequested,
396 : : lock->granted[1], lock->granted[2], lock->granted[3],
397 : : lock->granted[4], lock->granted[5], lock->granted[6],
398 : : lock->granted[7], lock->nGranted,
399 : : dclist_count(&lock->waitProcs),
400 : : LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
401 : : }
402 : :
403 : :
404 : : inline static void
405 : : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
406 : : {
407 : : if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
408 : : elog(LOG,
409 : : "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
410 : : where, proclockP, proclockP->tag.myLock,
411 : : PROCLOCK_LOCKMETHOD(*(proclockP)),
412 : : proclockP->tag.myProc, (int) proclockP->holdMask);
413 : : }
414 : : #else /* not LOCK_DEBUG */
415 : :
416 : : #define LOCK_PRINT(where, lock, type) ((void) 0)
417 : : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
418 : : #endif /* not LOCK_DEBUG */
419 : :
420 : :
421 : : static uint32 proclock_hash(const void *key, Size keysize);
422 : : static void RemoveLocalLock(LOCALLOCK *locallock);
423 : : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
424 : : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
425 : : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
426 : : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
427 : : static void FinishStrongLockAcquire(void);
428 : : static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
429 : : static void waitonlock_error_callback(void *arg);
430 : : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
431 : : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
432 : : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
433 : : PROCLOCK *proclock, LockMethod lockMethodTable);
434 : : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
435 : : LockMethod lockMethodTable, uint32 hashcode,
436 : : bool wakeupNeeded);
437 : : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
438 : : LOCKTAG *locktag, LOCKMODE lockmode,
439 : : bool decrement_strong_lock_count);
440 : : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
441 : : BlockedProcsData *data);
442 : :
443 : :
444 : : /*
445 : : * Register the lock manager's shmem data structures.
446 : : *
447 : : * In addition to this, each backend must also call InitLockManagerAccess() to
448 : : * create the locallock hash table.
449 : : */
450 : : static void
29 heikki.linnakangas@i 451 :GNC 1244 : LockManagerShmemRequest(void *arg)
452 : : {
453 : : int64 max_table_size;
454 : :
455 : : /*
456 : : * Compute sizes for lock hashtables.
457 : : */
7627 tgl@sss.pgh.pa.us 458 :CBC 1244 : max_table_size = NLOCKENTS();
459 : :
460 : : /*
461 : : * Hash table for LOCK structs. This stores per-locked-object
462 : : * information.
463 : : */
29 heikki.linnakangas@i 464 :GNC 1244 : ShmemRequestHash(.name = "LOCK hash",
465 : : .nelems = max_table_size,
466 : : .ptr = &LockMethodLockHash,
467 : : .hash_info.keysize = sizeof(LOCKTAG),
468 : : .hash_info.entrysize = sizeof(LOCK),
469 : : .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
470 : : .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_PARTITION,
471 : : );
472 : :
473 : : /* Assume an average of 2 holders per lock */
7450 tgl@sss.pgh.pa.us 474 :CBC 1244 : max_table_size *= 2;
475 : :
29 heikki.linnakangas@i 476 :GNC 1244 : ShmemRequestHash(.name = "PROCLOCK hash",
477 : : .nelems = max_table_size,
478 : : .ptr = &LockMethodProcLockHash,
479 : : .hash_info.keysize = sizeof(PROCLOCKTAG),
480 : : .hash_info.entrysize = sizeof(PROCLOCK),
481 : : .hash_info.hash = proclock_hash,
482 : : .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
483 : : .hash_flags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION,
484 : : );
485 : :
486 : 1244 : ShmemRequestStruct(.name = "Fast Path Strong Relation Lock Data",
487 : : .size = sizeof(FastPathStrongRelationLockData),
488 : : .ptr = (void **) (void *) &FastPathStrongRelationLocks,
489 : : );
490 : 1244 : }
491 : :
492 : : static void
493 : 1241 : LockManagerShmemInit(void *arg)
494 : : {
495 : 1241 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
614 heikki.linnakangas@i 496 :CBC 1241 : }
497 : :
498 : : /*
499 : : * Initialize the lock manager's backend-private data structures.
500 : : */
501 : : void
502 : 22969 : InitLockManagerAccess(void)
503 : : {
504 : : /*
505 : : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
506 : : * counts and resource owner information.
507 : : */
508 : : HASHCTL info;
509 : :
7921 tgl@sss.pgh.pa.us 510 : 22969 : info.keysize = sizeof(LOCALLOCKTAG);
511 : 22969 : info.entrysize = sizeof(LOCALLOCK);
512 : :
7452 513 : 22969 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
514 : : 16,
515 : : &info,
516 : : HASH_ELEM | HASH_BLOBS);
10892 scrappy@hub.org 517 : 22969 : }
518 : :
519 : :
520 : : /*
521 : : * Fetch the lock method table associated with a given lock
522 : : */
523 : : LockMethod
7452 tgl@sss.pgh.pa.us 524 : 113 : GetLocksMethodTable(const LOCK *lock)
525 : : {
526 : 113 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
527 : :
528 [ + - - + ]: 113 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
529 : 113 : return LockMethods[lockmethodid];
530 : : }
531 : :
532 : : /*
533 : : * Fetch the lock method table associated with a given locktag
534 : : */
535 : : LockMethod
3725 536 : 1261 : GetLockTagsMethodTable(const LOCKTAG *locktag)
537 : : {
538 : 1261 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
539 : :
540 [ + - - + ]: 1261 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
541 : 1261 : return LockMethods[lockmethodid];
542 : : }
543 : :
544 : :
545 : : /*
546 : : * Compute the hash code associated with a LOCKTAG.
547 : : *
548 : : * To avoid unnecessary recomputations of the hash code, we try to do this
549 : : * just once per function, and then pass it around as needed. Aside from
550 : : * passing the hashcode to hash_search_with_hash_value(), we can extract
551 : : * the lock partition number from the hashcode.
552 : : */
553 : : uint32
7226 554 : 23021331 : LockTagHashCode(const LOCKTAG *locktag)
555 : : {
515 peter@eisentraut.org 556 : 23021331 : return get_hash_value(LockMethodLockHash, locktag);
557 : : }
558 : :
559 : : /*
560 : : * Compute the hash code associated with a PROCLOCKTAG.
561 : : *
562 : : * Because we want to use just one set of partition locks for both the
563 : : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
564 : : * fall into the same partition number as their associated LOCKs.
565 : : * dynahash.c expects the partition number to be the low-order bits of
566 : : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
567 : : * same low-order bits as the associated LOCKTAG's hash code. We achieve
568 : : * this with this specialized hash function.
569 : : */
570 : : static uint32
7226 tgl@sss.pgh.pa.us 571 : 732 : proclock_hash(const void *key, Size keysize)
572 : : {
573 : 732 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
574 : : uint32 lockhash;
575 : : Datum procptr;
576 : :
577 [ - + ]: 732 : Assert(keysize == sizeof(PROCLOCKTAG));
578 : :
579 : : /* Look into the associated LOCK object, and compute its hash code */
580 : 732 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
581 : :
582 : : /*
583 : : * To make the hash code also depend on the PGPROC, we xor the proc
584 : : * struct's address into the hash code, left-shifted so that the
585 : : * partition-number bits don't change. Since this is only a hash, we
586 : : * don't care if we lose high-order bits of the address; use an
587 : : * intermediate variable to suppress cast-pointer-to-int warnings.
588 : : */
589 : 732 : procptr = PointerGetDatum(proclocktag->myProc);
270 peter@eisentraut.org 590 :GNC 732 : lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
591 : :
7226 tgl@sss.pgh.pa.us 592 :CBC 732 : return lockhash;
593 : : }
594 : :
595 : : /*
596 : : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
597 : : * for its underlying LOCK.
598 : : *
599 : : * We use this just to avoid redundant calls of LockTagHashCode().
600 : : */
601 : : static inline uint32
602 : 5094882 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
603 : : {
7153 bruce@momjian.us 604 : 5094882 : uint32 lockhash = hashcode;
605 : : Datum procptr;
606 : :
607 : : /*
608 : : * This must match proclock_hash()!
609 : : */
7226 tgl@sss.pgh.pa.us 610 : 5094882 : procptr = PointerGetDatum(proclocktag->myProc);
270 peter@eisentraut.org 611 :GNC 5094882 : lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
612 : :
7226 tgl@sss.pgh.pa.us 613 :CBC 5094882 : return lockhash;
614 : : }
615 : :
616 : : /*
617 : : * Given two lock modes, return whether they would conflict.
618 : : */
619 : : bool
4850 alvherre@alvh.no-ip. 620 : 232730 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
621 : : {
622 : 232730 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
623 : :
624 [ + + ]: 232730 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
625 : 193851 : return true;
626 : :
627 : 38879 : return false;
628 : : }
629 : :
630 : : /*
631 : : * LockHeldByMe -- test whether lock 'locktag' is held by the current
632 : : * transaction
633 : : *
634 : : * Returns true if current transaction holds a lock on 'tag' of mode
635 : : * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
636 : : * ("Stronger" is defined as "numerically higher", which is a bit
637 : : * semantically dubious but is OK for the purposes we use this for.)
638 : : */
639 : : bool
677 noah@leadboat.com 640 : 6371434 : LockHeldByMe(const LOCKTAG *locktag,
641 : : LOCKMODE lockmode, bool orstronger)
642 : : {
643 : : LOCALLOCKTAG localtag;
644 : : LOCALLOCK *locallock;
645 : :
646 : : /*
647 : : * See if there is a LOCALLOCK entry for this lock and lockmode
648 : : */
2773 tgl@sss.pgh.pa.us 649 [ + - - + : 6371434 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
- - - - -
- ]
650 : 6371434 : localtag.lock = *locktag;
651 : 6371434 : localtag.mode = lockmode;
652 : :
653 : 6371434 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
654 : : &localtag,
655 : : HASH_FIND, NULL);
656 : :
677 noah@leadboat.com 657 [ + + + - ]: 6371434 : if (locallock && locallock->nLocks > 0)
658 : 3317702 : return true;
659 : :
660 [ + + ]: 3053732 : if (orstronger)
661 : : {
662 : : LOCKMODE slockmode;
663 : :
664 : 999903 : for (slockmode = lockmode + 1;
665 [ + - ]: 2949302 : slockmode <= MaxLockMode;
666 : 1949399 : slockmode++)
667 : : {
668 [ + + ]: 2949302 : if (LockHeldByMe(locktag, slockmode, false))
669 : 999903 : return true;
670 : : }
671 : : }
672 : :
673 : 2053829 : return false;
674 : : }
675 : :
676 : : #ifdef USE_ASSERT_CHECKING
677 : : /*
678 : : * GetLockMethodLocalHash -- return the hash of local locks, for modules that
679 : : * evaluate assertions based on all locks held.
680 : : */
681 : : HTAB *
2222 682 : 1935 : GetLockMethodLocalHash(void)
683 : : {
684 : 1935 : return LockMethodLocalHash;
685 : : }
686 : : #endif
687 : :
688 : : /*
689 : : * LockHasWaiters -- look up 'locktag' and check if releasing this
690 : : * lock would wake up other processes waiting for it.
691 : : */
692 : : bool
4893 kgrittn@postgresql.o 693 :UBC 0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
694 : : {
695 : 0 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
696 : : LockMethod lockMethodTable;
697 : : LOCALLOCKTAG localtag;
698 : : LOCALLOCK *locallock;
699 : : LOCK *lock;
700 : : PROCLOCK *proclock;
701 : : LWLock *partitionLock;
702 : 0 : bool hasWaiters = false;
703 : :
704 [ # # # # ]: 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
705 [ # # ]: 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
706 : 0 : lockMethodTable = LockMethods[lockmethodid];
707 [ # # # # ]: 0 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
708 [ # # ]: 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
709 : :
710 : : #ifdef LOCK_DEBUG
711 : : if (LOCK_DEBUG_ENABLED(locktag))
712 : : elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
713 : : locktag->locktag_field1, locktag->locktag_field2,
714 : : lockMethodTable->lockModeNames[lockmode]);
715 : : #endif
716 : :
717 : : /*
718 : : * Find the LOCALLOCK entry for this lock and lockmode
719 : : */
3240 tgl@sss.pgh.pa.us 720 [ # # # # : 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
# # # # #
# ]
4893 kgrittn@postgresql.o 721 : 0 : localtag.lock = *locktag;
722 : 0 : localtag.mode = lockmode;
723 : :
724 : 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
725 : : &localtag,
726 : : HASH_FIND, NULL);
727 : :
728 : : /*
729 : : * let the caller print its own error message, too. Do not ereport(ERROR).
730 : : */
731 [ # # # # ]: 0 : if (!locallock || locallock->nLocks <= 0)
732 : : {
733 [ # # ]: 0 : elog(WARNING, "you don't own a lock of type %s",
734 : : lockMethodTable->lockModeNames[lockmode]);
735 : 0 : return false;
736 : : }
737 : :
738 : : /*
739 : : * Check the shared lock table.
740 : : */
741 : 0 : partitionLock = LockHashPartitionLock(locallock->hashcode);
742 : :
743 : 0 : LWLockAcquire(partitionLock, LW_SHARED);
744 : :
745 : : /*
746 : : * We don't need to re-find the lock or proclock, since we kept their
747 : : * addresses in the locallock table, and they couldn't have been removed
748 : : * while we were holding a lock on them.
749 : : */
750 : 0 : lock = locallock->lock;
751 : : LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
752 : 0 : proclock = locallock->proclock;
753 : : PROCLOCK_PRINT("LockHasWaiters: found", proclock);
754 : :
755 : : /*
756 : : * Double-check that we are actually holding a lock of the type we want to
757 : : * release.
758 : : */
759 [ # # ]: 0 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
760 : : {
761 : : PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
762 : 0 : LWLockRelease(partitionLock);
763 [ # # ]: 0 : elog(WARNING, "you don't own a lock of type %s",
764 : : lockMethodTable->lockModeNames[lockmode]);
765 : 0 : RemoveLocalLock(locallock);
766 : 0 : return false;
767 : : }
768 : :
769 : : /*
770 : : * Do the checking.
771 : : */
772 [ # # ]: 0 : if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
773 : 0 : hasWaiters = true;
774 : :
775 : 0 : LWLockRelease(partitionLock);
776 : :
777 : 0 : return hasWaiters;
778 : : }
779 : :
780 : : /*
781 : : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
782 : : * set lock if/when no conflicts.
783 : : *
784 : : * Inputs:
785 : : * locktag: unique identifier for the lockable object
786 : : * lockmode: lock mode to acquire
787 : : * sessionLock: if true, acquire lock for session not current transaction
788 : : * dontWait: if true, don't wait to acquire lock
789 : : *
790 : : * Returns one of:
791 : : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
792 : : * LOCKACQUIRE_OK lock successfully acquired
793 : : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
794 : : * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
795 : : *
796 : : * In the normal case where dontWait=false and the caller doesn't need to
797 : : * distinguish a freshly acquired lock from one already taken earlier in
798 : : * this same transaction, there is no need to examine the return value.
799 : : *
800 : : * Side Effects: The lock is acquired and recorded in lock tables.
801 : : *
802 : : * NOTE: if we wait for the lock, there is no way to abort the wait
803 : : * short of aborting the transaction.
804 : : */
805 : : LockAcquireResult
7452 tgl@sss.pgh.pa.us 806 :CBC 869470 : LockAcquire(const LOCKTAG *locktag,
807 : : LOCKMODE lockmode,
808 : : bool sessionLock,
809 : : bool dontWait)
810 : : {
2797 811 : 869470 : return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
812 : : true, NULL, false);
813 : : }
814 : :
815 : : /*
816 : : * LockAcquireExtended - allows us to specify additional options
817 : : *
818 : : * reportMemoryError specifies whether a lock request that fills the lock
819 : : * table should generate an ERROR or not. Passing "false" allows the caller
820 : : * to attempt to recover from lock-table-full situations, perhaps by forcibly
821 : : * canceling other lock holders and then retrying. Note, however, that the
822 : : * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
823 : : * in combination with dontWait = true, as the cause of failure couldn't be
824 : : * distinguished.
825 : : *
826 : : * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
827 : : * table entry if a lock is successfully acquired, or NULL if not.
828 : : *
829 : : * logLockFailure indicates whether to log details when a lock acquisition
830 : : * fails with dontWait = true.
831 : : */
832 : : LockAcquireResult
5981 simon@2ndQuadrant.co 833 : 25516028 : LockAcquireExtended(const LOCKTAG *locktag,
834 : : LOCKMODE lockmode,
835 : : bool sessionLock,
836 : : bool dontWait,
837 : : bool reportMemoryError,
838 : : LOCALLOCK **locallockp,
839 : : bool logLockFailure)
840 : : {
7452 tgl@sss.pgh.pa.us 841 : 25516028 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
842 : : LockMethod lockMethodTable;
843 : : LOCALLOCKTAG localtag;
844 : : LOCALLOCK *locallock;
845 : : LOCK *lock;
846 : : PROCLOCK *proclock;
847 : : bool found;
848 : : ResourceOwner owner;
849 : : uint32 hashcode;
850 : : LWLock *partitionLock;
851 : : bool found_conflict;
852 : : ProcWaitStatus waitResult;
5636 simon@2ndQuadrant.co 853 : 25516028 : bool log_lock = false;
854 : :
7452 tgl@sss.pgh.pa.us 855 [ + - - + ]: 25516028 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
7452 tgl@sss.pgh.pa.us 856 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
7452 tgl@sss.pgh.pa.us 857 :CBC 25516028 : lockMethodTable = LockMethods[lockmethodid];
858 [ + - - + ]: 25516028 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
7452 tgl@sss.pgh.pa.us 859 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
860 : :
5981 simon@2ndQuadrant.co 861 [ + + + + ]:CBC 25516028 : if (RecoveryInProgress() && !InRecovery &&
862 [ + + ]: 380028 : (locktag->locktag_type == LOCKTAG_OBJECT ||
5912 bruce@momjian.us 863 [ + - - + ]: 380028 : locktag->locktag_type == LOCKTAG_RELATION) &&
864 : : lockmode > RowExclusiveLock)
5981 simon@2ndQuadrant.co 865 [ # # ]:UBC 0 : ereport(ERROR,
866 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
867 : : errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
868 : : lockMethodTable->lockModeNames[lockmode]),
869 : : errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
870 : :
871 : : #ifdef LOCK_DEBUG
872 : : if (LOCK_DEBUG_ENABLED(locktag))
873 : : elog(LOG, "LockAcquire: lock [%u,%u] %s",
874 : : locktag->locktag_field1, locktag->locktag_field2,
875 : : lockMethodTable->lockModeNames[lockmode]);
876 : : #endif
877 : :
878 : : /* Identify owner for lock */
5114 tgl@sss.pgh.pa.us 879 [ + + ]:CBC 25516028 : if (sessionLock)
7921 880 : 46603 : owner = NULL;
881 : : else
5114 882 : 25469425 : owner = CurrentResourceOwner;
883 : :
884 : : /*
885 : : * Find or create a LOCALLOCK entry for this lock and lockmode
886 : : */
3240 887 [ + - - + : 25516028 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
- - - - -
- ]
7921 888 : 25516028 : localtag.lock = *locktag;
889 : 25516028 : localtag.mode = lockmode;
890 : :
7452 891 : 25516028 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
892 : : &localtag,
893 : : HASH_ENTER, &found);
894 : :
895 : : /*
896 : : * if it's a new locallock object, initialize it
897 : : */
7921 898 [ + + ]: 25516028 : if (!found)
899 : : {
900 : 22309688 : locallock->lock = NULL;
901 : 22309688 : locallock->proclock = NULL;
7226 902 : 22309688 : locallock->hashcode = LockTagHashCode(&(localtag.lock));
7921 903 : 22309688 : locallock->nLocks = 0;
2797 904 : 22309688 : locallock->holdsStrongLockCount = false;
905 : 22309688 : locallock->lockCleared = false;
7921 906 : 22309688 : locallock->numLockOwners = 0;
907 : 22309688 : locallock->maxLockOwners = 8;
3880 908 : 22309688 : locallock->lockOwners = NULL; /* in case next line fails */
7921 909 : 22309688 : locallock->lockOwners = (LOCALLOCKOWNER *)
910 : 22309688 : MemoryContextAlloc(TopMemoryContext,
3240 911 : 22309688 : locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
912 : : }
913 : : else
914 : : {
915 : : /* Make sure there will be room to remember the lock */
7921 916 [ + + ]: 3206340 : if (locallock->numLockOwners >= locallock->maxLockOwners)
917 : : {
7919 bruce@momjian.us 918 : 21 : int newsize = locallock->maxLockOwners * 2;
919 : :
7921 tgl@sss.pgh.pa.us 920 : 21 : locallock->lockOwners = (LOCALLOCKOWNER *)
921 : 21 : repalloc(locallock->lockOwners,
922 : : newsize * sizeof(LOCALLOCKOWNER));
923 : 21 : locallock->maxLockOwners = newsize;
924 : : }
925 : : }
5456 rhaas@postgresql.org 926 : 25516028 : hashcode = locallock->hashcode;
927 : :
2797 tgl@sss.pgh.pa.us 928 [ + + ]: 25516028 : if (locallockp)
929 : 24646464 : *locallockp = locallock;
930 : :
931 : : /*
932 : : * If we already hold the lock, we can just increase the count locally.
933 : : *
934 : : * If lockCleared is already set, caller need not worry about absorbing
935 : : * sinval messages related to the lock's object.
936 : : */
7921 937 [ + + ]: 25516028 : if (locallock->nLocks > 0)
938 : : {
939 : 3206340 : GrantLockLocal(locallock, owner);
2797 940 [ + + ]: 3206340 : if (locallock->lockCleared)
941 : 3107844 : return LOCKACQUIRE_ALREADY_CLEAR;
942 : : else
943 : 98496 : return LOCKACQUIRE_ALREADY_HELD;
944 : : }
945 : :
946 : : /*
947 : : * We don't acquire any other heavyweight lock while holding the relation
948 : : * extension lock. We do allow to acquire the same relation extension
949 : : * lock more than once but that case won't reach here.
950 : : */
2239 akapila@postgresql.o 951 [ - + ]: 22309688 : Assert(!IsRelationExtensionLockHeld);
952 : :
953 : : /*
954 : : * Prepare to emit a WAL record if acquisition of this lock needs to be
955 : : * replayed in a standby server.
956 : : *
957 : : * Here we prepare to log; after lock is acquired we'll issue log record.
958 : : * This arrangement simplifies error recovery in case the preparation step
959 : : * fails.
960 : : *
961 : : * Only AccessExclusiveLocks can conflict with lock types that read-only
962 : : * transactions can acquire in a standby server. Make sure this definition
963 : : * matches the one in GetRunningTransactionLocks().
964 : : */
5636 simon@2ndQuadrant.co 965 [ + + ]: 22309688 : if (lockmode >= AccessExclusiveLock &&
966 [ + + ]: 314316 : locktag->locktag_type == LOCKTAG_RELATION &&
967 [ + + ]: 207016 : !RecoveryInProgress() &&
968 [ + + ]: 179532 : XLogStandbyInfoActive())
969 : : {
970 : 171893 : LogAccessExclusiveLockPrepare();
971 : 171893 : log_lock = true;
972 : : }
973 : :
974 : : /*
975 : : * Attempt to take lock via fast path, if eligible. But if we remember
976 : : * having filled up the fast path array, we don't attempt to make any
977 : : * further use of it until we release some locks. It's possible that some
978 : : * other backend has transferred some of those locks to the shared hash
979 : : * table, leaving space free, but it's not worth acquiring the LWLock just
980 : : * to check. It's also possible that we're acquiring a second or third
981 : : * lock type on a relation we have already locked using the fast-path, but
982 : : * for now we don't worry about that case either.
983 : : */
42 michael@paquier.xyz 984 [ + + + + :GNC 22309688 : if (EligibleForRelationFastPath(locktag, lockmode))
+ + + + +
+ ]
985 : : {
986 [ + + ]: 20148729 : if (FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] <
987 : : FP_LOCK_SLOTS_PER_GROUP)
988 : : {
989 : 20039803 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
990 : : bool acquired;
991 : :
992 : : /*
993 : : * LWLockAcquire acts as a memory sequencing point, so it's safe
994 : : * to assume that any strong locker whose increment to
995 : : * FastPathStrongRelationLocks->counts becomes visible after we
996 : : * test it has yet to begin to transfer fast-path locks.
997 : : */
998 : 20039803 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
999 [ + + ]: 20039803 : if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
1000 : 456994 : acquired = false;
1001 : : else
1002 : 19582809 : acquired = FastPathGrantRelationLock(locktag->locktag_field2,
1003 : : lockmode);
1004 : 20039803 : LWLockRelease(&MyProc->fpInfoLock);
1005 [ + + ]: 20039803 : if (acquired)
1006 : : {
1007 : : /*
1008 : : * The locallock might contain stale pointers to some old
1009 : : * shared objects; we MUST reset these to null before
1010 : : * considering the lock to be acquired via fast-path.
1011 : : */
1012 : 19582809 : locallock->lock = NULL;
1013 : 19582809 : locallock->proclock = NULL;
1014 : 19582809 : GrantLockLocal(locallock, owner);
1015 : 19582809 : return LOCKACQUIRE_OK;
1016 : : }
1017 : : }
1018 : : else
1019 : : {
1020 : : /*
1021 : : * Increment the lock statistics counter if lock could not be
1022 : : * acquired via the fast-path.
1023 : : */
1024 : 108926 : pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
1025 : : }
1026 : : }
1027 : :
1028 : : /*
1029 : : * If this lock could potentially have been taken via the fast-path by
1030 : : * some other backend, we must (temporarily) disable further use of the
1031 : : * fast-path for this lock tag, and migrate any locks already taken via
1032 : : * this method to the main lock table.
1033 : : */
5088 rhaas@postgresql.org 1034 [ + + + + :CBC 2726879 : if (ConflictsWithRelationFastPath(locktag, lockmode))
+ + + + ]
1035 : : {
5077 bruce@momjian.us 1036 : 246328 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1037 : :
5088 rhaas@postgresql.org 1038 : 246328 : BeginStrongLockAcquire(locallock, fasthashcode);
1039 [ - + ]: 246328 : if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1040 : : hashcode))
1041 : : {
5088 rhaas@postgresql.org 1042 :UBC 0 : AbortStrongLockAcquire();
2797 tgl@sss.pgh.pa.us 1043 [ # # ]: 0 : if (locallock->nLocks == 0)
1044 : 0 : RemoveLocalLock(locallock);
1045 [ # # ]: 0 : if (locallockp)
1046 : 0 : *locallockp = NULL;
5088 rhaas@postgresql.org 1047 [ # # ]: 0 : if (reportMemoryError)
1048 [ # # ]: 0 : ereport(ERROR,
1049 : : (errcode(ERRCODE_OUT_OF_MEMORY),
1050 : : errmsg("out of shared memory"),
1051 : : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1052 : : else
1053 : 0 : return LOCKACQUIRE_NOT_AVAIL;
1054 : : }
1055 : : }
1056 : :
1057 : : /*
1058 : : * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1059 : : * take it via the fast-path, either, so we've got to mess with the shared
1060 : : * lock table.
1061 : : */
7226 tgl@sss.pgh.pa.us 1062 :CBC 2726879 : partitionLock = LockHashPartitionLock(hashcode);
1063 : :
7450 1064 : 2726879 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1065 : :
1066 : : /*
1067 : : * Find or create lock and proclock entries with this tag
1068 : : *
1069 : : * Note: if the locallock object already existed, it might have a pointer
1070 : : * to the lock already ... but we should not assume that that pointer is
1071 : : * valid, since a lock object with zero hold and request counts can go
1072 : : * away anytime. So we have to use SetupLockInTable() to recompute the
1073 : : * lock and proclock pointers, even if they're already set.
1074 : : */
5456 rhaas@postgresql.org 1075 : 2726879 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1076 : : hashcode, lockmode);
1077 [ - + ]: 2726879 : if (!proclock)
1078 : : {
5130 rhaas@postgresql.org 1079 :UBC 0 : AbortStrongLockAcquire();
5456 1080 : 0 : LWLockRelease(partitionLock);
2797 tgl@sss.pgh.pa.us 1081 [ # # ]: 0 : if (locallock->nLocks == 0)
1082 : 0 : RemoveLocalLock(locallock);
1083 [ # # ]: 0 : if (locallockp)
1084 : 0 : *locallockp = NULL;
5456 rhaas@postgresql.org 1085 [ # # ]: 0 : if (reportMemoryError)
1086 [ # # ]: 0 : ereport(ERROR,
1087 : : (errcode(ERRCODE_OUT_OF_MEMORY),
1088 : : errmsg("out of shared memory"),
1089 : : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1090 : : else
1091 : 0 : return LOCKACQUIRE_NOT_AVAIL;
1092 : : }
5456 rhaas@postgresql.org 1093 :CBC 2726879 : locallock->proclock = proclock;
1094 : 2726879 : lock = proclock->tag.myLock;
1095 : 2726879 : locallock->lock = lock;
1096 : :
1097 : : /*
1098 : : * If lock requested conflicts with locks requested by waiters, must join
1099 : : * wait queue. Otherwise, check for conflict with already-held locks.
1100 : : * (That's last because most complex check.)
1101 : : */
1102 [ + + ]: 2726879 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
2319 peter@eisentraut.org 1103 : 274 : found_conflict = true;
1104 : : else
1105 : 2726605 : found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1106 : : lock, proclock);
1107 : :
1108 [ + + ]: 2726879 : if (!found_conflict)
1109 : : {
1110 : : /* No conflict with held or previously requested locks */
5456 rhaas@postgresql.org 1111 : 2724561 : GrantLock(lock, proclock, lockmode);
547 heikki.linnakangas@i 1112 : 2724561 : waitResult = PROC_WAIT_STATUS_OK;
1113 : : }
1114 : : else
1115 : : {
1116 : : /*
1117 : : * Join the lock's wait queue. We call this even in the dontWait
1118 : : * case, because JoinWaitQueue() may discover that we can acquire the
1119 : : * lock immediately after all.
1120 : : */
1121 : 2318 : waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
1122 : : }
1123 : :
1124 [ + + ]: 2726879 : if (waitResult == PROC_WAIT_STATUS_ERROR)
1125 : : {
1126 : : /*
1127 : : * We're not getting the lock because a deadlock was detected already
1128 : : * while trying to join the wait queue, or because we would have to
1129 : : * wait but the caller requested no blocking.
1130 : : *
1131 : : * Undo the changes to shared entries before releasing the partition
1132 : : * lock.
1133 : : */
1134 : 764 : AbortStrongLockAcquire();
1135 : :
1136 [ + + ]: 764 : if (proclock->holdMask == 0)
1137 : : {
1138 : : uint32 proclock_hashcode;
1139 : :
1140 : 560 : proclock_hashcode = ProcLockHashCode(&proclock->tag,
1141 : : hashcode);
1142 : 560 : dlist_delete(&proclock->lockLink);
1143 : 560 : dlist_delete(&proclock->procLink);
1144 [ - + ]: 560 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1145 : 560 : &(proclock->tag),
1146 : : proclock_hashcode,
1147 : : HASH_REMOVE,
1148 : : NULL))
547 heikki.linnakangas@i 1149 [ # # ]:UBC 0 : elog(PANIC, "proclock table corrupted");
1150 : : }
1151 : : else
1152 : : PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
547 heikki.linnakangas@i 1153 :CBC 764 : lock->nRequested--;
1154 : 764 : lock->requested[lockmode]--;
1155 : : LOCK_PRINT("LockAcquire: did not join wait queue",
1156 : : lock, lockmode);
1157 [ + - - + ]: 764 : Assert((lock->nRequested > 0) &&
1158 : : (lock->requested[lockmode] >= 0));
1159 [ - + ]: 764 : Assert(lock->nGranted <= lock->nRequested);
1160 : 764 : LWLockRelease(partitionLock);
1161 [ + - ]: 764 : if (locallock->nLocks == 0)
1162 : 764 : RemoveLocalLock(locallock);
1163 : :
1164 [ + + ]: 764 : if (dontWait)
1165 : : {
1166 : : /*
1167 : : * Log lock holders and waiters as a detail log message if
1168 : : * logLockFailure = true and lock acquisition fails with dontWait
1169 : : * = true
1170 : : */
417 fujii@postgresql.org 1171 [ - + ]: 763 : if (logLockFailure)
1172 : : {
1173 : : StringInfoData buf,
1174 : : lock_waiters_sbuf,
1175 : : lock_holders_sbuf;
1176 : : const char *modename;
417 fujii@postgresql.org 1177 :UBC 0 : int lockHoldersNum = 0;
1178 : :
1179 : 0 : initStringInfo(&buf);
1180 : 0 : initStringInfo(&lock_waiters_sbuf);
1181 : 0 : initStringInfo(&lock_holders_sbuf);
1182 : :
1183 : 0 : DescribeLockTag(&buf, &locallock->tag.lock);
1184 : 0 : modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1185 : : lockmode);
1186 : :
1187 : : /* Gather a list of all lock holders and waiters */
1188 : 0 : LWLockAcquire(partitionLock, LW_SHARED);
1189 : 0 : GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1190 : : &lock_waiters_sbuf, &lockHoldersNum);
1191 : 0 : LWLockRelease(partitionLock);
1192 : :
1193 [ # # ]: 0 : ereport(LOG,
1194 : : (errmsg("process %d could not obtain %s on %s",
1195 : : MyProcPid, modename, buf.data),
1196 : : errdetail_log_plural(
1197 : : "Process holding the lock: %s, Wait queue: %s.",
1198 : : "Processes holding the lock: %s, Wait queue: %s.",
1199 : : lockHoldersNum,
1200 : : lock_holders_sbuf.data,
1201 : : lock_waiters_sbuf.data)));
1202 : :
1203 : 0 : pfree(buf.data);
1204 : 0 : pfree(lock_holders_sbuf.data);
1205 : 0 : pfree(lock_waiters_sbuf.data);
1206 : : }
547 heikki.linnakangas@i 1207 [ + + ]:CBC 763 : if (locallockp)
1208 : 221 : *locallockp = NULL;
1209 : 763 : return LOCKACQUIRE_NOT_AVAIL;
1210 : : }
1211 : : else
1212 : : {
1213 : 1 : DeadLockReport();
1214 : : /* DeadLockReport() will not return */
1215 : : }
1216 : : }
1217 : :
1218 : : /*
1219 : : * We are now in the lock queue, or the lock was already granted. If
1220 : : * queued, go to sleep.
1221 : : */
1222 [ + + ]: 2726115 : if (waitResult == PROC_WAIT_STATUS_WAITING)
1223 : : {
1224 [ - + ]: 1547 : Assert(!dontWait);
1225 : : PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1226 : : LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1227 : 1547 : LWLockRelease(partitionLock);
1228 : :
1229 : 1547 : waitResult = WaitOnLock(locallock, owner);
1230 : :
1231 : : /*
1232 : : * NOTE: do not do any material change of state between here and
1233 : : * return. All required changes in locktable state must have been
1234 : : * done when the lock was granted to us --- see notes in WaitOnLock.
1235 : : */
1236 : :
1237 [ + + ]: 1505 : if (waitResult == PROC_WAIT_STATUS_ERROR)
1238 : : {
1239 : : /*
1240 : : * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1241 : : * now.
1242 : : */
1243 [ - + ]: 5 : Assert(!dontWait);
1244 : 5 : DeadLockReport();
1245 : : /* DeadLockReport() will not return */
1246 : : }
1247 : : }
1248 : : else
1249 : 2724568 : LWLockRelease(partitionLock);
1250 [ - + ]: 2726068 : Assert(waitResult == PROC_WAIT_STATUS_OK);
1251 : :
1252 : : /* The lock was granted to us. Update the local lock entry accordingly */
1253 [ - + ]: 2726068 : Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1254 : 2726068 : GrantLockLocal(locallock, owner);
1255 : :
1256 : : /*
1257 : : * Lock state is fully up-to-date now; if we error out after this, no
1258 : : * special error cleanup is required.
1259 : : */
5130 rhaas@postgresql.org 1260 : 2726068 : FinishStrongLockAcquire();
1261 : :
1262 : : /*
1263 : : * Emit a WAL record if acquisition of this lock needs to be replayed in a
1264 : : * standby server.
1265 : : */
5456 1266 [ + + ]: 2726068 : if (log_lock)
1267 : : {
1268 : : /*
1269 : : * Decode the locktag back to the original values, to avoid sending
1270 : : * lots of empty bytes with every message. See lock.h to check how a
1271 : : * locktag is defined for LOCKTAG_RELATION
1272 : : */
1273 : 171679 : LogAccessExclusiveLock(locktag->locktag_field1,
1274 : 171679 : locktag->locktag_field2);
1275 : : }
1276 : :
1277 : 2726068 : return LOCKACQUIRE_OK;
1278 : : }
1279 : :
1280 : : /*
1281 : : * Find or create LOCK and PROCLOCK objects as needed for a new lock
1282 : : * request.
1283 : : *
1284 : : * Returns the PROCLOCK object, or NULL if we failed to create the objects
1285 : : * for lack of shared memory.
1286 : : *
1287 : : * The appropriate partition lock must be held at entry, and will be
1288 : : * held at exit.
1289 : : */
1290 : : static PROCLOCK *
1291 : 2729042 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1292 : : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1293 : : {
1294 : : LOCK *lock;
1295 : : PROCLOCK *proclock;
1296 : : PROCLOCKTAG proclocktag;
1297 : : uint32 proclock_hashcode;
1298 : : bool found;
1299 : :
1300 : : /*
1301 : : * Find or create a lock with this tag.
1302 : : */
7226 tgl@sss.pgh.pa.us 1303 : 2729042 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1304 : : locktag,
1305 : : hashcode,
1306 : : HASH_ENTER_NULL,
1307 : : &found);
10467 bruce@momjian.us 1308 [ - + ]: 2729042 : if (!lock)
5456 rhaas@postgresql.org 1309 :UBC 0 : return NULL;
1310 : :
1311 : : /*
1312 : : * if it's a new lock object, initialize it
1313 : : */
10467 bruce@momjian.us 1314 [ + + ]:CBC 2729042 : if (!found)
1315 : : {
9240 tgl@sss.pgh.pa.us 1316 : 2317467 : lock->grantMask = 0;
1317 : 2317467 : lock->waitMask = 0;
1203 andres@anarazel.de 1318 : 2317467 : dlist_init(&lock->procLocks);
1319 : 2317467 : dclist_init(&lock->waitProcs);
9240 tgl@sss.pgh.pa.us 1320 : 2317467 : lock->nRequested = 0;
1321 : 2317467 : lock->nGranted = 0;
7664 neilc@samurai.com 1322 [ + - + - : 13904802 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
+ - + - +
+ ]
1323 [ - + - - : 2317467 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
- - - - -
- ]
1324 : : LOCK_PRINT("LockAcquire: new", lock, lockmode);
1325 : : }
1326 : : else
1327 : : {
1328 : : LOCK_PRINT("LockAcquire: found", lock, lockmode);
9240 tgl@sss.pgh.pa.us 1329 [ + - - + ]: 411575 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1330 [ + - - + ]: 411575 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1331 [ - + ]: 411575 : Assert(lock->nGranted <= lock->nRequested);
1332 : : }
1333 : :
1334 : : /*
1335 : : * Create the hash key for the proclock table.
1336 : : */
7226 1337 : 2729042 : proclocktag.myLock = lock;
5456 rhaas@postgresql.org 1338 : 2729042 : proclocktag.myProc = proc;
1339 : :
7226 tgl@sss.pgh.pa.us 1340 : 2729042 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1341 : :
1342 : : /*
1343 : : * Find or create a proclock entry with this tag
1344 : : */
1345 : 2729042 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1346 : : &proclocktag,
1347 : : proclock_hashcode,
1348 : : HASH_ENTER_NULL,
1349 : : &found);
8477 bruce@momjian.us 1350 [ - + ]: 2729042 : if (!proclock)
1351 : : {
1352 : : /* Oops, not enough shmem for the proclock */
7905 tgl@sss.pgh.pa.us 1353 [ # # ]:UBC 0 : if (lock->nRequested == 0)
1354 : : {
1355 : : /*
1356 : : * There are no other requestors of this lock, so garbage-collect
1357 : : * the lock object. We *must* do this to avoid a permanent leak
1358 : : * of shared memory, because there won't be anything to cause
1359 : : * anyone to release the lock object later.
1360 : : */
1203 andres@anarazel.de 1361 [ # # ]: 0 : Assert(dlist_is_empty(&(lock->procLocks)));
7226 tgl@sss.pgh.pa.us 1362 [ # # ]: 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
1184 peter@eisentraut.org 1363 : 0 : &(lock->tag),
1364 : : hashcode,
1365 : : HASH_REMOVE,
1366 : : NULL))
7656 tgl@sss.pgh.pa.us 1367 [ # # ]: 0 : elog(PANIC, "lock table corrupted");
1368 : : }
5456 rhaas@postgresql.org 1369 : 0 : return NULL;
1370 : : }
1371 : :
1372 : : /*
1373 : : * If new, initialize the new entry
1374 : : */
10467 bruce@momjian.us 1375 [ + + ]:CBC 2729042 : if (!found)
1376 : : {
5456 rhaas@postgresql.org 1377 : 2363066 : uint32 partition = LockHashPartition(hashcode);
1378 : :
1379 : : /*
1380 : : * It might seem unsafe to access proclock->groupLeader without a
1381 : : * lock, but it's not really. Either we are initializing a proclock
1382 : : * on our own behalf, in which case our group leader isn't changing
1383 : : * because the group leader for a process can only ever be changed by
1384 : : * the process itself; or else we are transferring a fast-path lock to
1385 : : * the main lock table, in which case that process can't change its
1386 : : * lock group leader without first releasing all of its locks (and in
1387 : : * particular the one we are currently transferring).
1388 : : */
3740 1389 : 4726132 : proclock->groupLeader = proc->lockGroupLeader != NULL ?
1390 [ + + ]: 2363066 : proc->lockGroupLeader : proc;
7921 tgl@sss.pgh.pa.us 1391 : 2363066 : proclock->holdMask = 0;
7630 1392 : 2363066 : proclock->releaseMask = 0;
1393 : : /* Add proclock to appropriate lists */
1203 andres@anarazel.de 1394 : 2363066 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1395 : 2363066 : dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1396 : : PROCLOCK_PRINT("LockAcquire: new", proclock);
1397 : : }
1398 : : else
1399 : : {
1400 : : PROCLOCK_PRINT("LockAcquire: found", proclock);
7921 tgl@sss.pgh.pa.us 1401 [ - + ]: 365976 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
1402 : :
1403 : : #ifdef CHECK_DEADLOCK_RISK
1404 : :
1405 : : /*
1406 : : * Issue warning if we already hold a lower-level lock on this object
1407 : : * and do not hold a lock of the requested level or higher. This
1408 : : * indicates a deadlock-prone coding practice (eg, we'd have a
1409 : : * deadlock if another backend were following the same code path at
1410 : : * about the same time).
1411 : : *
1412 : : * This is not enabled by default, because it may generate log entries
1413 : : * about user-level coding practices that are in fact safe in context.
1414 : : * It can be enabled to help find system-level problems.
1415 : : *
1416 : : * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1417 : : * better to use a table. For now, though, this works.
1418 : : */
1419 : : {
1420 : : int i;
1421 : :
1422 : : for (i = lockMethodTable->numLockModes; i > 0; i--)
1423 : : {
1424 : : if (proclock->holdMask & LOCKBIT_ON(i))
1425 : : {
1426 : : if (i >= (int) lockmode)
1427 : : break; /* safe: we have a lock >= req level */
1428 : : elog(LOG, "deadlock risk: raising lock level"
1429 : : " from %s to %s on object %u/%u/%u",
1430 : : lockMethodTable->lockModeNames[i],
1431 : : lockMethodTable->lockModeNames[lockmode],
1432 : : lock->tag.locktag_field1, lock->tag.locktag_field2,
1433 : : lock->tag.locktag_field3);
1434 : : break;
1435 : : }
1436 : : }
1437 : : }
1438 : : #endif /* CHECK_DEADLOCK_RISK */
1439 : : }
1440 : :
1441 : : /*
1442 : : * lock->nRequested and lock->requested[] count the total number of
1443 : : * requests, whether granted or waiting, so increment those immediately.
1444 : : * The other counts don't increment till we get the lock.
1445 : : */
9240 1446 : 2729042 : lock->nRequested++;
1447 : 2729042 : lock->requested[lockmode]++;
1448 [ + - - + ]: 2729042 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1449 : :
1450 : : /*
1451 : : * We shouldn't already hold the desired lock; else locallock table is
1452 : : * broken.
1453 : : */
7630 1454 [ - + ]: 2729042 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
7630 tgl@sss.pgh.pa.us 1455 [ # # ]:UBC 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
1456 : : lockMethodTable->lockModeNames[lockmode],
1457 : : lock->tag.locktag_field1, lock->tag.locktag_field2,
1458 : : lock->tag.locktag_field3);
1459 : :
5456 rhaas@postgresql.org 1460 :CBC 2729042 : return proclock;
1461 : : }
1462 : :
1463 : : /*
1464 : : * Check and set/reset the flag that we hold the relation extension lock.
1465 : : *
1466 : : * It is callers responsibility that this function is called after
1467 : : * acquiring/releasing the relation extension lock.
1468 : : *
1469 : : * Pass acquired as true if lock is acquired, false otherwise.
1470 : : */
1471 : : static inline void
2239 akapila@postgresql.o 1472 : 45678067 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1473 : : {
1474 : : #ifdef USE_ASSERT_CHECKING
1475 [ + + ]: 45678067 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1476 : 420834 : IsRelationExtensionLockHeld = acquired;
1477 : : #endif
1478 : 45678067 : }
1479 : :
1480 : : /*
1481 : : * Subroutine to free a locallock entry
1482 : : */
1483 : : static void
7921 tgl@sss.pgh.pa.us 1484 : 22309688 : RemoveLocalLock(LOCALLOCK *locallock)
1485 : : {
1486 : : int i;
1487 : :
5066 heikki.linnakangas@i 1488 [ + + ]: 22424073 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
1489 : : {
1490 [ + + ]: 114385 : if (locallock->lockOwners[i].owner != NULL)
1491 : 114327 : ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1492 : : }
3880 tgl@sss.pgh.pa.us 1493 : 22309688 : locallock->numLockOwners = 0;
1494 [ + - ]: 22309688 : if (locallock->lockOwners != NULL)
1495 : 22309688 : pfree(locallock->lockOwners);
7921 1496 : 22309688 : locallock->lockOwners = NULL;
1497 : :
5456 rhaas@postgresql.org 1498 [ + + ]: 22309688 : if (locallock->holdsStrongLockCount)
1499 : : {
1500 : : uint32 fasthashcode;
1501 : :
1502 : 246010 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1503 : :
5404 1504 [ + + ]: 246010 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1505 [ - + ]: 246010 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1506 : 246010 : FastPathStrongRelationLocks->count[fasthashcode]--;
3184 peter_e@gmx.net 1507 : 246010 : locallock->holdsStrongLockCount = false;
5404 rhaas@postgresql.org 1508 : 246010 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1509 : : }
1510 : :
7452 tgl@sss.pgh.pa.us 1511 [ - + ]: 22309688 : if (!hash_search(LockMethodLocalHash,
1184 peter@eisentraut.org 1512 : 22309688 : &(locallock->tag),
1513 : : HASH_REMOVE, NULL))
7921 tgl@sss.pgh.pa.us 1514 [ # # ]:UBC 0 : elog(WARNING, "locallock table corrupted");
1515 : :
1516 : : /*
1517 : : * Indicate that the lock is released for certain types of locks
1518 : : */
2239 akapila@postgresql.o 1519 :CBC 22309688 : CheckAndSetLockHeld(locallock, false);
7921 tgl@sss.pgh.pa.us 1520 : 22309688 : }
1521 : :
1522 : : /*
1523 : : * LockCheckConflicts -- test whether requested lock conflicts
1524 : : * with those already granted
1525 : : *
1526 : : * Returns true if conflict, false if no conflict.
1527 : : *
1528 : : * NOTES:
1529 : : * Here's what makes this complicated: one process's locks don't
1530 : : * conflict with one another, no matter what purpose they are held for
1531 : : * (eg, session and transaction locks do not conflict). Nor do the locks
1532 : : * of one process in a lock group conflict with those of another process in
1533 : : * the same group. So, we must subtract off these locks when determining
1534 : : * whether the requested new lock conflicts with those already held.
1535 : : */
1536 : : bool
8191 bruce@momjian.us 1537 : 2728424 : LockCheckConflicts(LockMethod lockMethodTable,
1538 : : LOCKMODE lockmode,
1539 : : LOCK *lock,
1540 : : PROCLOCK *proclock)
1541 : : {
8692 1542 : 2728424 : int numLockModes = lockMethodTable->numLockModes;
1543 : : LOCKMASK myLocks;
3740 rhaas@postgresql.org 1544 : 2728424 : int conflictMask = lockMethodTable->conflictTab[lockmode];
1545 : : int conflictsRemaining[MAX_LOCKMODES];
1546 : 2728424 : int totalConflictsRemaining = 0;
1547 : : dlist_iter proclock_iter;
1548 : : int i;
1549 : :
1550 : : /*
1551 : : * first check for global conflicts: If no locks conflict with my request,
1552 : : * then I get the lock.
1553 : : *
1554 : : * Checking for conflict: lock->grantMask represents the types of
1555 : : * currently held locks. conflictTable[lockmode] has a bit set for each
1556 : : * type of lock that conflicts with request. Bitwise compare tells if
1557 : : * there is a conflict.
1558 : : */
1559 [ + + ]: 2728424 : if (!(conflictMask & lock->grantMask))
1560 : : {
1561 : : PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
2319 peter@eisentraut.org 1562 : 2573446 : return false;
1563 : : }
1564 : :
1565 : : /*
1566 : : * Rats. Something conflicts. But it could still be my own lock, or a
1567 : : * lock held by another member of my locking group. First, figure out how
1568 : : * many conflicts remain after subtracting out any locks I hold myself.
1569 : : */
7630 tgl@sss.pgh.pa.us 1570 : 154978 : myLocks = proclock->holdMask;
8191 bruce@momjian.us 1571 [ + + ]: 1394802 : for (i = 1; i <= numLockModes; i++)
1572 : : {
3740 rhaas@postgresql.org 1573 [ + + ]: 1239824 : if ((conflictMask & LOCKBIT_ON(i)) == 0)
1574 : : {
1575 : 754807 : conflictsRemaining[i] = 0;
1576 : 754807 : continue;
1577 : : }
1578 : 485017 : conflictsRemaining[i] = lock->granted[i];
1579 [ + + ]: 485017 : if (myLocks & LOCKBIT_ON(i))
1580 : 164760 : --conflictsRemaining[i];
1581 : 485017 : totalConflictsRemaining += conflictsRemaining[i];
1582 : : }
1583 : :
1584 : : /* If no conflicts remain, we get the lock. */
1585 [ + + ]: 154978 : if (totalConflictsRemaining == 0)
1586 : : {
1587 : : PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
2319 peter@eisentraut.org 1588 : 151841 : return false;
1589 : : }
1590 : :
1591 : : /* If no group locking, it's definitely a conflict. */
3740 rhaas@postgresql.org 1592 [ + + + + ]: 3137 : if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1593 : : {
1594 [ - + ]: 2040 : Assert(proclock->tag.myProc == MyProc);
1595 : : PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1596 : : proclock);
2319 peter@eisentraut.org 1597 : 2040 : return true;
1598 : : }
1599 : :
1600 : : /*
1601 : : * The relation extension lock conflict even between the group members.
1602 : : */
1034 akapila@postgresql.o 1603 [ + + ]: 1097 : if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1604 : : {
1605 : : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1606 : : proclock);
2237 1607 : 23 : return true;
1608 : : }
1609 : :
1610 : : /*
1611 : : * Locks held in conflicting modes by members of our own lock group are
1612 : : * not real conflicts; we can subtract those out and see if we still have
1613 : : * a conflict. This is O(N) in the number of processes holding or
1614 : : * awaiting locks on this object. We could improve that by making the
1615 : : * shared memory state more complex (and larger) but it doesn't seem worth
1616 : : * it.
1617 : : */
1203 andres@anarazel.de 1618 [ + - + + ]: 2415 : dlist_foreach(proclock_iter, &lock->procLocks)
1619 : : {
1620 : 2125 : PROCLOCK *otherproclock =
1621 : 2125 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1622 : :
3740 rhaas@postgresql.org 1623 [ + + ]: 2125 : if (proclock != otherproclock &&
1624 [ + + ]: 1835 : proclock->groupLeader == otherproclock->groupLeader &&
1625 [ + + ]: 805 : (otherproclock->holdMask & conflictMask) != 0)
1626 : : {
3617 1627 : 803 : int intersectMask = otherproclock->holdMask & conflictMask;
1628 : :
3740 1629 [ + + ]: 7227 : for (i = 1; i <= numLockModes; i++)
1630 : : {
1631 [ + + ]: 6424 : if ((intersectMask & LOCKBIT_ON(i)) != 0)
1632 : : {
1633 [ - + ]: 816 : if (conflictsRemaining[i] <= 0)
3740 rhaas@postgresql.org 1634 [ # # ]:UBC 0 : elog(PANIC, "proclocks held do not match lock");
3740 rhaas@postgresql.org 1635 :CBC 816 : conflictsRemaining[i]--;
1636 : 816 : totalConflictsRemaining--;
1637 : : }
1638 : : }
1639 : :
1640 [ + + ]: 803 : if (totalConflictsRemaining == 0)
1641 : : {
1642 : : PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1643 : : proclock);
2319 peter@eisentraut.org 1644 : 784 : return false;
1645 : : }
1646 : : }
1647 : : }
1648 : :
1649 : : /* Nope, it's a real conflict. */
1650 : : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1651 : 290 : return true;
1652 : : }
1653 : :
1654 : : /*
1655 : : * GrantLock -- update the lock and proclock data structures to show
1656 : : * the lock request has been granted.
1657 : : *
1658 : : * NOTE: if proc was blocked, it also needs to be removed from the wait list
1659 : : * and have its waitLock/waitProcLock fields cleared. That's not done here.
1660 : : *
1661 : : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1662 : : * table entry; but since we may be awaking some other process, we can't do
1663 : : * that here; it's done by GrantLockLocal, instead.
1664 : : */
1665 : : void
8477 bruce@momjian.us 1666 : 2728324 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1667 : : {
9240 tgl@sss.pgh.pa.us 1668 : 2728324 : lock->nGranted++;
1669 : 2728324 : lock->granted[lockmode]++;
8191 bruce@momjian.us 1670 : 2728324 : lock->grantMask |= LOCKBIT_ON(lockmode);
9240 tgl@sss.pgh.pa.us 1671 [ + + ]: 2728324 : if (lock->granted[lockmode] == lock->requested[lockmode])
8191 bruce@momjian.us 1672 : 2727875 : lock->waitMask &= LOCKBIT_OFF(lockmode);
7921 tgl@sss.pgh.pa.us 1673 : 2728324 : proclock->holdMask |= LOCKBIT_ON(lockmode);
1674 : : LOCK_PRINT("GrantLock", lock, lockmode);
9240 1675 [ + - - + ]: 2728324 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1676 [ - + ]: 2728324 : Assert(lock->nGranted <= lock->nRequested);
7921 1677 : 2728324 : }
1678 : :
1679 : : /*
1680 : : * UnGrantLock -- opposite of GrantLock.
1681 : : *
1682 : : * Updates the lock and proclock data structures to show that the lock
1683 : : * is no longer held nor requested by the current holder.
1684 : : *
1685 : : * Returns true if there were any waiters waiting on the lock that
1686 : : * should now be woken up with ProcLockWakeup.
1687 : : */
1688 : : static bool
7760 neilc@samurai.com 1689 : 2728259 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1690 : : PROCLOCK *proclock, LockMethod lockMethodTable)
1691 : : {
7507 bruce@momjian.us 1692 : 2728259 : bool wakeupNeeded = false;
1693 : :
7760 neilc@samurai.com 1694 [ + - - + ]: 2728259 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1695 [ + - - + ]: 2728259 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1696 [ - + ]: 2728259 : Assert(lock->nGranted <= lock->nRequested);
1697 : :
1698 : : /*
1699 : : * fix the general lock stats
1700 : : */
1701 : 2728259 : lock->nRequested--;
1702 : 2728259 : lock->requested[lockmode]--;
1703 : 2728259 : lock->nGranted--;
1704 : 2728259 : lock->granted[lockmode]--;
1705 : :
1706 [ + + ]: 2728259 : if (lock->granted[lockmode] == 0)
1707 : : {
1708 : : /* change the conflict mask. No more of this lock type. */
1709 : 2692379 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1710 : : }
1711 : :
1712 : : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1713 : :
1714 : : /*
1715 : : * We need only run ProcLockWakeup if the released lock conflicts with at
1716 : : * least one of the lock types requested by waiter(s). Otherwise whatever
1717 : : * conflict made them wait must still exist. NOTE: before MVCC, we could
1718 : : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1719 : : * not true anymore, because the remaining granted locks might belong to
1720 : : * some waiter, who could now be awakened because he doesn't conflict with
1721 : : * his own locks.
1722 : : */
1723 [ + + ]: 2728259 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1724 : 1460 : wakeupNeeded = true;
1725 : :
1726 : : /*
1727 : : * Now fix the per-proclock state.
1728 : : */
1729 : 2728259 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1730 : : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1731 : :
1732 : 2728259 : return wakeupNeeded;
1733 : : }
1734 : :
1735 : : /*
1736 : : * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1737 : : * proclock and lock objects if possible, and call ProcLockWakeup if there
1738 : : * are remaining requests and the caller says it's OK. (Normally, this
1739 : : * should be called after UnGrantLock, and wakeupNeeded is the result from
1740 : : * UnGrantLock.)
1741 : : *
1742 : : * The appropriate partition lock must be held at entry, and will be
1743 : : * held at exit.
1744 : : */
1745 : : static void
7450 tgl@sss.pgh.pa.us 1746 : 2682586 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1747 : : LockMethod lockMethodTable, uint32 hashcode,
1748 : : bool wakeupNeeded)
1749 : : {
1750 : : /*
1751 : : * If this was my last hold on this lock, delete my entry in the proclock
1752 : : * table.
1753 : : */
7656 1754 [ + + ]: 2682586 : if (proclock->holdMask == 0)
1755 : : {
1756 : : uint32 proclock_hashcode;
1757 : :
1758 : : PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1203 andres@anarazel.de 1759 : 2362527 : dlist_delete(&proclock->lockLink);
1760 : 2362527 : dlist_delete(&proclock->procLink);
7226 tgl@sss.pgh.pa.us 1761 : 2362527 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1762 [ - + ]: 2362527 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1184 peter@eisentraut.org 1763 : 2362527 : &(proclock->tag),
1764 : : proclock_hashcode,
1765 : : HASH_REMOVE,
1766 : : NULL))
7656 tgl@sss.pgh.pa.us 1767 [ # # ]:UBC 0 : elog(PANIC, "proclock table corrupted");
1768 : : }
1769 : :
7656 tgl@sss.pgh.pa.us 1770 [ + + ]:CBC 2682586 : if (lock->nRequested == 0)
1771 : : {
1772 : : /*
1773 : : * The caller just released the last lock, so garbage-collect the lock
1774 : : * object.
1775 : : */
1776 : : LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1203 andres@anarazel.de 1777 [ - + ]: 2317500 : Assert(dlist_is_empty(&lock->procLocks));
7226 tgl@sss.pgh.pa.us 1778 [ - + ]: 2317500 : if (!hash_search_with_hash_value(LockMethodLockHash,
1184 peter@eisentraut.org 1779 : 2317500 : &(lock->tag),
1780 : : hashcode,
1781 : : HASH_REMOVE,
1782 : : NULL))
7656 tgl@sss.pgh.pa.us 1783 [ # # ]:UBC 0 : elog(PANIC, "lock table corrupted");
1784 : : }
7656 tgl@sss.pgh.pa.us 1785 [ + + ]:CBC 365086 : else if (wakeupNeeded)
1786 : : {
1787 : : /* There are waiters on this lock, so wake them up. */
7450 1788 : 1502 : ProcLockWakeup(lockMethodTable, lock);
1789 : : }
7656 1790 : 2682586 : }
1791 : :
1792 : : /*
1793 : : * GrantLockLocal -- update the locallock data structures to show
1794 : : * the lock request has been granted.
1795 : : *
1796 : : * We expect that LockAcquire made sure there is room to add a new
1797 : : * ResourceOwner entry.
1798 : : */
1799 : : static void
7921 1800 : 25515218 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1801 : : {
1802 : 25515218 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1803 : : int i;
1804 : :
1805 [ - + ]: 25515218 : Assert(locallock->numLockOwners < locallock->maxLockOwners);
1806 : : /* Count the total */
1807 : 25515218 : locallock->nLocks++;
1808 : : /* Count the per-owner lock */
1809 [ + + ]: 27273742 : for (i = 0; i < locallock->numLockOwners; i++)
1810 : : {
1811 [ + + ]: 3905363 : if (lockOwners[i].owner == owner)
1812 : : {
1813 : 2146839 : lockOwners[i].nLocks++;
1814 : 2146839 : return;
1815 : : }
1816 : : }
1817 : 23368379 : lockOwners[i].owner = owner;
1818 : 23368379 : lockOwners[i].nLocks = 1;
1819 : 23368379 : locallock->numLockOwners++;
5066 heikki.linnakangas@i 1820 [ + + ]: 23368379 : if (owner != NULL)
1821 : 23322337 : ResourceOwnerRememberLock(owner, locallock);
1822 : :
1823 : : /* Indicate that the lock is acquired for certain types of locks. */
2239 akapila@postgresql.o 1824 : 23368379 : CheckAndSetLockHeld(locallock, true);
1825 : : }
1826 : :
1827 : : /*
1828 : : * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1829 : : * and arrange for error cleanup if it fails
1830 : : */
1831 : : static void
5130 rhaas@postgresql.org 1832 : 246328 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1833 : : {
1834 [ - + ]: 246328 : Assert(StrongLockInProgress == NULL);
3184 peter_e@gmx.net 1835 [ - + ]: 246328 : Assert(locallock->holdsStrongLockCount == false);
1836 : :
1837 : : /*
1838 : : * Adding to a memory location is not atomic, so we take a spinlock to
1839 : : * ensure we don't collide with someone else trying to bump the count at
1840 : : * the same time.
1841 : : *
1842 : : * XXX: It might be worth considering using an atomic fetch-and-add
1843 : : * instruction here, on architectures where that is supported.
1844 : : */
1845 : :
5130 rhaas@postgresql.org 1846 [ + + ]: 246328 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1847 : 246328 : FastPathStrongRelationLocks->count[fasthashcode]++;
3184 peter_e@gmx.net 1848 : 246328 : locallock->holdsStrongLockCount = true;
5130 rhaas@postgresql.org 1849 : 246328 : StrongLockInProgress = locallock;
1850 : 246328 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1851 : 246328 : }
1852 : :
1853 : : /*
1854 : : * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1855 : : * acquisition once it's no longer needed
1856 : : */
1857 : : static void
1858 : 2726068 : FinishStrongLockAcquire(void)
1859 : : {
1860 : 2726068 : StrongLockInProgress = NULL;
1861 : 2726068 : }
1862 : :
1863 : : /*
1864 : : * AbortStrongLockAcquire - undo strong lock state changes performed by
1865 : : * BeginStrongLockAcquire.
1866 : : */
1867 : : void
1868 : 465629 : AbortStrongLockAcquire(void)
1869 : : {
1870 : : uint32 fasthashcode;
1871 : 465629 : LOCALLOCK *locallock = StrongLockInProgress;
1872 : :
1873 [ + + ]: 465629 : if (locallock == NULL)
1874 : 465416 : return;
1875 : :
1876 : 213 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
3184 peter_e@gmx.net 1877 [ - + ]: 213 : Assert(locallock->holdsStrongLockCount == true);
5130 rhaas@postgresql.org 1878 [ - + ]: 213 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4411 1879 [ - + ]: 213 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
5130 1880 : 213 : FastPathStrongRelationLocks->count[fasthashcode]--;
3184 peter_e@gmx.net 1881 : 213 : locallock->holdsStrongLockCount = false;
5130 rhaas@postgresql.org 1882 : 213 : StrongLockInProgress = NULL;
1883 : 213 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1884 : : }
1885 : :
1886 : : /*
1887 : : * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1888 : : * WaitOnLock on.
1889 : : *
1890 : : * proc.c needs this for the case where we are booted off the lock by
1891 : : * timeout, but discover that someone granted us the lock anyway.
1892 : : *
1893 : : * We could just export GrantLockLocal, but that would require including
1894 : : * resowner.h in lock.h, which creates circularity.
1895 : : */
1896 : : void
7921 tgl@sss.pgh.pa.us 1897 : 1 : GrantAwaitedLock(void)
1898 : : {
1899 : 1 : GrantLockLocal(awaitedLock, awaitedOwner);
10115 scrappy@hub.org 1900 : 1 : }
1901 : :
1902 : : /*
1903 : : * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1904 : : */
1905 : : LOCALLOCK *
547 heikki.linnakangas@i 1906 : 466420 : GetAwaitedLock(void)
1907 : : {
1908 : 466420 : return awaitedLock;
1909 : : }
1910 : :
1911 : : /*
1912 : : * ResetAwaitedLock -- Forget that we are waiting on a lock.
1913 : : */
1914 : : void
403 1915 : 40 : ResetAwaitedLock(void)
1916 : : {
1917 : 40 : awaitedLock = NULL;
1918 : 40 : }
1919 : :
1920 : : /*
1921 : : * MarkLockClear -- mark an acquired lock as "clear"
1922 : : *
1923 : : * This means that we know we have absorbed all sinval messages that other
1924 : : * sessions generated before we acquired this lock, and so we can confidently
1925 : : * assume we know about any catalog changes protected by this lock.
1926 : : */
1927 : : void
2797 tgl@sss.pgh.pa.us 1928 : 21556306 : MarkLockClear(LOCALLOCK *locallock)
1929 : : {
1930 [ - + ]: 21556306 : Assert(locallock->nLocks > 0);
1931 : 21556306 : locallock->lockCleared = true;
1932 : 21556306 : }
1933 : :
1934 : : /*
1935 : : * WaitOnLock -- wait to acquire a lock
1936 : : *
1937 : : * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1938 : : */
1939 : : static ProcWaitStatus
547 heikki.linnakangas@i 1940 : 1547 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1941 : : {
1942 : : ProcWaitStatus result;
1943 : : ErrorContextCallback waiterrcontext;
1944 : :
1945 : : TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1946 : : locallock->tag.lock.locktag_field2,
1947 : : locallock->tag.lock.locktag_field3,
1948 : : locallock->tag.lock.locktag_field4,
1949 : : locallock->tag.lock.locktag_type,
1950 : : locallock->tag.mode);
1951 : :
1952 : : /* Setup error traceback support for ereport() */
249 tgl@sss.pgh.pa.us 1953 :GNC 1547 : waiterrcontext.callback = waitonlock_error_callback;
165 peter@eisentraut.org 1954 : 1547 : waiterrcontext.arg = locallock;
249 tgl@sss.pgh.pa.us 1955 : 1547 : waiterrcontext.previous = error_context_stack;
1956 : 1547 : error_context_stack = &waiterrcontext;
1957 : :
1958 : : /* adjust the process title to indicate that it's waiting */
1170 drowley@postgresql.o 1959 :CBC 1547 : set_ps_display_suffix("waiting");
1960 : :
1961 : : /*
1962 : : * Record the fact that we are waiting for a lock, so that
1963 : : * LockErrorCleanup will clean up if cancel/die happens.
1964 : : */
7921 tgl@sss.pgh.pa.us 1965 : 1547 : awaitedLock = locallock;
1966 : 1547 : awaitedOwner = owner;
1967 : :
1968 : : /*
1969 : : * NOTE: Think not to put any shared-state cleanup after the call to
1970 : : * ProcSleep, in either the normal or failure path. The lock state must
1971 : : * be fully set by the lock grantor, or by CheckDeadLock if we give up
1972 : : * waiting for the lock. This is necessary because of the possibility
1973 : : * that a cancel/die interrupt will interrupt ProcSleep after someone else
1974 : : * grants us the lock, but before we've noticed it. Hence, after granting,
1975 : : * the locktable state must fully reflect the fact that we own the lock;
1976 : : * we can't do additional work on return.
1977 : : *
1978 : : * We can and do use a PG_TRY block to try to clean up after failure, but
1979 : : * this still has a major limitation: elog(FATAL) can occur while waiting
1980 : : * (eg, a "die" interrupt), and then control won't come back here. So all
1981 : : * cleanup of essential state should happen in LockErrorCleanup, not here.
1982 : : * We can use PG_TRY to clear the "waiting" status flags, since doing that
1983 : : * is unimportant if the process exits.
1984 : : */
6667 1985 [ + + ]: 1547 : PG_TRY();
1986 : : {
547 heikki.linnakangas@i 1987 : 1547 : result = ProcSleep(locallock);
1988 : : }
6667 tgl@sss.pgh.pa.us 1989 : 39 : PG_CATCH();
1990 : : {
1991 : : /* In this path, awaitedLock remains set until LockErrorCleanup */
1992 : :
1993 : : /* reset ps display to remove the suffix */
1170 drowley@postgresql.o 1994 : 39 : set_ps_display_remove_suffix();
1995 : :
1996 : : /* and propagate the error */
6667 tgl@sss.pgh.pa.us 1997 : 39 : PG_RE_THROW();
1998 : : }
1999 [ - + ]: 1505 : PG_END_TRY();
2000 : :
2001 : : /*
2002 : : * We no longer want LockErrorCleanup to do anything.
2003 : : */
7921 2004 : 1505 : awaitedLock = NULL;
2005 : :
2006 : : /* reset ps display to remove the suffix */
1170 drowley@postgresql.o 2007 : 1505 : set_ps_display_remove_suffix();
2008 : :
249 tgl@sss.pgh.pa.us 2009 :GNC 1505 : error_context_stack = waiterrcontext.previous;
2010 : :
2011 : : TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2012 : : locallock->tag.lock.locktag_field2,
2013 : : locallock->tag.lock.locktag_field3,
2014 : : locallock->tag.lock.locktag_field4,
2015 : : locallock->tag.lock.locktag_type,
2016 : : locallock->tag.mode);
2017 : :
547 heikki.linnakangas@i 2018 :CBC 1505 : return result;
2019 : : }
2020 : :
2021 : : /*
2022 : : * error context callback for failures in WaitOnLock
2023 : : *
2024 : : * We report which lock was being waited on, in the same style used in
2025 : : * deadlock reports. This helps with lock timeout errors in particular.
2026 : : */
2027 : : static void
249 tgl@sss.pgh.pa.us 2028 :GNC 244 : waitonlock_error_callback(void *arg)
2029 : : {
2030 : 244 : LOCALLOCK *locallock = (LOCALLOCK *) arg;
2031 : 244 : const LOCKTAG *tag = &locallock->tag.lock;
2032 : 244 : LOCKMODE mode = locallock->tag.mode;
2033 : : StringInfoData locktagbuf;
2034 : :
2035 : 244 : initStringInfo(&locktagbuf);
2036 : 244 : DescribeLockTag(&locktagbuf, tag);
2037 : :
2038 : 488 : errcontext("waiting for %s on %s",
2039 : 244 : GetLockmodeName(tag->locktag_lockmethodid, mode),
2040 : : locktagbuf.data);
2041 : 244 : }
2042 : :
2043 : : /*
2044 : : * Remove a proc from the wait-queue it is on (caller must know it is on one).
2045 : : * This is only used when the proc has failed to get the lock, so we set its
2046 : : * waitStatus to PROC_WAIT_STATUS_ERROR.
2047 : : *
2048 : : * Appropriate partition lock must be held by caller. Also, caller is
2049 : : * responsible for signaling the proc if needed.
2050 : : *
2051 : : * NB: this does not clean up any locallock object that may exist for the lock.
2052 : : */
2053 : : void
7226 tgl@sss.pgh.pa.us 2054 :CBC 44 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
2055 : : {
9175 bruce@momjian.us 2056 : 44 : LOCK *waitLock = proc->waitLock;
7735 tgl@sss.pgh.pa.us 2057 : 44 : PROCLOCK *proclock = proc->waitProcLock;
9175 bruce@momjian.us 2058 : 44 : LOCKMODE lockmode = proc->waitLockMode;
7735 tgl@sss.pgh.pa.us 2059 : 44 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
2060 : :
2061 : : /* Make sure proc is waiting */
2148 peter@eisentraut.org 2062 [ - + ]: 44 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
74 heikki.linnakangas@i 2063 [ - + ]:GNC 44 : Assert(!dlist_node_is_detached(&proc->waitLink));
9231 tgl@sss.pgh.pa.us 2064 [ - + ]:CBC 44 : Assert(waitLock);
1203 andres@anarazel.de 2065 [ - + ]: 44 : Assert(!dclist_is_empty(&waitLock->waitProcs));
7452 tgl@sss.pgh.pa.us 2066 [ + - - + ]: 44 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
2067 : :
2068 : : /* Remove proc from lock's wait queue */
74 heikki.linnakangas@i 2069 :GNC 44 : dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->waitLink);
2070 : :
2071 : : /* Undo increments of request counts by waiting process */
9231 tgl@sss.pgh.pa.us 2072 [ - + ]:CBC 44 : Assert(waitLock->nRequested > 0);
2073 [ - + ]: 44 : Assert(waitLock->nRequested > proc->waitLock->nGranted);
2074 : 44 : waitLock->nRequested--;
2075 [ - + ]: 44 : Assert(waitLock->requested[lockmode] > 0);
2076 : 44 : waitLock->requested[lockmode]--;
2077 : : /* don't forget to clear waitMask bit if appropriate */
2078 [ + - ]: 44 : if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
8191 bruce@momjian.us 2079 : 44 : waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2080 : :
2081 : : /* Clean up the proc's own state, and pass it the ok/fail signal */
9231 tgl@sss.pgh.pa.us 2082 : 44 : proc->waitLock = NULL;
7921 2083 : 44 : proc->waitProcLock = NULL;
2148 peter@eisentraut.org 2084 : 44 : proc->waitStatus = PROC_WAIT_STATUS_ERROR;
2085 : :
2086 : : /*
2087 : : * Delete the proclock immediately if it represents no already-held locks.
2088 : : * (This must happen now because if the owner of the lock decides to
2089 : : * release it, and the requested/granted counts then go to zero,
2090 : : * LockRelease expects there to be no remaining proclocks.) Then see if
2091 : : * any other waiters for the lock can be woken up now.
2092 : : */
7450 tgl@sss.pgh.pa.us 2093 : 44 : CleanUpLock(waitLock, proclock,
7226 2094 : 44 : LockMethods[lockmethodid], hashcode,
2095 : : true);
9231 2096 : 44 : }
2097 : :
2098 : : /*
2099 : : * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2100 : : * Release a session lock if 'sessionLock' is true, else release a
2101 : : * regular transaction lock.
2102 : : *
2103 : : * Side Effects: find any waiting processes that are now wakable,
2104 : : * grant them their requested locks and awaken them.
2105 : : * (We have to grant the lock here to avoid a race between
2106 : : * the waking process and any new process to
2107 : : * come along and request the lock.)
2108 : : */
2109 : : bool
7452 2110 : 22721096 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2111 : : {
2112 : 22721096 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2113 : : LockMethod lockMethodTable;
2114 : : LOCALLOCKTAG localtag;
2115 : : LOCALLOCK *locallock;
2116 : : LOCK *lock;
2117 : : PROCLOCK *proclock;
2118 : : LWLock *partitionLock;
2119 : : bool wakeupNeeded;
2120 : :
2121 [ + - - + ]: 22721096 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
7452 tgl@sss.pgh.pa.us 2122 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
7452 tgl@sss.pgh.pa.us 2123 :CBC 22721096 : lockMethodTable = LockMethods[lockmethodid];
2124 [ + - - + ]: 22721096 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
7452 tgl@sss.pgh.pa.us 2125 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
2126 : :
2127 : : #ifdef LOCK_DEBUG
2128 : : if (LOCK_DEBUG_ENABLED(locktag))
2129 : : elog(LOG, "LockRelease: lock [%u,%u] %s",
2130 : : locktag->locktag_field1, locktag->locktag_field2,
2131 : : lockMethodTable->lockModeNames[lockmode]);
2132 : : #endif
2133 : :
2134 : : /*
2135 : : * Find the LOCALLOCK entry for this lock and lockmode
2136 : : */
3240 tgl@sss.pgh.pa.us 2137 [ + - - + :CBC 22721096 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
- - - - -
- ]
7921 2138 : 22721096 : localtag.lock = *locktag;
2139 : 22721096 : localtag.mode = lockmode;
2140 : :
7452 2141 : 22721096 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2142 : : &localtag,
2143 : : HASH_FIND, NULL);
2144 : :
2145 : : /*
2146 : : * let the caller print its own error message, too. Do not ereport(ERROR).
2147 : : */
7921 2148 [ + + - + ]: 22721096 : if (!locallock || locallock->nLocks <= 0)
2149 : : {
2150 [ + - ]: 17 : elog(WARNING, "you don't own a lock of type %s",
2151 : : lockMethodTable->lockModeNames[lockmode]);
3184 peter_e@gmx.net 2152 : 17 : return false;
2153 : : }
2154 : :
2155 : : /*
2156 : : * Decrease the count for the resource owner.
2157 : : */
2158 : : {
7921 tgl@sss.pgh.pa.us 2159 : 22721079 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2160 : : ResourceOwner owner;
2161 : : int i;
2162 : :
2163 : : /* Identify owner for lock */
5114 2164 [ + + ]: 22721079 : if (sessionLock)
7921 2165 : 46022 : owner = NULL;
2166 : : else
5114 2167 : 22675057 : owner = CurrentResourceOwner;
2168 : :
7921 2169 [ + + ]: 22722269 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2170 : : {
2171 [ + + ]: 22722253 : if (lockOwners[i].owner == owner)
2172 : : {
2173 [ - + ]: 22721063 : Assert(lockOwners[i].nLocks > 0);
2174 [ + + ]: 22721063 : if (--lockOwners[i].nLocks == 0)
2175 : : {
5066 heikki.linnakangas@i 2176 [ + + ]: 21850696 : if (owner != NULL)
2177 : 21804712 : ResourceOwnerForgetLock(owner, locallock);
2178 : : /* compact out unused slot */
7921 tgl@sss.pgh.pa.us 2179 : 21850696 : locallock->numLockOwners--;
2180 [ + + ]: 21850696 : if (i < locallock->numLockOwners)
2181 : 56 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2182 : : }
2183 : 22721063 : break;
2184 : : }
2185 : : }
2186 [ + + ]: 22721079 : if (i < 0)
2187 : : {
2188 : : /* don't release a lock belonging to another owner */
2189 [ + - ]: 16 : elog(WARNING, "you don't own a lock of type %s",
2190 : : lockMethodTable->lockModeNames[lockmode]);
3184 peter_e@gmx.net 2191 : 16 : return false;
2192 : : }
2193 : : }
2194 : :
2195 : : /*
2196 : : * Decrease the total local count. If we're still holding the lock, we're
2197 : : * done.
2198 : : */
7921 tgl@sss.pgh.pa.us 2199 : 22721063 : locallock->nLocks--;
2200 : :
2201 [ + + ]: 22721063 : if (locallock->nLocks > 0)
3184 peter_e@gmx.net 2202 : 1755394 : return true;
2203 : :
2204 : : /*
2205 : : * At this point we can no longer suppose we are clear of invalidation
2206 : : * messages related to this lock. Although we'll delete the LOCALLOCK
2207 : : * object before any intentional return from this routine, it seems worth
2208 : : * the trouble to explicitly reset lockCleared right now, just in case
2209 : : * some error prevents us from deleting the LOCALLOCK.
2210 : : */
2797 tgl@sss.pgh.pa.us 2211 : 20965669 : locallock->lockCleared = false;
2212 : :
2213 : : /* Attempt fast release of any lock eligible for the fast path. */
4542 2214 [ + + + + : 20965669 : if (EligibleForRelationFastPath(locktag, lockmode) &&
+ + + + +
+ ]
591 tomas.vondra@postgre 2215 [ + + ]: 19437335 : FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
2216 : : {
2217 : : bool released;
2218 : :
2219 : : /*
2220 : : * We might not find the lock here, even if we originally entered it
2221 : : * here. Another backend may have moved it to the main table.
2222 : : */
2181 tgl@sss.pgh.pa.us 2223 : 19038410 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
5404 rhaas@postgresql.org 2224 : 19038410 : released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2225 : : lockmode);
2181 tgl@sss.pgh.pa.us 2226 : 19038410 : LWLockRelease(&MyProc->fpInfoLock);
5456 rhaas@postgresql.org 2227 [ + + ]: 19038410 : if (released)
2228 : : {
2229 : 18899156 : RemoveLocalLock(locallock);
3184 peter_e@gmx.net 2230 : 18899156 : return true;
2231 : : }
2232 : : }
2233 : :
2234 : : /*
2235 : : * Otherwise we've got to mess with the shared lock table.
2236 : : */
7226 tgl@sss.pgh.pa.us 2237 : 2066513 : partitionLock = LockHashPartitionLock(locallock->hashcode);
2238 : :
7450 2239 : 2066513 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2240 : :
2241 : : /*
2242 : : * Normally, we don't need to re-find the lock or proclock, since we kept
2243 : : * their addresses in the locallock table, and they couldn't have been
2244 : : * removed while we were holding a lock on them. But it's possible that
2245 : : * the lock was taken fast-path and has since been moved to the main hash
2246 : : * table by another backend, in which case we will need to look up the
2247 : : * objects here. We assume the lock field is NULL if so.
2248 : : */
7921 2249 : 2066513 : lock = locallock->lock;
5456 rhaas@postgresql.org 2250 [ + + ]: 2066513 : if (!lock)
2251 : : {
2252 : : PROCLOCKTAG proclocktag;
2253 : :
5088 2254 [ + - + - : 10 : Assert(EligibleForRelationFastPath(locktag, lockmode));
+ - + - -
+ ]
5456 2255 : 10 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2256 : : locktag,
2257 : : locallock->hashcode,
2258 : : HASH_FIND,
2259 : : NULL);
4542 tgl@sss.pgh.pa.us 2260 [ - + ]: 10 : if (!lock)
4542 tgl@sss.pgh.pa.us 2261 [ # # ]:UBC 0 : elog(ERROR, "failed to re-find shared lock object");
5456 rhaas@postgresql.org 2262 :CBC 10 : locallock->lock = lock;
2263 : :
2264 : 10 : proclocktag.myLock = lock;
2265 : 10 : proclocktag.myProc = MyProc;
2266 : 10 : locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2267 : : &proclocktag,
2268 : : HASH_FIND,
2269 : : NULL);
4542 tgl@sss.pgh.pa.us 2270 [ - + ]: 10 : if (!locallock->proclock)
4542 tgl@sss.pgh.pa.us 2271 [ # # ]:UBC 0 : elog(ERROR, "failed to re-find shared proclock object");
2272 : : }
2273 : : LOCK_PRINT("LockRelease: found", lock, lockmode);
7921 tgl@sss.pgh.pa.us 2274 :CBC 2066513 : proclock = locallock->proclock;
2275 : : PROCLOCK_PRINT("LockRelease: found", proclock);
2276 : :
2277 : : /*
2278 : : * Double-check that we are actually holding a lock of the type we want to
2279 : : * release.
2280 : : */
2281 [ - + ]: 2066513 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2282 : : {
2283 : : PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
7450 tgl@sss.pgh.pa.us 2284 :UBC 0 : LWLockRelease(partitionLock);
8321 2285 [ # # ]: 0 : elog(WARNING, "you don't own a lock of type %s",
2286 : : lockMethodTable->lockModeNames[lockmode]);
7921 2287 : 0 : RemoveLocalLock(locallock);
3184 peter_e@gmx.net 2288 : 0 : return false;
2289 : : }
2290 : :
2291 : : /*
2292 : : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2293 : : */
7656 tgl@sss.pgh.pa.us 2294 :CBC 2066513 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2295 : :
7450 2296 : 2066513 : CleanUpLock(lock, proclock,
2297 : : lockMethodTable, locallock->hashcode,
2298 : : wakeupNeeded);
2299 : :
2300 : 2066513 : LWLockRelease(partitionLock);
2301 : :
7921 2302 : 2066513 : RemoveLocalLock(locallock);
3184 peter_e@gmx.net 2303 : 2066513 : return true;
2304 : : }
2305 : :
2306 : : /*
2307 : : * LockReleaseAll -- Release all locks of the specified lock method that
2308 : : * are held by the current process.
2309 : : *
2310 : : * Well, not necessarily *all* locks. The available behaviors are:
2311 : : * allLocks == true: release all locks including session locks.
2312 : : * allLocks == false: release all non-session locks.
2313 : : */
2314 : : void
7630 tgl@sss.pgh.pa.us 2315 : 865731 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2316 : : {
2317 : : HASH_SEQ_STATUS status;
2318 : : LockMethod lockMethodTable;
2319 : : int i,
2320 : : numLockModes;
2321 : : LOCALLOCK *locallock;
2322 : : LOCK *lock;
2323 : : int partition;
5456 rhaas@postgresql.org 2324 : 865731 : bool have_fast_path_lwlock = false;
2325 : :
7452 tgl@sss.pgh.pa.us 2326 [ + - - + ]: 865731 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
7452 tgl@sss.pgh.pa.us 2327 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
7452 tgl@sss.pgh.pa.us 2328 :CBC 865731 : lockMethodTable = LockMethods[lockmethodid];
2329 : :
2330 : : #ifdef LOCK_DEBUG
2331 : : if (*(lockMethodTable->trace_flag))
2332 : : elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2333 : : #endif
2334 : :
2335 : : /*
2336 : : * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2337 : : * the only way that the lock we hold on our own VXID can ever get
2338 : : * released: it is always and only released when a toplevel transaction
2339 : : * ends.
2340 : : */
5388 rhaas@postgresql.org 2341 [ + + ]: 865731 : if (lockmethodid == DEFAULT_LOCKMETHOD)
2342 : 423831 : VirtualXactLockTableCleanup();
2343 : :
8692 bruce@momjian.us 2344 : 865731 : numLockModes = lockMethodTable->numLockModes;
2345 : :
2346 : : /*
2347 : : * First we run through the locallock table and get rid of unwanted
2348 : : * entries, then we scan the process's proclocks and get rid of those. We
2349 : : * do this separately because we may have multiple locallock entries
2350 : : * pointing to the same proclock, and we daren't end up with any dangling
2351 : : * pointers. Fast-path locks are cleaned up during the locallock table
2352 : : * scan, though.
2353 : : */
7452 tgl@sss.pgh.pa.us 2354 : 865731 : hash_seq_init(&status, LockMethodLocalHash);
2355 : :
7921 2356 [ + + ]: 2276149 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2357 : : {
2358 : : /*
2359 : : * If the LOCALLOCK entry is unused, something must've gone wrong
2360 : : * while trying to acquire this lock. Just forget the local entry.
2361 : : */
5088 rhaas@postgresql.org 2362 [ + + ]: 1410418 : if (locallock->nLocks == 0)
2363 : : {
2364 : 46 : RemoveLocalLock(locallock);
2365 : 46 : continue;
2366 : : }
2367 : :
2368 : : /* Ignore items that are not of the lockmethod to be removed */
2369 [ + + ]: 1410372 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2370 : 34134 : continue;
2371 : :
2372 : : /*
2373 : : * If we are asked to release all locks, we can just zap the entry.
2374 : : * Otherwise, must scan to see if there are session locks. We assume
2375 : : * there is at most one lockOwners entry for session locks.
2376 : : */
2377 [ + + ]: 1376238 : if (!allLocks)
2378 : : {
2379 : 1263707 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2380 : :
2381 : : /* If session lock is above array position 0, move it down to 0 */
4724 bruce@momjian.us 2382 [ + + ]: 2548912 : for (i = 0; i < locallock->numLockOwners; i++)
2383 : : {
5088 rhaas@postgresql.org 2384 [ + + ]: 1285205 : if (lockOwners[i].owner == NULL)
2385 : 33818 : lockOwners[0] = lockOwners[i];
2386 : : else
5066 heikki.linnakangas@i 2387 : 1251387 : ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2388 : : }
2389 : :
5088 rhaas@postgresql.org 2390 [ + - ]: 1263707 : if (locallock->numLockOwners > 0 &&
2391 [ + + ]: 1263707 : lockOwners[0].owner == NULL &&
2392 [ + - ]: 33818 : lockOwners[0].nLocks > 0)
2393 : : {
2394 : : /* Fix the locallock to show just the session locks */
2395 : 33818 : locallock->nLocks = lockOwners[0].nLocks;
2396 : 33818 : locallock->numLockOwners = 1;
2397 : : /* We aren't deleting this locallock, so done */
5456 2398 : 33818 : continue;
2399 : : }
2400 : : else
5066 heikki.linnakangas@i 2401 : 1229889 : locallock->numLockOwners = 0;
2402 : : }
2403 : :
2404 : : #ifdef USE_ASSERT_CHECKING
2405 : :
2406 : : /*
2407 : : * Tuple locks are currently held only for short durations within a
2408 : : * transaction. Check that we didn't forget to release one.
2409 : : */
588 noah@leadboat.com 2410 [ + + - + ]: 1342420 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
588 noah@leadboat.com 2411 [ # # ]:UBC 0 : elog(WARNING, "tuple lock held at commit");
2412 : : #endif
2413 : :
2414 : : /*
2415 : : * If the lock or proclock pointers are NULL, this lock was taken via
2416 : : * the relation fast-path (and is not known to have been transferred).
2417 : : */
5088 rhaas@postgresql.org 2418 [ + + - + ]:CBC 1342420 : if (locallock->proclock == NULL || locallock->lock == NULL)
2419 : 1533 : {
2420 : 683323 : LOCKMODE lockmode = locallock->tag.mode;
2421 : : Oid relid;
2422 : :
2423 : : /* Verify that a fast-path lock is what we've got. */
2424 [ + - + - : 683323 : if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
+ - + - -
+ ]
5456 rhaas@postgresql.org 2425 [ # # ]:UBC 0 : elog(PANIC, "locallock table corrupted");
2426 : :
2427 : : /*
2428 : : * If we don't currently hold the LWLock that protects our
2429 : : * fast-path data structures, we must acquire it before attempting
2430 : : * to release the lock via the fast-path. We will continue to
2431 : : * hold the LWLock until we're done scanning the locallock table,
2432 : : * unless we hit a transferred fast-path lock. (XXX is this
2433 : : * really such a good idea? There could be a lot of entries ...)
2434 : : */
5456 rhaas@postgresql.org 2435 [ + + ]:CBC 683323 : if (!have_fast_path_lwlock)
2436 : : {
2181 tgl@sss.pgh.pa.us 2437 : 193643 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
5456 rhaas@postgresql.org 2438 : 193643 : have_fast_path_lwlock = true;
2439 : : }
2440 : :
2441 : : /* Attempt fast-path release. */
2442 : 683323 : relid = locallock->tag.lock.locktag_field2;
5404 2443 [ + + ]: 683323 : if (FastPathUnGrantRelationLock(relid, lockmode))
2444 : : {
5456 2445 : 681790 : RemoveLocalLock(locallock);
2446 : 681790 : continue;
2447 : : }
2448 : :
2449 : : /*
2450 : : * Our lock, originally taken via the fast path, has been
2451 : : * transferred to the main lock table. That's going to require
2452 : : * some extra work, so release our fast-path lock before starting.
2453 : : */
2181 tgl@sss.pgh.pa.us 2454 : 1533 : LWLockRelease(&MyProc->fpInfoLock);
5456 rhaas@postgresql.org 2455 : 1533 : have_fast_path_lwlock = false;
2456 : :
2457 : : /*
2458 : : * Now dump the lock. We haven't got a pointer to the LOCK or
2459 : : * PROCLOCK in this case, so we have to handle this a bit
2460 : : * differently than a normal lock release. Unfortunately, this
2461 : : * requires an extra LWLock acquire-and-release cycle on the
2462 : : * partitionLock, but hopefully it shouldn't happen often.
2463 : : */
2464 : 1533 : LockRefindAndRelease(lockMethodTable, MyProc,
2465 : : &locallock->tag.lock, lockmode, false);
7921 tgl@sss.pgh.pa.us 2466 : 1533 : RemoveLocalLock(locallock);
2467 : 1533 : continue;
2468 : : }
2469 : :
2470 : : /* Mark the proclock to show we need to release this lockmode */
7630 2471 [ + - ]: 659097 : if (locallock->nLocks > 0)
2472 : 659097 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2473 : :
2474 : : /* And remove the locallock hashtable entry */
7921 2475 : 659097 : RemoveLocalLock(locallock);
2476 : : }
2477 : :
2478 : : /* Done with the fast-path data structures */
5456 rhaas@postgresql.org 2479 [ + + ]: 865731 : if (have_fast_path_lwlock)
2181 tgl@sss.pgh.pa.us 2480 : 192110 : LWLockRelease(&MyProc->fpInfoLock);
2481 : :
2482 : : /*
2483 : : * Now, scan each lock partition separately.
2484 : : */
7450 2485 [ + + ]: 14717427 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2486 : : {
2487 : : LWLock *partitionLock;
1203 andres@anarazel.de 2488 : 13851696 : dlist_head *procLocks = &MyProc->myProcLocks[partition];
2489 : : dlist_mutable_iter proclock_iter;
2490 : :
4481 rhaas@postgresql.org 2491 : 13851696 : partitionLock = LockHashPartitionLockByIndex(partition);
2492 : :
2493 : : /*
2494 : : * If the proclock list for this partition is empty, we can skip
2495 : : * acquiring the partition lock. This optimization is trickier than
2496 : : * it looks, because another backend could be in process of adding
2497 : : * something to our proclock list due to promoting one of our
2498 : : * fast-path locks. However, any such lock must be one that we
2499 : : * decided not to delete above, so it's okay to skip it again now;
2500 : : * we'd just decide not to delete it again. We must, however, be
2501 : : * careful to re-fetch the list header once we've acquired the
2502 : : * partition lock, to be sure we have a valid, up-to-date pointer.
2503 : : * (There is probably no significant risk if pointer fetch/store is
2504 : : * atomic, but we don't wish to assume that.)
2505 : : *
2506 : : * XXX This argument assumes that the locallock table correctly
2507 : : * represents all of our fast-path locks. While allLocks mode
2508 : : * guarantees to clean up all of our normal locks regardless of the
2509 : : * locallock situation, we lose that guarantee for fast-path locks.
2510 : : * This is not ideal.
2511 : : */
1203 andres@anarazel.de 2512 [ + + ]: 13851696 : if (dlist_is_empty(procLocks))
7450 tgl@sss.pgh.pa.us 2513 : 13304826 : continue; /* needn't examine this partition */
2514 : :
2515 : 546870 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2516 : :
1203 andres@anarazel.de 2517 [ + - + + ]: 1227341 : dlist_foreach_modify(proclock_iter, procLocks)
2518 : : {
2519 : 680471 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
7450 tgl@sss.pgh.pa.us 2520 : 680471 : bool wakeupNeeded = false;
2521 : :
7226 2522 [ - + ]: 680471 : Assert(proclock->tag.myProc == MyProc);
2523 : :
2524 : 680471 : lock = proclock->tag.myLock;
2525 : :
2526 : : /* Ignore items that are not of the lockmethod to be removed */
7450 2527 [ + + ]: 680471 : if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
4541 2528 : 34134 : continue;
2529 : :
2530 : : /*
2531 : : * In allLocks mode, force release of all locks even if locallock
2532 : : * table had problems
2533 : : */
7450 2534 [ + + ]: 646337 : if (allLocks)
2535 : 44182 : proclock->releaseMask = proclock->holdMask;
2536 : : else
2537 [ - + ]: 602155 : Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2538 : :
2539 : : /*
2540 : : * Ignore items that have nothing to be released, unless they have
2541 : : * holdMask == 0 and are therefore recyclable
2542 : : */
2543 [ + + + - ]: 646337 : if (proclock->releaseMask == 0 && proclock->holdMask != 0)
4541 2544 : 32955 : continue;
2545 : :
2546 : : PROCLOCK_PRINT("LockReleaseAll", proclock);
2547 : : LOCK_PRINT("LockReleaseAll", lock, 0);
7450 2548 [ - + ]: 613382 : Assert(lock->nRequested >= 0);
2549 [ - + ]: 613382 : Assert(lock->nGranted >= 0);
2550 [ - + ]: 613382 : Assert(lock->nGranted <= lock->nRequested);
2551 [ - + ]: 613382 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
2552 : :
2553 : : /*
2554 : : * Release the previously-marked lock modes
2555 : : */
2556 [ + + ]: 5520438 : for (i = 1; i <= numLockModes; i++)
2557 : : {
2558 [ + + ]: 4907056 : if (proclock->releaseMask & LOCKBIT_ON(i))
2559 : 659099 : wakeupNeeded |= UnGrantLock(lock, i, proclock,
2560 : : lockMethodTable);
2561 : : }
2562 [ + - - + ]: 613382 : Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2563 [ - + ]: 613382 : Assert(lock->nGranted <= lock->nRequested);
2564 : : LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2565 : :
2566 : 613382 : proclock->releaseMask = 0;
2567 : :
2568 : : /* CleanUpLock will wake up waiters if needed. */
2569 : 613382 : CleanUpLock(lock, proclock,
2570 : : lockMethodTable,
7226 2571 : 613382 : LockTagHashCode(&lock->tag),
2572 : : wakeupNeeded);
2573 : : } /* loop over PROCLOCKs within this partition */
2574 : :
7450 2575 : 546870 : LWLockRelease(partitionLock);
2576 : : } /* loop over partitions */
2577 : :
2578 : : #ifdef LOCK_DEBUG
2579 : : if (*(lockMethodTable->trace_flag))
2580 : : elog(LOG, "LockReleaseAll done");
2581 : : #endif
10892 scrappy@hub.org 2582 : 865731 : }
2583 : :
2584 : : /*
2585 : : * LockReleaseSession -- Release all session locks of the specified lock method
2586 : : * that are held by the current process.
2587 : : */
2588 : : void
5114 tgl@sss.pgh.pa.us 2589 : 122 : LockReleaseSession(LOCKMETHODID lockmethodid)
2590 : : {
2591 : : HASH_SEQ_STATUS status;
2592 : : LOCALLOCK *locallock;
2593 : :
2594 [ + - - + ]: 122 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
5114 tgl@sss.pgh.pa.us 2595 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2596 : :
5114 tgl@sss.pgh.pa.us 2597 :CBC 122 : hash_seq_init(&status, LockMethodLocalHash);
2598 : :
2599 [ + + ]: 242 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2600 : : {
2601 : : /* Ignore items that are not of the specified lock method */
2602 [ + + ]: 120 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2603 : 11 : continue;
2604 : :
2605 : 109 : ReleaseLockIfHeld(locallock, true);
2606 : : }
2607 : 122 : }
2608 : :
2609 : : /*
2610 : : * LockReleaseCurrentOwner
2611 : : * Release all locks belonging to CurrentResourceOwner
2612 : : *
2613 : : * If the caller knows what those locks are, it can pass them as an array.
2614 : : * That speeds up the call significantly, when a lot of locks are held.
2615 : : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2616 : : * table to find them.
2617 : : */
2618 : : void
5066 heikki.linnakangas@i 2619 : 6274 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2620 : : {
2621 [ + + ]: 6274 : if (locallocks == NULL)
2622 : : {
2623 : : HASH_SEQ_STATUS status;
2624 : : LOCALLOCK *locallock;
2625 : :
2626 : 5 : hash_seq_init(&status, LockMethodLocalHash);
2627 : :
2628 [ + + ]: 357 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2629 : 352 : ReleaseLockIfHeld(locallock, false);
2630 : : }
2631 : : else
2632 : : {
2633 : : int i;
2634 : :
2635 [ + + ]: 9733 : for (i = nlocks - 1; i >= 0; i--)
2636 : 3464 : ReleaseLockIfHeld(locallocks[i], false);
2637 : : }
5555 itagaki.takahiro@gma 2638 : 6274 : }
2639 : :
2640 : : /*
2641 : : * ReleaseLockIfHeld
2642 : : * Release any session-level locks on this lockable object if sessionLock
2643 : : * is true; else, release any locks held by CurrentResourceOwner.
2644 : : *
2645 : : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2646 : : * locks), but without refactoring LockRelease() we cannot support releasing
2647 : : * locks belonging to resource owners other than CurrentResourceOwner.
2648 : : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2649 : : * do a hashtable lookup of the locallock, too. However, currently this
2650 : : * function isn't used heavily enough to justify refactoring for its
2651 : : * convenience.
2652 : : */
2653 : : static void
5114 tgl@sss.pgh.pa.us 2654 : 3925 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2655 : : {
2656 : : ResourceOwner owner;
2657 : : LOCALLOCKOWNER *lockOwners;
2658 : : int i;
2659 : :
2660 : : /* Identify owner for lock (must match LockRelease!) */
2661 [ + + ]: 3925 : if (sessionLock)
2662 : 109 : owner = NULL;
2663 : : else
2664 : 3816 : owner = CurrentResourceOwner;
2665 : :
2666 : : /* Scan to see if there are any locks belonging to the target owner */
5555 itagaki.takahiro@gma 2667 : 3925 : lockOwners = locallock->lockOwners;
2668 [ + + ]: 4182 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2669 : : {
2670 [ + + ]: 3925 : if (lockOwners[i].owner == owner)
2671 : : {
2672 [ - + ]: 3668 : Assert(lockOwners[i].nLocks > 0);
2673 [ + + ]: 3668 : if (lockOwners[i].nLocks < locallock->nLocks)
2674 : : {
2675 : : /*
2676 : : * We will still hold this lock after forgetting this
2677 : : * ResourceOwner.
2678 : : */
2679 : 977 : locallock->nLocks -= lockOwners[i].nLocks;
2680 : : /* compact out unused slot */
2681 : 977 : locallock->numLockOwners--;
5066 heikki.linnakangas@i 2682 [ + - ]: 977 : if (owner != NULL)
2683 : 977 : ResourceOwnerForgetLock(owner, locallock);
5555 itagaki.takahiro@gma 2684 [ - + ]: 977 : if (i < locallock->numLockOwners)
5555 itagaki.takahiro@gma 2685 :UBC 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2686 : : }
2687 : : else
2688 : : {
5555 itagaki.takahiro@gma 2689 [ - + ]:CBC 2691 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2690 : : /* We want to call LockRelease just once */
2691 : 2691 : lockOwners[i].nLocks = 1;
2692 : 2691 : locallock->nLocks = 1;
2693 [ - + ]: 2691 : if (!LockRelease(&locallock->tag.lock,
2694 : : locallock->tag.mode,
2695 : : sessionLock))
5114 tgl@sss.pgh.pa.us 2696 [ # # ]:UBC 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2697 : : }
5555 itagaki.takahiro@gma 2698 :CBC 3668 : break;
2699 : : }
2700 : : }
7921 tgl@sss.pgh.pa.us 2701 : 3925 : }
2702 : :
2703 : : /*
2704 : : * LockReassignCurrentOwner
2705 : : * Reassign all locks belonging to CurrentResourceOwner to belong
2706 : : * to its parent resource owner.
2707 : : *
2708 : : * If the caller knows what those locks are, it can pass them as an array.
2709 : : * That speeds up the call significantly, when a lot of locks are held
2710 : : * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2711 : : * and we'll traverse through our hash table to find them.
2712 : : */
2713 : : void
5066 heikki.linnakangas@i 2714 : 444813 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2715 : : {
7921 tgl@sss.pgh.pa.us 2716 : 444813 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2717 : :
2718 [ - + ]: 444813 : Assert(parent != NULL);
2719 : :
5066 heikki.linnakangas@i 2720 [ + + ]: 444813 : if (locallocks == NULL)
2721 : : {
2722 : : HASH_SEQ_STATUS status;
2723 : : LOCALLOCK *locallock;
2724 : :
2725 : 5093 : hash_seq_init(&status, LockMethodLocalHash);
2726 : :
2727 [ + + ]: 176536 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2728 : 171443 : LockReassignOwner(locallock, parent);
2729 : : }
2730 : : else
2731 : : {
2732 : : int i;
2733 : :
2734 [ + + ]: 958628 : for (i = nlocks - 1; i >= 0; i--)
2735 : 518908 : LockReassignOwner(locallocks[i], parent);
2736 : : }
2737 : 444813 : }
2738 : :
2739 : : /*
2740 : : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2741 : : * CurrentResourceOwner to its parent.
2742 : : */
2743 : : static void
2744 : 690351 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2745 : : {
2746 : : LOCALLOCKOWNER *lockOwners;
2747 : : int i;
2748 : 690351 : int ic = -1;
2749 : 690351 : int ip = -1;
2750 : :
2751 : : /*
2752 : : * Scan to see if there are any locks belonging to current owner or its
2753 : : * parent
2754 : : */
2755 : 690351 : lockOwners = locallock->lockOwners;
2756 [ + + ]: 1575578 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2757 : : {
2758 [ + + ]: 885227 : if (lockOwners[i].owner == CurrentResourceOwner)
2759 : 641272 : ic = i;
2760 [ + + ]: 243955 : else if (lockOwners[i].owner == parent)
2761 : 199978 : ip = i;
2762 : : }
2763 : :
2764 [ + + ]: 690351 : if (ic < 0)
4724 bruce@momjian.us 2765 : 49079 : return; /* no current locks */
2766 : :
5066 heikki.linnakangas@i 2767 [ + + ]: 641272 : if (ip < 0)
2768 : : {
2769 : : /* Parent has no slot, so just give it the child's slot */
2770 : 490338 : lockOwners[ic].owner = parent;
2771 : 490338 : ResourceOwnerRememberLock(parent, locallock);
2772 : : }
2773 : : else
2774 : : {
2775 : : /* Merge child's count with parent's */
2776 : 150934 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2777 : : /* compact out unused slot */
2778 : 150934 : locallock->numLockOwners--;
2779 [ + + ]: 150934 : if (ic < locallock->numLockOwners)
2780 : 910 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2781 : : }
2782 : 641272 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2783 : : }
2784 : :
2785 : : /*
2786 : : * FastPathGrantRelationLock
2787 : : * Grant lock using per-backend fast-path array, if there is space.
2788 : : */
2789 : : static bool
5404 rhaas@postgresql.org 2790 : 19582809 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2791 : : {
2792 : : uint32 i;
427 tomas.vondra@postgre 2793 : 19582809 : uint32 unused_slot = FastPathLockSlotsPerBackend();
2794 : :
2795 : : /* fast-path group the lock belongs to */
591 2796 : 19582809 : uint32 group = FAST_PATH_REL_GROUP(relid);
2797 : :
2798 : : /* Scan for existing entry for this relid, remembering empty slot. */
2799 [ + + ]: 332131252 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2800 : : {
2801 : : /* index into the whole per-backend array */
2802 [ - + - + ]: 313151652 : uint32 f = FAST_PATH_SLOT(group, i);
2803 : :
5456 rhaas@postgresql.org 2804 [ - + - + : 313151652 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
+ + ]
2805 : 305672695 : unused_slot = f;
2806 [ + + ]: 7478957 : else if (MyProc->fpRelId[f] == relid)
2807 : : {
2808 [ - + - + : 603209 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
- + - + -
+ - + ]
2809 [ - + - + : 603209 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
- + - + -
+ ]
2810 : 603209 : return true;
2811 : : }
2812 : : }
2813 : :
2814 : : /* If no existing entry, use any empty slot. */
427 tomas.vondra@postgre 2815 [ + - ]: 18979600 : if (unused_slot < FastPathLockSlotsPerBackend())
2816 : : {
5456 rhaas@postgresql.org 2817 : 18979600 : MyProc->fpRelId[unused_slot] = relid;
2818 [ - + - + : 18979600 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
- + - + -
+ ]
591 tomas.vondra@postgre 2819 : 18979600 : ++FastPathLocalUseCounts[group];
5456 rhaas@postgresql.org 2820 : 18979600 : return true;
2821 : : }
2822 : :
2823 : : /* No existing entry, and no empty slot. */
5456 rhaas@postgresql.org 2824 :UBC 0 : return false;
2825 : : }
2826 : :
2827 : : /*
2828 : : * FastPathUnGrantRelationLock
2829 : : * Release fast-path lock, if present. Update backend-private local
2830 : : * use count, while we're at it.
2831 : : */
2832 : : static bool
5404 rhaas@postgresql.org 2833 :CBC 19721733 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2834 : : {
2835 : : uint32 i;
5456 2836 : 19721733 : bool result = false;
2837 : :
2838 : : /* fast-path group the lock belongs to */
591 tomas.vondra@postgre 2839 : 19721733 : uint32 group = FAST_PATH_REL_GROUP(relid);
2840 : :
2841 : 19721733 : FastPathLocalUseCounts[group] = 0;
2842 [ + + ]: 335269461 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2843 : : {
2844 : : /* index into the whole per-backend array */
2845 [ - + - + ]: 315547728 : uint32 f = FAST_PATH_SLOT(group, i);
2846 : :
5456 rhaas@postgresql.org 2847 [ + + ]: 315547728 : if (MyProc->fpRelId[f] == relid
2848 [ - + - + : 27786212 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
- + - + -
+ + + ]
2849 : : {
2850 [ - + ]: 19580946 : Assert(!result);
2851 [ - + - + : 19580946 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
- + - + -
+ ]
2852 : 19580946 : result = true;
2853 : : /* we continue iterating so as to update FastPathLocalUseCount */
2854 : : }
2855 [ - + - + : 315547728 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
+ + ]
591 tomas.vondra@postgre 2856 : 9317464 : ++FastPathLocalUseCounts[group];
2857 : : }
5456 rhaas@postgresql.org 2858 : 19721733 : return result;
2859 : : }
2860 : :
2861 : : /*
2862 : : * FastPathTransferRelationLocks
2863 : : * Transfer locks matching the given lock tag from per-backend fast-path
2864 : : * arrays to the shared hash table.
2865 : : *
2866 : : * Returns true if successful, false if ran out of shared memory.
2867 : : */
2868 : : static bool
5404 2869 : 246328 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2870 : : uint32 hashcode)
2871 : : {
4481 2872 : 246328 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
5077 bruce@momjian.us 2873 : 246328 : Oid relid = locktag->locktag_field2;
2874 : : uint32 i;
2875 : :
2876 : : /* fast-path group the lock belongs to */
417 fujii@postgresql.org 2877 : 246328 : uint32 group = FAST_PATH_REL_GROUP(relid);
2878 : :
2879 : : /*
2880 : : * Every PGPROC that can potentially hold a fast-path lock is present in
2881 : : * ProcGlobal->allProcs. Prepared transactions are not, but any
2882 : : * outstanding fast-path locks held by prepared transactions are
2883 : : * transferred to the main lock table.
2884 : : */
5456 rhaas@postgresql.org 2885 [ + + ]: 36678879 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2886 : : {
120 drowley@postgresql.o 2887 :GNC 36432551 : PGPROC *proc = GetPGProcByNumber(i);
2888 : : uint32 j;
2889 : :
2181 tgl@sss.pgh.pa.us 2890 :CBC 36432551 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2891 : :
2892 : : /*
2893 : : * If the target backend isn't referencing the same database as the
2894 : : * lock, then we needn't examine the individual relation IDs at all;
2895 : : * none of them can be relevant.
2896 : : *
2897 : : * proc->databaseId is set at backend startup time and never changes
2898 : : * thereafter, so it might be safe to perform this test before
2899 : : * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2900 : : * assume that if the target backend holds any fast-path locks, it
2901 : : * must have performed a memory-fencing operation (in particular, an
2902 : : * LWLock acquisition) since setting proc->databaseId. However, it's
2903 : : * less clear that our backend is certain to have performed a memory
2904 : : * fencing operation since the other backend set proc->databaseId. So
2905 : : * for now, we test it after acquiring the LWLock just to be safe.
2906 : : *
2907 : : * Also skip groups without any registered fast-path locks.
2908 : : */
417 fujii@postgresql.org 2909 [ + + ]: 36432551 : if (proc->databaseId != locktag->locktag_field1 ||
2910 [ + + ]: 14356580 : proc->fpLockBits[group] == 0)
2911 : : {
2181 tgl@sss.pgh.pa.us 2912 : 36250668 : LWLockRelease(&proc->fpInfoLock);
5456 rhaas@postgresql.org 2913 : 36250668 : continue;
2914 : : }
2915 : :
591 tomas.vondra@postgre 2916 [ + + ]: 3090422 : for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2917 : : {
2918 : : uint32 lockmode;
2919 : :
2920 : : /* index into the whole per-backend array */
2921 [ - + - + ]: 2910041 : uint32 f = FAST_PATH_SLOT(group, j);
2922 : :
2923 : : /* Look for an allocated slot matching the given relid. */
5456 rhaas@postgresql.org 2924 [ + + - + : 2910041 : if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
- + + + ]
2925 : 2908539 : continue;
2926 : :
2927 : : /* Find or create lock object. */
2928 : 1502 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2929 : 1502 : for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
3240 tgl@sss.pgh.pa.us 2930 [ + + ]: 6008 : lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
5456 rhaas@postgresql.org 2931 : 4506 : ++lockmode)
2932 : : {
2933 : : PROCLOCK *proclock;
2934 : :
2935 [ - + - + : 4506 : if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
- + - + -
+ + + ]
2936 : 2947 : continue;
2937 : 1559 : proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2938 : : hashcode, lockmode);
2939 [ - + ]: 1559 : if (!proclock)
2940 : : {
5456 rhaas@postgresql.org 2941 :UBC 0 : LWLockRelease(partitionLock);
2181 tgl@sss.pgh.pa.us 2942 : 0 : LWLockRelease(&proc->fpInfoLock);
5456 rhaas@postgresql.org 2943 : 0 : return false;
2944 : : }
5456 rhaas@postgresql.org 2945 :CBC 1559 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2946 [ - + - + : 1559 : FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
- + - + -
+ ]
2947 : : }
2948 : 1502 : LWLockRelease(partitionLock);
2949 : :
2950 : : /* No need to examine remaining slots. */
4542 tgl@sss.pgh.pa.us 2951 : 1502 : break;
2952 : : }
2181 2953 : 181883 : LWLockRelease(&proc->fpInfoLock);
2954 : : }
5456 rhaas@postgresql.org 2955 : 246328 : return true;
2956 : : }
2957 : :
2958 : : /*
2959 : : * FastPathGetRelationLockEntry
2960 : : * Return the PROCLOCK for a lock originally taken via the fast-path,
2961 : : * transferring it to the primary lock table if necessary.
2962 : : *
2963 : : * Note: caller takes care of updating the locallock object.
2964 : : */
2965 : : static PROCLOCK *
5404 2966 : 320 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2967 : : {
5077 bruce@momjian.us 2968 : 320 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2969 : 320 : LOCKTAG *locktag = &locallock->tag.lock;
2970 : 320 : PROCLOCK *proclock = NULL;
4481 rhaas@postgresql.org 2971 : 320 : LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
5077 bruce@momjian.us 2972 : 320 : Oid relid = locktag->locktag_field2;
2973 : : uint32 i,
2974 : : group;
2975 : :
2976 : : /* fast-path group the lock belongs to */
591 tomas.vondra@postgre 2977 : 320 : group = FAST_PATH_REL_GROUP(relid);
2978 : :
2181 tgl@sss.pgh.pa.us 2979 : 320 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2980 : :
591 tomas.vondra@postgre 2981 [ + + ]: 5130 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2982 : : {
2983 : : uint32 lockmode;
2984 : :
2985 : : /* index into the whole per-backend array */
2986 [ - + - + ]: 5114 : uint32 f = FAST_PATH_SLOT(group, i);
2987 : :
2988 : : /* Look for an allocated slot matching the given relid. */
5456 rhaas@postgresql.org 2989 [ + + - + : 5114 : if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
- + + + ]
2990 : 4810 : continue;
2991 : :
2992 : : /* If we don't have a lock of the given mode, forget it! */
2993 : 304 : lockmode = locallock->tag.mode;
2994 [ - + - + : 304 : if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
- + - + -
+ - + ]
5456 rhaas@postgresql.org 2995 :UBC 0 : break;
2996 : :
2997 : : /* Find or create lock object. */
5456 rhaas@postgresql.org 2998 :CBC 304 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2999 : :
3000 : 304 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
3001 : : locallock->hashcode, lockmode);
3002 [ - + ]: 304 : if (!proclock)
3003 : : {
4865 tgl@sss.pgh.pa.us 3004 :UBC 0 : LWLockRelease(partitionLock);
2181 3005 : 0 : LWLockRelease(&MyProc->fpInfoLock);
5456 rhaas@postgresql.org 3006 [ # # ]: 0 : ereport(ERROR,
3007 : : (errcode(ERRCODE_OUT_OF_MEMORY),
3008 : : errmsg("out of shared memory"),
3009 : : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3010 : : }
5456 rhaas@postgresql.org 3011 :CBC 304 : GrantLock(proclock->tag.myLock, proclock, lockmode);
3012 [ - + - + : 304 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
- + - + -
+ ]
3013 : :
3014 : 304 : LWLockRelease(partitionLock);
3015 : :
3016 : : /* No need to examine remaining slots. */
4542 tgl@sss.pgh.pa.us 3017 : 304 : break;
3018 : : }
3019 : :
2181 3020 : 320 : LWLockRelease(&MyProc->fpInfoLock);
3021 : :
3022 : : /* Lock may have already been transferred by some other backend. */
5456 rhaas@postgresql.org 3023 [ + + ]: 320 : if (proclock == NULL)
3024 : : {
3025 : : LOCK *lock;
3026 : : PROCLOCKTAG proclocktag;
3027 : : uint32 proclock_hashcode;
3028 : :
3029 : 16 : LWLockAcquire(partitionLock, LW_SHARED);
3030 : :
3031 : 16 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3032 : : locktag,
3033 : : locallock->hashcode,
3034 : : HASH_FIND,
3035 : : NULL);
3036 [ - + ]: 16 : if (!lock)
5456 rhaas@postgresql.org 3037 [ # # ]:UBC 0 : elog(ERROR, "failed to re-find shared lock object");
3038 : :
5456 rhaas@postgresql.org 3039 :CBC 16 : proclocktag.myLock = lock;
3040 : 16 : proclocktag.myProc = MyProc;
3041 : :
3042 : 16 : proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
3043 : : proclock = (PROCLOCK *)
3044 : 16 : hash_search_with_hash_value(LockMethodProcLockHash,
3045 : : &proclocktag,
3046 : : proclock_hashcode,
3047 : : HASH_FIND,
3048 : : NULL);
3049 [ - + ]: 16 : if (!proclock)
5456 rhaas@postgresql.org 3050 [ # # ]:UBC 0 : elog(ERROR, "failed to re-find shared proclock object");
5456 rhaas@postgresql.org 3051 :CBC 16 : LWLockRelease(partitionLock);
3052 : : }
3053 : :
3054 : 320 : return proclock;
3055 : : }
3056 : :
3057 : : /*
3058 : : * GetLockConflicts
3059 : : * Get an array of VirtualTransactionIds of xacts currently holding locks
3060 : : * that would conflict with the specified lock/lockmode.
3061 : : * xacts merely awaiting such a lock are NOT reported.
3062 : : *
3063 : : * The result array is palloc'd and is terminated with an invalid VXID.
3064 : : * *countp, if not null, is updated to the number of items set.
3065 : : *
3066 : : * Of course, the result could be out of date by the time it's returned, so
3067 : : * use of this function has to be thought about carefully. Similarly, a
3068 : : * PGPROC with no "lxid" will be considered non-conflicting regardless of any
3069 : : * lock it holds. Existing callers don't care about a locker after that
3070 : : * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
3071 : : * pg_xact updates and before releasing locks.
3072 : : *
3073 : : * Note we never include the current xact's vxid in the result array,
3074 : : * since an xact never blocks itself.
3075 : : */
3076 : : VirtualTransactionId *
2590 alvherre@alvh.no-ip. 3077 : 1610 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3078 : : {
3079 : : static VirtualTransactionId *vxids;
7191 tgl@sss.pgh.pa.us 3080 : 1610 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
3081 : : LockMethod lockMethodTable;
3082 : : LOCK *lock;
3083 : : LOCKMASK conflictMask;
3084 : : dlist_iter proclock_iter;
3085 : : PROCLOCK *proclock;
3086 : : uint32 hashcode;
3087 : : LWLock *partitionLock;
6817 3088 : 1610 : int count = 0;
5456 rhaas@postgresql.org 3089 : 1610 : int fast_count = 0;
3090 : :
7191 tgl@sss.pgh.pa.us 3091 [ + - - + ]: 1610 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
7191 tgl@sss.pgh.pa.us 3092 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
7191 tgl@sss.pgh.pa.us 3093 :CBC 1610 : lockMethodTable = LockMethods[lockmethodid];
3094 [ + - - + ]: 1610 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
7191 tgl@sss.pgh.pa.us 3095 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
3096 : :
3097 : : /*
3098 : : * Allocate memory to store results, and fill with InvalidVXID. We only
3099 : : * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3100 : : * InHotStandby allocate once in TopMemoryContext.
3101 : : */
5940 simon@2ndQuadrant.co 3102 [ + + ]:CBC 1610 : if (InHotStandby)
3103 : : {
5941 3104 [ + + ]: 4 : if (vxids == NULL)
3105 : 1 : vxids = (VirtualTransactionId *)
5940 3106 : 1 : MemoryContextAlloc(TopMemoryContext,
3107 : : sizeof(VirtualTransactionId) *
1484 rhaas@postgresql.org 3108 : 1 : (MaxBackends + max_prepared_xacts + 1));
3109 : : }
3110 : : else
146 michael@paquier.xyz 3111 :GNC 1606 : vxids = palloc0_array(VirtualTransactionId, (MaxBackends + max_prepared_xacts + 1));
3112 : :
3113 : : /* Compute hash code and partition lock, and look up conflicting modes. */
7191 tgl@sss.pgh.pa.us 3114 :CBC 1610 : hashcode = LockTagHashCode(locktag);
3115 : 1610 : partitionLock = LockHashPartitionLock(hashcode);
5456 rhaas@postgresql.org 3116 : 1610 : conflictMask = lockMethodTable->conflictTab[lockmode];
3117 : :
3118 : : /*
3119 : : * Fast path locks might not have been entered in the primary lock table.
3120 : : * If the lock we're dealing with could conflict with such a lock, we must
3121 : : * examine each backend's fast-path array for conflicts.
3122 : : */
5088 3123 [ + - + - : 1610 : if (ConflictsWithRelationFastPath(locktag, lockmode))
+ - + - ]
3124 : : {
3125 : : int i;
5456 3126 : 1610 : Oid relid = locktag->locktag_field2;
3127 : : VirtualTransactionId vxid;
3128 : :
3129 : : /* fast-path group the lock belongs to */
417 fujii@postgresql.org 3130 : 1610 : uint32 group = FAST_PATH_REL_GROUP(relid);
3131 : :
3132 : : /*
3133 : : * Iterate over relevant PGPROCs. Anything held by a prepared
3134 : : * transaction will have been transferred to the primary lock table,
3135 : : * so we need not worry about those. This is all a bit fuzzy, because
3136 : : * new locks could be taken after we've visited a particular
3137 : : * partition, but the callers had better be prepared to deal with that
3138 : : * anyway, since the locks could equally well be taken between the
3139 : : * time we return the value and the time the caller does something
3140 : : * with it.
3141 : : */
5456 rhaas@postgresql.org 3142 [ + + ]: 253402 : for (i = 0; i < ProcGlobal->allProcCount; i++)
3143 : : {
120 drowley@postgresql.o 3144 :GNC 251792 : PGPROC *proc = GetPGProcByNumber(i);
3145 : : uint32 j;
3146 : :
3147 : : /* A backend never blocks itself */
5456 rhaas@postgresql.org 3148 [ + + ]:CBC 251792 : if (proc == MyProc)
3149 : 1610 : continue;
3150 : :
2181 tgl@sss.pgh.pa.us 3151 : 250182 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3152 : :
3153 : : /*
3154 : : * If the target backend isn't referencing the same database as
3155 : : * the lock, then we needn't examine the individual relation IDs
3156 : : * at all; none of them can be relevant.
3157 : : *
3158 : : * See FastPathTransferRelationLocks() for discussion of why we do
3159 : : * this test after acquiring the lock.
3160 : : *
3161 : : * Also skip groups without any registered fast-path locks.
3162 : : */
417 fujii@postgresql.org 3163 [ + + ]: 250182 : if (proc->databaseId != locktag->locktag_field1 ||
3164 [ + + ]: 114819 : proc->fpLockBits[group] == 0)
3165 : : {
2181 tgl@sss.pgh.pa.us 3166 : 249728 : LWLockRelease(&proc->fpInfoLock);
5456 rhaas@postgresql.org 3167 : 249728 : continue;
3168 : : }
3169 : :
591 tomas.vondra@postgre 3170 [ + + ]: 7519 : for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3171 : : {
3172 : : uint32 lockmask;
3173 : :
3174 : : /* index into the whole per-backend array */
3175 [ - + - + ]: 7264 : uint32 f = FAST_PATH_SLOT(group, j);
3176 : :
3177 : : /* Look for an allocated slot matching the given relid. */
5456 rhaas@postgresql.org 3178 [ + + ]: 7264 : if (relid != proc->fpRelId[f])
3179 : 7065 : continue;
3180 [ - + - + ]: 199 : lockmask = FAST_PATH_GET_BITS(proc, f);
3181 [ - + ]: 199 : if (!lockmask)
5456 rhaas@postgresql.org 3182 :UBC 0 : continue;
5456 rhaas@postgresql.org 3183 :CBC 199 : lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3184 : :
3185 : : /*
3186 : : * There can only be one entry per relation, so if we found it
3187 : : * and it doesn't conflict, we can skip the rest of the slots.
3188 : : */
3189 [ + + ]: 199 : if ((lockmask & conflictMask) == 0)
3190 : 5 : break;
3191 : :
3192 : : /* Conflict! */
3193 : 194 : GET_VXID_FROM_PGPROC(vxid, *proc);
3194 : :
3195 [ + + ]: 194 : if (VirtualTransactionIdIsValid(vxid))
3196 : 193 : vxids[count++] = vxid;
3197 : : /* else, xact already committed or aborted */
3198 : :
3199 : : /* No need to examine remaining slots. */
3200 : 194 : break;
3201 : : }
3202 : :
2181 tgl@sss.pgh.pa.us 3203 : 454 : LWLockRelease(&proc->fpInfoLock);
3204 : : }
3205 : : }
3206 : :
3207 : : /* Remember how many fast-path conflicts we found. */
5456 rhaas@postgresql.org 3208 : 1610 : fast_count = count;
3209 : :
3210 : : /*
3211 : : * Look up the lock object matching the tag.
3212 : : */
7191 tgl@sss.pgh.pa.us 3213 : 1610 : LWLockAcquire(partitionLock, LW_SHARED);
3214 : :
3215 : 1610 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3216 : : locktag,
3217 : : hashcode,
3218 : : HASH_FIND,
3219 : : NULL);
3220 [ + + ]: 1610 : if (!lock)
3221 : : {
3222 : : /*
3223 : : * If the lock object doesn't exist, there is nothing holding a lock
3224 : : * on this lockable object.
3225 : : */
3226 : 72 : LWLockRelease(partitionLock);
793 heikki.linnakangas@i 3227 : 72 : vxids[count].procNumber = INVALID_PROC_NUMBER;
4114 andres@anarazel.de 3228 : 72 : vxids[count].localTransactionId = InvalidLocalTransactionId;
2590 alvherre@alvh.no-ip. 3229 [ - + ]: 72 : if (countp)
2590 alvherre@alvh.no-ip. 3230 :UBC 0 : *countp = count;
6817 tgl@sss.pgh.pa.us 3231 :CBC 72 : return vxids;
3232 : : }
3233 : :
3234 : : /*
3235 : : * Examine each existing holder (or awaiter) of the lock.
3236 : : */
1203 andres@anarazel.de 3237 [ + - + + ]: 3097 : dlist_foreach(proclock_iter, &lock->procLocks)
3238 : : {
3239 : 1559 : proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3240 : :
7191 tgl@sss.pgh.pa.us 3241 [ + + ]: 1559 : if (conflictMask & proclock->holdMask)
3242 : : {
7153 bruce@momjian.us 3243 : 1555 : PGPROC *proc = proclock->tag.myProc;
3244 : :
3245 : : /* A backend never blocks itself */
7191 tgl@sss.pgh.pa.us 3246 [ + + ]: 1555 : if (proc != MyProc)
3247 : : {
3248 : : VirtualTransactionId vxid;
3249 : :
6817 3250 : 21 : GET_VXID_FROM_PGPROC(vxid, *proc);
3251 : :
3252 [ + - ]: 21 : if (VirtualTransactionIdIsValid(vxid))
3253 : : {
3254 : : int i;
3255 : :
3256 : : /* Avoid duplicate entries. */
5456 rhaas@postgresql.org 3257 [ + + ]: 24 : for (i = 0; i < fast_count; ++i)
3258 [ - + - - ]: 3 : if (VirtualTransactionIdEquals(vxids[i], vxid))
5456 rhaas@postgresql.org 3259 :UBC 0 : break;
5456 rhaas@postgresql.org 3260 [ + - ]:CBC 21 : if (i >= fast_count)
3261 : 21 : vxids[count++] = vxid;
3262 : : }
3263 : : /* else, xact already committed or aborted */
3264 : : }
3265 : : }
3266 : : }
3267 : :
7191 tgl@sss.pgh.pa.us 3268 : 1538 : LWLockRelease(partitionLock);
3269 : :
1484 rhaas@postgresql.org 3270 [ - + ]: 1538 : if (count > MaxBackends + max_prepared_xacts) /* should never happen */
6817 tgl@sss.pgh.pa.us 3271 [ # # ]:UBC 0 : elog(PANIC, "too many conflicting locks found");
3272 : :
793 heikki.linnakangas@i 3273 :CBC 1538 : vxids[count].procNumber = INVALID_PROC_NUMBER;
4114 andres@anarazel.de 3274 : 1538 : vxids[count].localTransactionId = InvalidLocalTransactionId;
2590 alvherre@alvh.no-ip. 3275 [ + + ]: 1538 : if (countp)
3276 : 1535 : *countp = count;
6817 tgl@sss.pgh.pa.us 3277 : 1538 : return vxids;
3278 : : }
3279 : :
3280 : : /*
3281 : : * Find a lock in the shared lock table and release it. It is the caller's
3282 : : * responsibility to verify that this is a sane thing to do. (For example, it
3283 : : * would be bad to release a lock here if there might still be a LOCALLOCK
3284 : : * object with pointers to it.)
3285 : : *
3286 : : * We currently use this in two situations: first, to release locks held by
3287 : : * prepared transactions on commit (see lock_twophase_postcommit); and second,
3288 : : * to release locks taken via the fast-path, transferred to the main hash
3289 : : * table, and then released (see LockReleaseAll).
3290 : : */
3291 : : static void
5456 rhaas@postgresql.org 3292 : 2647 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3293 : : LOCKTAG *locktag, LOCKMODE lockmode,
3294 : : bool decrement_strong_lock_count)
3295 : : {
3296 : : LOCK *lock;
3297 : : PROCLOCK *proclock;
3298 : : PROCLOCKTAG proclocktag;
3299 : : uint32 hashcode;
3300 : : uint32 proclock_hashcode;
3301 : : LWLock *partitionLock;
3302 : : bool wakeupNeeded;
3303 : :
3304 : 2647 : hashcode = LockTagHashCode(locktag);
3305 : 2647 : partitionLock = LockHashPartitionLock(hashcode);
3306 : :
3307 : 2647 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3308 : :
3309 : : /*
3310 : : * Re-find the lock object (it had better be there).
3311 : : */
3312 : 2647 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3313 : : locktag,
3314 : : hashcode,
3315 : : HASH_FIND,
3316 : : NULL);
3317 [ - + ]: 2647 : if (!lock)
5456 rhaas@postgresql.org 3318 [ # # ]:UBC 0 : elog(PANIC, "failed to re-find shared lock object");
3319 : :
3320 : : /*
3321 : : * Re-find the proclock object (ditto).
3322 : : */
5456 rhaas@postgresql.org 3323 :CBC 2647 : proclocktag.myLock = lock;
3324 : 2647 : proclocktag.myProc = proc;
3325 : :
3326 : 2647 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3327 : :
3328 : 2647 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3329 : : &proclocktag,
3330 : : proclock_hashcode,
3331 : : HASH_FIND,
3332 : : NULL);
3333 [ - + ]: 2647 : if (!proclock)
5456 rhaas@postgresql.org 3334 [ # # ]:UBC 0 : elog(PANIC, "failed to re-find shared proclock object");
3335 : :
3336 : : /*
3337 : : * Double-check that we are actually holding a lock of the type we want to
3338 : : * release.
3339 : : */
5456 rhaas@postgresql.org 3340 [ - + ]:CBC 2647 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3341 : : {
3342 : : PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
5456 rhaas@postgresql.org 3343 :UBC 0 : LWLockRelease(partitionLock);
3344 [ # # ]: 0 : elog(WARNING, "you don't own a lock of type %s",
3345 : : lockMethodTable->lockModeNames[lockmode]);
3346 : 0 : return;
3347 : : }
3348 : :
3349 : : /*
3350 : : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3351 : : */
5456 rhaas@postgresql.org 3352 :CBC 2647 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3353 : :
3354 : 2647 : CleanUpLock(lock, proclock,
3355 : : lockMethodTable, hashcode,
3356 : : wakeupNeeded);
3357 : :
3358 : 2647 : LWLockRelease(partitionLock);
3359 : :
3360 : : /*
3361 : : * Decrement strong lock count. This logic is needed only for 2PC.
3362 : : */
3363 [ + + ]: 2647 : if (decrement_strong_lock_count
4303 3364 [ + - + + : 814 : && ConflictsWithRelationFastPath(locktag, lockmode))
+ - + + ]
3365 : : {
5077 bruce@momjian.us 3366 : 111 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3367 : :
5404 rhaas@postgresql.org 3368 [ - + ]: 111 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4411 3369 [ - + ]: 111 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
5404 3370 : 111 : FastPathStrongRelationLocks->count[fasthashcode]--;
3371 : 111 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3372 : : }
3373 : : }
3374 : :
3375 : : /*
3376 : : * CheckForSessionAndXactLocks
3377 : : * Check to see if transaction holds both session-level and xact-level
3378 : : * locks on the same object; if so, throw an error.
3379 : : *
3380 : : * If we have both session- and transaction-level locks on the same object,
3381 : : * PREPARE TRANSACTION must fail. This should never happen with regular
3382 : : * locks, since we only take those at session level in some special operations
3383 : : * like VACUUM. It's possible to hit this with advisory locks, though.
3384 : : *
3385 : : * It would be nice if we could keep the session hold and give away the
3386 : : * transactional hold to the prepared xact. However, that would require two
3387 : : * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3388 : : * available when it comes time for PostPrepare_Locks to do the deed.
3389 : : * So for now, we error out while we can still do so safely.
3390 : : *
3391 : : * Since the LOCALLOCK table stores a separate entry for each lockmode,
3392 : : * we can't implement this check by examining LOCALLOCK entries in isolation.
3393 : : * We must build a transient hashtable that is indexed by locktag only.
3394 : : */
3395 : : static void
1746 tgl@sss.pgh.pa.us 3396 : 299 : CheckForSessionAndXactLocks(void)
3397 : : {
3398 : : typedef struct
3399 : : {
3400 : : LOCKTAG lock; /* identifies the lockable object */
3401 : : bool sessLock; /* is any lockmode held at session level? */
3402 : : bool xactLock; /* is any lockmode held at xact level? */
3403 : : } PerLockTagEntry;
3404 : :
3405 : : HASHCTL hash_ctl;
3406 : : HTAB *lockhtab;
3407 : : HASH_SEQ_STATUS status;
3408 : : LOCALLOCK *locallock;
3409 : :
3410 : : /* Create a local hash table keyed by LOCKTAG only */
3411 : 299 : hash_ctl.keysize = sizeof(LOCKTAG);
3412 : 299 : hash_ctl.entrysize = sizeof(PerLockTagEntry);
3413 : 299 : hash_ctl.hcxt = CurrentMemoryContext;
3414 : :
3415 : 299 : lockhtab = hash_create("CheckForSessionAndXactLocks table",
3416 : : 256, /* arbitrary initial size */
3417 : : &hash_ctl,
3418 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3419 : :
3420 : : /* Scan local lock table to find entries for each LOCKTAG */
3421 : 299 : hash_seq_init(&status, LockMethodLocalHash);
3422 : :
3423 [ + + ]: 1100 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3424 : : {
3425 : 803 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3426 : : PerLockTagEntry *hentry;
3427 : : bool found;
3428 : : int i;
3429 : :
3430 : : /*
3431 : : * Ignore VXID locks. We don't want those to be held by prepared
3432 : : * transactions, since they aren't meaningful after a restart.
3433 : : */
3434 [ - + ]: 803 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
1746 tgl@sss.pgh.pa.us 3435 :UBC 0 : continue;
3436 : :
3437 : : /* Ignore it if we don't actually hold the lock */
1746 tgl@sss.pgh.pa.us 3438 [ - + ]:CBC 803 : if (locallock->nLocks <= 0)
1746 tgl@sss.pgh.pa.us 3439 :UBC 0 : continue;
3440 : :
3441 : : /* Otherwise, find or make an entry in lockhtab */
1746 tgl@sss.pgh.pa.us 3442 :CBC 803 : hentry = (PerLockTagEntry *) hash_search(lockhtab,
1184 peter@eisentraut.org 3443 : 803 : &locallock->tag.lock,
3444 : : HASH_ENTER, &found);
1746 tgl@sss.pgh.pa.us 3445 [ + + ]: 803 : if (!found) /* initialize, if newly created */
3446 : 734 : hentry->sessLock = hentry->xactLock = false;
3447 : :
3448 : : /* Scan to see if we hold lock at session or xact level or both */
3449 [ + + ]: 1606 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3450 : : {
3451 [ + + ]: 803 : if (lockOwners[i].owner == NULL)
3452 : 10 : hentry->sessLock = true;
3453 : : else
3454 : 793 : hentry->xactLock = true;
3455 : : }
3456 : :
3457 : : /*
3458 : : * We can throw error immediately when we see both types of locks; no
3459 : : * need to wait around to see if there are more violations.
3460 : : */
3461 [ + + + + ]: 803 : if (hentry->sessLock && hentry->xactLock)
3462 [ + - ]: 2 : ereport(ERROR,
3463 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3464 : : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3465 : : }
3466 : :
3467 : : /* Success, so clean up */
3468 : 297 : hash_destroy(lockhtab);
3469 : 297 : }
3470 : :
3471 : : /*
3472 : : * AtPrepare_Locks
3473 : : * Do the preparatory work for a PREPARE: make 2PC state file records
3474 : : * for all locks currently held.
3475 : : *
3476 : : * Session-level locks are ignored, as are VXID locks.
3477 : : *
3478 : : * For the most part, we don't need to touch shared memory for this ---
3479 : : * all the necessary state information is in the locallock table.
3480 : : * Fast-path locks are an exception, however: we move any such locks to
3481 : : * the main table before allowing PREPARE TRANSACTION to succeed.
3482 : : */
3483 : : void
7627 3484 : 299 : AtPrepare_Locks(void)
3485 : : {
3486 : : HASH_SEQ_STATUS status;
3487 : : LOCALLOCK *locallock;
3488 : :
3489 : : /* First, verify there aren't locks of both xact and session level */
1746 3490 : 299 : CheckForSessionAndXactLocks();
3491 : :
3492 : : /* Now do the per-locallock cleanup work */
7452 3493 : 297 : hash_seq_init(&status, LockMethodLocalHash);
3494 : :
7627 3495 [ + + ]: 1094 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3496 : : {
3497 : : TwoPhaseLockRecord record;
3498 : 797 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3499 : : bool haveSessionLock;
3500 : : bool haveXactLock;
3501 : : int i;
3502 : :
3503 : : /*
3504 : : * Ignore VXID locks. We don't want those to be held by prepared
3505 : : * transactions, since they aren't meaningful after a restart.
3506 : : */
6817 3507 [ - + ]: 797 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3508 : 8 : continue;
3509 : :
3510 : : /* Ignore it if we don't actually hold the lock */
7627 3511 [ - + ]: 797 : if (locallock->nLocks <= 0)
7627 tgl@sss.pgh.pa.us 3512 :UBC 0 : continue;
3513 : :
3514 : : /* Scan to see whether we hold it at session or transaction level */
5114 tgl@sss.pgh.pa.us 3515 :CBC 797 : haveSessionLock = haveXactLock = false;
7627 3516 [ + + ]: 1594 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3517 : : {
3518 [ + + ]: 797 : if (lockOwners[i].owner == NULL)
5114 3519 : 8 : haveSessionLock = true;
3520 : : else
3521 : 789 : haveXactLock = true;
3522 : : }
3523 : :
3524 : : /* Ignore it if we have only session lock */
3525 [ + + ]: 797 : if (!haveXactLock)
3526 : 8 : continue;
3527 : :
3528 : : /* This can't happen, because we already checked it */
3529 [ - + ]: 789 : if (haveSessionLock)
5114 tgl@sss.pgh.pa.us 3530 [ # # ]:UBC 0 : ereport(ERROR,
3531 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3532 : : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3533 : :
3534 : : /*
3535 : : * If the local lock was taken via the fast-path, we need to move it
3536 : : * to the primary lock table, or just get a pointer to the existing
3537 : : * primary lock table entry if by chance it's already been
3538 : : * transferred.
3539 : : */
5456 rhaas@postgresql.org 3540 [ + + ]:CBC 789 : if (locallock->proclock == NULL)
3541 : : {
5404 3542 : 320 : locallock->proclock = FastPathGetRelationLockEntry(locallock);
5456 3543 : 320 : locallock->lock = locallock->proclock->tag.myLock;
3544 : : }
3545 : :
3546 : : /*
3547 : : * Arrange to not release any strong lock count held by this lock
3548 : : * entry. We must retain the count until the prepared transaction is
3549 : : * committed or rolled back.
3550 : : */
3184 peter_e@gmx.net 3551 : 789 : locallock->holdsStrongLockCount = false;
3552 : :
3553 : : /*
3554 : : * Create a 2PC record.
3555 : : */
7627 tgl@sss.pgh.pa.us 3556 : 789 : memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3557 : 789 : record.lockmode = locallock->tag.mode;
3558 : :
3559 : 789 : RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3560 : : &record, sizeof(TwoPhaseLockRecord));
3561 : : }
3562 : 297 : }
3563 : :
3564 : : /*
3565 : : * PostPrepare_Locks
3566 : : * Clean up after successful PREPARE
3567 : : *
3568 : : * Here, we want to transfer ownership of our locks to a dummy PGPROC
3569 : : * that's now associated with the prepared transaction, and we want to
3570 : : * clean out the corresponding entries in the LOCALLOCK table.
3571 : : *
3572 : : * Note: by removing the LOCALLOCK entries, we are leaving dangling
3573 : : * pointers in the transaction's resource owner. This is OK at the
3574 : : * moment since resowner.c doesn't try to free locks retail at a toplevel
3575 : : * transaction commit or abort. We could alternatively zero out nLocks
3576 : : * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3577 : : * but that probably costs more cycles.
3578 : : */
3579 : : void
302 michael@paquier.xyz 3580 :GNC 297 : PostPrepare_Locks(FullTransactionId fxid)
3581 : : {
3582 : 297 : PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3583 : : HASH_SEQ_STATUS status;
3584 : : LOCALLOCK *locallock;
3585 : : LOCK *lock;
3586 : : PROCLOCK *proclock;
3587 : : PROCLOCKTAG proclocktag;
3588 : : int partition;
3589 : :
3590 : : /* Can't prepare a lock group follower. */
3740 rhaas@postgresql.org 3591 [ - + - - ]:CBC 297 : Assert(MyProc->lockGroupLeader == NULL ||
3592 : : MyProc->lockGroupLeader == MyProc);
3593 : :
3594 : : /* This is a critical section: any error means big trouble */
7627 tgl@sss.pgh.pa.us 3595 : 297 : START_CRIT_SECTION();
3596 : :
3597 : : /*
3598 : : * First we run through the locallock table and get rid of unwanted
3599 : : * entries, then we scan the process's proclocks and transfer them to the
3600 : : * target proc.
3601 : : *
3602 : : * We do this separately because we may have multiple locallock entries
3603 : : * pointing to the same proclock, and we daren't end up with any dangling
3604 : : * pointers.
3605 : : */
7452 3606 : 297 : hash_seq_init(&status, LockMethodLocalHash);
3607 : :
7627 3608 [ + + ]: 1094 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3609 : : {
5114 3610 : 797 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3611 : : bool haveSessionLock;
3612 : : bool haveXactLock;
3613 : : int i;
3614 : :
7627 3615 [ + - - + ]: 797 : if (locallock->proclock == NULL || locallock->lock == NULL)
3616 : : {
3617 : : /*
3618 : : * We must've run out of shared memory while trying to set up this
3619 : : * lock. Just forget the local entry.
3620 : : */
7627 tgl@sss.pgh.pa.us 3621 [ # # ]:UBC 0 : Assert(locallock->nLocks == 0);
3622 : 0 : RemoveLocalLock(locallock);
3623 : 0 : continue;
3624 : : }
3625 : :
3626 : : /* Ignore VXID locks */
6817 tgl@sss.pgh.pa.us 3627 [ - + ]:CBC 797 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
6817 tgl@sss.pgh.pa.us 3628 :UBC 0 : continue;
3629 : :
3630 : : /* Scan to see whether we hold it at session or transaction level */
5114 tgl@sss.pgh.pa.us 3631 :CBC 797 : haveSessionLock = haveXactLock = false;
3632 [ + + ]: 1594 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3633 : : {
3634 [ + + ]: 797 : if (lockOwners[i].owner == NULL)
3635 : 8 : haveSessionLock = true;
3636 : : else
3637 : 789 : haveXactLock = true;
3638 : : }
3639 : :
3640 : : /* Ignore it if we have only session lock */
3641 [ + + ]: 797 : if (!haveXactLock)
3642 : 8 : continue;
3643 : :
3644 : : /* This can't happen, because we already checked it */
3645 [ - + ]: 789 : if (haveSessionLock)
5114 tgl@sss.pgh.pa.us 3646 [ # # ]:UBC 0 : ereport(PANIC,
3647 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3648 : : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3649 : :
3650 : : /* Mark the proclock to show we need to release this lockmode */
7627 tgl@sss.pgh.pa.us 3651 [ + - ]:CBC 789 : if (locallock->nLocks > 0)
3652 : 789 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3653 : :
3654 : : /* And remove the locallock hashtable entry */
3655 : 789 : RemoveLocalLock(locallock);
3656 : : }
3657 : :
3658 : : /*
3659 : : * Now, scan each lock partition separately.
3660 : : */
7450 3661 [ + + ]: 5049 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3662 : : {
3663 : : LWLock *partitionLock;
1203 andres@anarazel.de 3664 : 4752 : dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3665 : : dlist_mutable_iter proclock_iter;
3666 : :
4481 rhaas@postgresql.org 3667 : 4752 : partitionLock = LockHashPartitionLockByIndex(partition);
3668 : :
3669 : : /*
3670 : : * If the proclock list for this partition is empty, we can skip
3671 : : * acquiring the partition lock. This optimization is safer than the
3672 : : * situation in LockReleaseAll, because we got rid of any fast-path
3673 : : * locks during AtPrepare_Locks, so there cannot be any case where
3674 : : * another backend is adding something to our lists now. For safety,
3675 : : * though, we code this the same way as in LockReleaseAll.
3676 : : */
1203 andres@anarazel.de 3677 [ + + ]: 4752 : if (dlist_is_empty(procLocks))
7450 tgl@sss.pgh.pa.us 3678 : 4055 : continue; /* needn't examine this partition */
3679 : :
3680 : 697 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3681 : :
1203 andres@anarazel.de 3682 [ + - + + ]: 1450 : dlist_foreach_modify(proclock_iter, procLocks)
3683 : : {
3684 : 753 : proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3685 : :
7226 tgl@sss.pgh.pa.us 3686 [ - + ]: 753 : Assert(proclock->tag.myProc == MyProc);
3687 : :
3688 : 753 : lock = proclock->tag.myLock;
3689 : :
3690 : : /* Ignore VXID locks */
6817 3691 [ + + ]: 753 : if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
4541 3692 : 23 : continue;
3693 : :
3694 : : PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3695 : : LOCK_PRINT("PostPrepare_Locks", lock, 0);
7450 3696 [ - + ]: 730 : Assert(lock->nRequested >= 0);
3697 [ - + ]: 730 : Assert(lock->nGranted >= 0);
3698 [ - + ]: 730 : Assert(lock->nGranted <= lock->nRequested);
3699 [ - + ]: 730 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
3700 : :
3701 : : /* Ignore it if nothing to release (must be a session lock) */
5114 3702 [ + + ]: 730 : if (proclock->releaseMask == 0)
4541 3703 : 8 : continue;
3704 : :
3705 : : /* Else we should be releasing all locks */
7450 3706 [ - + ]: 722 : if (proclock->releaseMask != proclock->holdMask)
7450 tgl@sss.pgh.pa.us 3707 [ # # ]:UBC 0 : elog(PANIC, "we seem to have dropped a bit somewhere");
3708 : :
3709 : : /*
3710 : : * We cannot simply modify proclock->tag.myProc to reassign
3711 : : * ownership of the lock, because that's part of the hash key and
3712 : : * the proclock would then be in the wrong hash chain. Instead
3713 : : * use hash_update_hash_key. (We used to create a new hash entry,
3714 : : * but that risks out-of-memory failure if other processes are
3715 : : * busy making proclocks too.) We must unlink the proclock from
3716 : : * our procLink chain and put it into the new proc's chain, too.
3717 : : *
3718 : : * Note: the updated proclock hash key will still belong to the
3719 : : * same hash partition, cf proclock_hash(). So the partition lock
3720 : : * we already hold is sufficient for this.
3721 : : */
1203 andres@anarazel.de 3722 :CBC 722 : dlist_delete(&proclock->procLink);
3723 : :
3724 : : /*
3725 : : * Create the new hash key for the proclock.
3726 : : */
7226 tgl@sss.pgh.pa.us 3727 : 722 : proclocktag.myLock = lock;
3728 : 722 : proclocktag.myProc = newproc;
3729 : :
3730 : : /*
3731 : : * Update groupLeader pointer to point to the new proc. (We'd
3732 : : * better not be a member of somebody else's lock group!)
3733 : : */
3740 rhaas@postgresql.org 3734 [ - + ]: 722 : Assert(proclock->groupLeader == proclock->tag.myProc);
3735 : 722 : proclock->groupLeader = newproc;
3736 : :
3737 : : /*
3738 : : * Update the proclock. We should not find any existing entry for
3739 : : * the same hash key, since there can be only one entry for any
3740 : : * given lock with my own proc.
3741 : : */
4860 tgl@sss.pgh.pa.us 3742 [ - + ]: 722 : if (!hash_update_hash_key(LockMethodProcLockHash,
3743 : : proclock,
3744 : : &proclocktag))
4860 tgl@sss.pgh.pa.us 3745 [ # # ]:UBC 0 : elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3746 : :
3747 : : /* Re-link into the new proc's proclock list */
1203 andres@anarazel.de 3748 :CBC 722 : dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3749 : :
3750 : : PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3751 : : } /* loop over PROCLOCKs within this partition */
3752 : :
7450 tgl@sss.pgh.pa.us 3753 : 697 : LWLockRelease(partitionLock);
3754 : : } /* loop over partitions */
3755 : :
7627 3756 [ - + ]: 297 : END_CRIT_SECTION();
3757 : 297 : }
3758 : :
3759 : :
3760 : : /*
3761 : : * GetLockStatusData - Return a summary of the lock manager's internal
3762 : : * status, for use in a user-level reporting function.
3763 : : *
3764 : : * The return data consists of an array of LockInstanceData objects,
3765 : : * which are a lightly abstracted version of the PROCLOCK data structures,
3766 : : * i.e. there is one entry for each unique lock and interested PGPROC.
3767 : : * It is the caller's responsibility to match up related items (such as
3768 : : * references to the same lockable object or PGPROC) if wanted.
3769 : : *
3770 : : * The design goal is to hold the LWLocks for as short a time as possible;
3771 : : * thus, this function simply makes a copy of the necessary data and releases
3772 : : * the locks, allowing the caller to contemplate and format the data for as
3773 : : * long as it pleases.
3774 : : */
3775 : : LockData *
8648 3776 : 302 : GetLockStatusData(void)
3777 : : {
3778 : : LockData *data;
3779 : : PROCLOCK *proclock;
3780 : : HASH_SEQ_STATUS seqstat;
3781 : : int els;
3782 : : int el;
3783 : : int i;
3784 : :
146 michael@paquier.xyz 3785 :GNC 302 : data = palloc_object(LockData);
3786 : :
3787 : : /* Guess how much space we'll need. */
1484 rhaas@postgresql.org 3788 :CBC 302 : els = MaxBackends;
5456 3789 : 302 : el = 0;
146 michael@paquier.xyz 3790 :GNC 302 : data->locks = palloc_array(LockInstanceData, els);
3791 : :
3792 : : /*
3793 : : * First, we iterate through the per-backend fast-path arrays, locking
3794 : : * them one at a time. This might produce an inconsistent picture of the
3795 : : * system state, but taking all of those LWLocks at the same time seems
3796 : : * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3797 : : * matter too much, because none of these locks can be involved in lock
3798 : : * conflicts anyway - anything that might must be present in the main lock
3799 : : * table. (For the same reason, we don't sweat about making leaderPid
3800 : : * completely valid. We cannot safely dereference another backend's
3801 : : * lockGroupLeader field without holding all lock partition locks, and
3802 : : * it's not worth that.)
3803 : : */
5456 rhaas@postgresql.org 3804 [ + + ]:CBC 45077 : for (i = 0; i < ProcGlobal->allProcCount; ++i)
3805 : : {
120 drowley@postgresql.o 3806 :GNC 44775 : PGPROC *proc = GetPGProcByNumber(i);
3807 : :
3808 : : /* Skip backends with pid=0, as they don't hold fast-path locks */
557 fujii@postgresql.org 3809 [ + + ]:CBC 44775 : if (proc->pid == 0)
3810 : 40128 : continue;
3811 : :
2181 tgl@sss.pgh.pa.us 3812 : 4647 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3813 : :
557 fujii@postgresql.org 3814 [ + + ]: 41823 : for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3815 : : {
3816 : : /* Skip groups without registered fast-path locks */
3817 [ + + ]: 37176 : if (proc->fpLockBits[g] == 0)
5456 rhaas@postgresql.org 3818 : 32506 : continue;
3819 : :
557 fujii@postgresql.org 3820 [ + + ]: 79390 : for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3821 : : {
3822 : : LockInstanceData *instance;
3823 [ - + - + ]: 74720 : uint32 f = FAST_PATH_SLOT(g, j);
3824 [ - + - + ]: 74720 : uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3825 : :
3826 : : /* Skip unallocated slots */
3827 [ + + ]: 74720 : if (!lockbits)
3828 : 68254 : continue;
3829 : :
3830 [ + + ]: 6466 : if (el >= els)
3831 : : {
3832 : 19 : els += MaxBackends;
3833 : 19 : data->locks = (LockInstanceData *)
3834 : 19 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3835 : : }
3836 : :
3837 : 6466 : instance = &data->locks[el];
3838 : 6466 : SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3839 : : proc->fpRelId[f]);
3840 : 6466 : instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3841 : 6466 : instance->waitLockMode = NoLock;
3842 : 6466 : instance->vxid.procNumber = proc->vxid.procNumber;
3843 : 6466 : instance->vxid.localTransactionId = proc->vxid.lxid;
3844 : 6466 : instance->pid = proc->pid;
3845 : 6466 : instance->leaderPid = proc->pid;
3846 : 6466 : instance->fastpath = true;
3847 : :
3848 : : /*
3849 : : * Successfully taking fast path lock means there were no
3850 : : * conflicting locks.
3851 : : */
3852 : 6466 : instance->waitStart = 0;
3853 : :
3854 : 6466 : el++;
3855 : : }
3856 : : }
3857 : :
5388 rhaas@postgresql.org 3858 [ + + ]: 4647 : if (proc->fpVXIDLock)
3859 : : {
3860 : : VirtualTransactionId vxid;
3861 : : LockInstanceData *instance;
3862 : :
3863 [ + + ]: 1886 : if (el >= els)
3864 : : {
1484 3865 : 8 : els += MaxBackends;
5388 3866 : 8 : data->locks = (LockInstanceData *)
3867 : 8 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3868 : : }
3869 : :
793 heikki.linnakangas@i 3870 : 1886 : vxid.procNumber = proc->vxid.procNumber;
5388 rhaas@postgresql.org 3871 : 1886 : vxid.localTransactionId = proc->fpLocalTransactionId;
3872 : :
3873 : 1886 : instance = &data->locks[el];
3874 : 1886 : SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3875 : 1886 : instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3876 : 1886 : instance->waitLockMode = NoLock;
793 heikki.linnakangas@i 3877 : 1886 : instance->vxid.procNumber = proc->vxid.procNumber;
3878 : 1886 : instance->vxid.localTransactionId = proc->vxid.lxid;
5388 rhaas@postgresql.org 3879 : 1886 : instance->pid = proc->pid;
3725 tgl@sss.pgh.pa.us 3880 : 1886 : instance->leaderPid = proc->pid;
5388 rhaas@postgresql.org 3881 : 1886 : instance->fastpath = true;
1905 fujii@postgresql.org 3882 : 1886 : instance->waitStart = 0;
3883 : :
5388 rhaas@postgresql.org 3884 : 1886 : el++;
3885 : : }
3886 : :
2181 tgl@sss.pgh.pa.us 3887 : 4647 : LWLockRelease(&proc->fpInfoLock);
3888 : : }
3889 : :
3890 : : /*
3891 : : * Next, acquire lock on the entire shared lock data structure. We do
3892 : : * this so that, at least for locks in the primary lock table, the state
3893 : : * will be self-consistent.
3894 : : *
3895 : : * Since this is a read-only operation, we take shared instead of
3896 : : * exclusive lock. There's not a whole lot of point to this, because all
3897 : : * the normal operations require exclusive lock, but it doesn't hurt
3898 : : * anything either. It will at least allow two backends to do
3899 : : * GetLockStatusData in parallel.
3900 : : *
3901 : : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3902 : : */
7450 3903 [ + + ]: 5134 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4481 rhaas@postgresql.org 3904 : 4832 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3905 : :
3906 : : /* Now we can safely count the number of proclocks */
5456 3907 : 302 : data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3908 [ + + ]: 302 : if (data->nelements > els)
3909 : : {
3910 : 20 : els = data->nelements;
3911 : 20 : data->locks = (LockInstanceData *)
3912 : 20 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3913 : : }
3914 : :
3915 : : /* Now scan the tables to copy the data */
7226 tgl@sss.pgh.pa.us 3916 : 302 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3917 : :
3918 [ + + ]: 4412 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3919 : : {
3920 : 4110 : PGPROC *proc = proclock->tag.myProc;
3921 : 4110 : LOCK *lock = proclock->tag.myLock;
5077 bruce@momjian.us 3922 : 4110 : LockInstanceData *instance = &data->locks[el];
3923 : :
5456 rhaas@postgresql.org 3924 : 4110 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3925 : 4110 : instance->holdMask = proclock->holdMask;
3926 [ + + ]: 4110 : if (proc->waitLock == proclock->tag.myLock)
3927 : 10 : instance->waitLockMode = proc->waitLockMode;
3928 : : else
3929 : 4100 : instance->waitLockMode = NoLock;
793 heikki.linnakangas@i 3930 : 4110 : instance->vxid.procNumber = proc->vxid.procNumber;
3931 : 4110 : instance->vxid.localTransactionId = proc->vxid.lxid;
5456 rhaas@postgresql.org 3932 : 4110 : instance->pid = proc->pid;
3725 tgl@sss.pgh.pa.us 3933 : 4110 : instance->leaderPid = proclock->groupLeader->pid;
5456 rhaas@postgresql.org 3934 : 4110 : instance->fastpath = false;
1905 fujii@postgresql.org 3935 : 4110 : instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3936 : :
7226 tgl@sss.pgh.pa.us 3937 : 4110 : el++;
3938 : : }
3939 : :
3940 : : /*
3941 : : * And release locks. We do this in reverse order for two reasons: (1)
3942 : : * Anyone else who needs more than one of the locks will be trying to lock
3943 : : * them in increasing order; we don't want to release the other process
3944 : : * until it can get all the locks it needs. (2) This avoids O(N^2)
3945 : : * behavior inside LWLockRelease.
3946 : : */
7153 bruce@momjian.us 3947 [ + + ]: 5134 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4481 rhaas@postgresql.org 3948 : 4832 : LWLockRelease(LockHashPartitionLockByIndex(i));
3949 : :
7450 tgl@sss.pgh.pa.us 3950 [ - + ]: 302 : Assert(el == data->nelements);
3951 : :
8648 3952 : 302 : return data;
3953 : : }
3954 : :
3955 : : /*
3956 : : * GetBlockerStatusData - Return a summary of the lock manager's state
3957 : : * concerning locks that are blocking the specified PID or any member of
3958 : : * the PID's lock group, for use in a user-level reporting function.
3959 : : *
3960 : : * For each PID within the lock group that is awaiting some heavyweight lock,
3961 : : * the return data includes an array of LockInstanceData objects, which are
3962 : : * the same data structure used by GetLockStatusData; but unlike that function,
3963 : : * this one reports only the PROCLOCKs associated with the lock that that PID
3964 : : * is blocked on. (Hence, all the locktags should be the same for any one
3965 : : * blocked PID.) In addition, we return an array of the PIDs of those backends
3966 : : * that are ahead of the blocked PID in the lock's wait queue. These can be
3967 : : * compared with the PIDs in the LockInstanceData objects to determine which
3968 : : * waiters are ahead of or behind the blocked PID in the queue.
3969 : : *
3970 : : * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3971 : : * waiting on any heavyweight lock, return empty arrays.
3972 : : *
3973 : : * The design goal is to hold the LWLocks for as short a time as possible;
3974 : : * thus, this function simply makes a copy of the necessary data and releases
3975 : : * the locks, allowing the caller to contemplate and format the data for as
3976 : : * long as it pleases.
3977 : : */
3978 : : BlockedProcsData *
3725 3979 : 2078 : GetBlockerStatusData(int blocked_pid)
3980 : : {
3981 : : BlockedProcsData *data;
3982 : : PGPROC *proc;
3983 : : int i;
3984 : :
146 michael@paquier.xyz 3985 :GNC 2078 : data = palloc_object(BlockedProcsData);
3986 : :
3987 : : /*
3988 : : * Guess how much space we'll need, and preallocate. Most of the time
3989 : : * this will avoid needing to do repalloc while holding the LWLocks. (We
3990 : : * assume, but check with an Assert, that MaxBackends is enough entries
3991 : : * for the procs[] array; the other two could need enlargement, though.)
3992 : : */
3725 tgl@sss.pgh.pa.us 3993 :CBC 2078 : data->nprocs = data->nlocks = data->npids = 0;
1484 rhaas@postgresql.org 3994 : 2078 : data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
146 michael@paquier.xyz 3995 :GNC 2078 : data->procs = palloc_array(BlockedProcData, data->maxprocs);
3996 : 2078 : data->locks = palloc_array(LockInstanceData, data->maxlocks);
3997 : 2078 : data->waiter_pids = palloc_array(int, data->maxpids);
3998 : :
3999 : : /*
4000 : : * In order to search the ProcArray for blocked_pid and assume that that
4001 : : * entry won't immediately disappear under us, we must hold ProcArrayLock.
4002 : : * In addition, to examine the lock grouping fields of any other backend,
4003 : : * we must hold all the hash partition locks. (Only one of those locks is
4004 : : * actually relevant for any one lock group, but we can't know which one
4005 : : * ahead of time.) It's fairly annoying to hold all those locks
4006 : : * throughout this, but it's no worse than GetLockStatusData(), and it
4007 : : * does have the advantage that we're guaranteed to return a
4008 : : * self-consistent instantaneous state.
4009 : : */
3725 tgl@sss.pgh.pa.us 4010 :CBC 2078 : LWLockAcquire(ProcArrayLock, LW_SHARED);
4011 : :
4012 : 2078 : proc = BackendPidGetProcWithLock(blocked_pid);
4013 : :
4014 : : /* Nothing to do if it's gone */
4015 [ + - ]: 2078 : if (proc != NULL)
4016 : : {
4017 : : /*
4018 : : * Acquire lock on the entire shared lock data structure. See notes
4019 : : * in GetLockStatusData().
4020 : : */
4021 [ + + ]: 35326 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4022 : 33248 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4023 : :
4024 [ + + ]: 2078 : if (proc->lockGroupLeader == NULL)
4025 : : {
4026 : : /* Easy case, proc is not a lock group member */
4027 : 1834 : GetSingleProcBlockerStatusData(proc, data);
4028 : : }
4029 : : else
4030 : : {
4031 : : /* Examine all procs in proc's lock group */
4032 : : dlist_iter iter;
4033 : :
4034 [ + - + + ]: 582 : dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
4035 : : {
4036 : : PGPROC *memberProc;
4037 : :
4038 : 338 : memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4039 : 338 : GetSingleProcBlockerStatusData(memberProc, data);
4040 : : }
4041 : : }
4042 : :
4043 : : /*
4044 : : * And release locks. See notes in GetLockStatusData().
4045 : : */
4046 [ + + ]: 35326 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4047 : 33248 : LWLockRelease(LockHashPartitionLockByIndex(i));
4048 : :
4049 [ - + ]: 2078 : Assert(data->nprocs <= data->maxprocs);
4050 : : }
4051 : :
4052 : 2078 : LWLockRelease(ProcArrayLock);
4053 : :
4054 : 2078 : return data;
4055 : : }
4056 : :
4057 : : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4058 : : static void
4059 : 2172 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
4060 : : {
4061 : 2172 : LOCK *theLock = blocked_proc->waitLock;
4062 : : BlockedProcData *bproc;
4063 : : dlist_iter proclock_iter;
4064 : : dlist_iter proc_iter;
4065 : : dclist_head *waitQueue;
4066 : : int queue_size;
4067 : :
4068 : : /* Nothing to do if this proc is not blocked */
4069 [ + + ]: 2172 : if (theLock == NULL)
4070 : 911 : return;
4071 : :
4072 : : /* Set up a procs[] element */
4073 : 1261 : bproc = &data->procs[data->nprocs++];
4074 : 1261 : bproc->pid = blocked_proc->pid;
4075 : 1261 : bproc->first_lock = data->nlocks;
4076 : 1261 : bproc->first_waiter = data->npids;
4077 : :
4078 : : /*
4079 : : * We may ignore the proc's fast-path arrays, since nothing in those could
4080 : : * be related to a contended lock.
4081 : : */
4082 : :
4083 : : /* Collect all PROCLOCKs associated with theLock */
1203 andres@anarazel.de 4084 [ + - + + ]: 3834 : dlist_foreach(proclock_iter, &theLock->procLocks)
4085 : : {
4086 : 2573 : PROCLOCK *proclock =
4087 : 2573 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3725 tgl@sss.pgh.pa.us 4088 : 2573 : PGPROC *proc = proclock->tag.myProc;
4089 : 2573 : LOCK *lock = proclock->tag.myLock;
4090 : : LockInstanceData *instance;
4091 : :
4092 [ - + ]: 2573 : if (data->nlocks >= data->maxlocks)
4093 : : {
1484 rhaas@postgresql.org 4094 :UBC 0 : data->maxlocks += MaxBackends;
3725 tgl@sss.pgh.pa.us 4095 : 0 : data->locks = (LockInstanceData *)
4096 : 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4097 : : }
4098 : :
3725 tgl@sss.pgh.pa.us 4099 :CBC 2573 : instance = &data->locks[data->nlocks];
4100 : 2573 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4101 : 2573 : instance->holdMask = proclock->holdMask;
4102 [ + + ]: 2573 : if (proc->waitLock == lock)
4103 : 1307 : instance->waitLockMode = proc->waitLockMode;
4104 : : else
4105 : 1266 : instance->waitLockMode = NoLock;
793 heikki.linnakangas@i 4106 : 2573 : instance->vxid.procNumber = proc->vxid.procNumber;
4107 : 2573 : instance->vxid.localTransactionId = proc->vxid.lxid;
3725 tgl@sss.pgh.pa.us 4108 : 2573 : instance->pid = proc->pid;
4109 : 2573 : instance->leaderPid = proclock->groupLeader->pid;
4110 : 2573 : instance->fastpath = false;
4111 : 2573 : data->nlocks++;
4112 : : }
4113 : :
4114 : : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4115 : 1261 : waitQueue = &(theLock->waitProcs);
1203 andres@anarazel.de 4116 : 1261 : queue_size = dclist_count(waitQueue);
4117 : :
3725 tgl@sss.pgh.pa.us 4118 [ - + ]: 1261 : if (queue_size > data->maxpids - data->npids)
4119 : : {
1484 rhaas@postgresql.org 4120 :UBC 0 : data->maxpids = Max(data->maxpids + MaxBackends,
4121 : : data->npids + queue_size);
3725 tgl@sss.pgh.pa.us 4122 : 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
4123 : 0 : sizeof(int) * data->maxpids);
4124 : : }
4125 : :
4126 : : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
1203 andres@anarazel.de 4127 [ + - + - ]:CBC 1284 : dclist_foreach(proc_iter, waitQueue)
4128 : : {
74 heikki.linnakangas@i 4129 :GNC 1284 : PGPROC *queued_proc = dlist_container(PGPROC, waitLink, proc_iter.cur);
4130 : :
1308 drowley@postgresql.o 4131 [ + + ]:CBC 1284 : if (queued_proc == blocked_proc)
3725 tgl@sss.pgh.pa.us 4132 : 1261 : break;
1308 drowley@postgresql.o 4133 : 23 : data->waiter_pids[data->npids++] = queued_proc->pid;
4134 : : }
4135 : :
3725 tgl@sss.pgh.pa.us 4136 : 1261 : bproc->num_locks = data->nlocks - bproc->first_lock;
4137 : 1261 : bproc->num_waiters = data->npids - bproc->first_waiter;
4138 : : }
4139 : :
4140 : : /*
4141 : : * Returns a list of currently held AccessExclusiveLocks, for use by
4142 : : * LogStandbySnapshot(). The result is a palloc'd array,
4143 : : * with the number of elements returned into *nlocks.
4144 : : *
4145 : : * XXX This currently takes a lock on all partitions of the lock table,
4146 : : * but it's possible to do better. By reference counting locks and storing
4147 : : * the value in the ProcArray entry for each backend we could tell if any
4148 : : * locks need recording without having to acquire the partition locks and
4149 : : * scan the lock table. Whether that's worth the additional overhead
4150 : : * is pretty dubious though.
4151 : : */
4152 : : xl_standby_lock *
5981 simon@2ndQuadrant.co 4153 : 1537 : GetRunningTransactionLocks(int *nlocks)
4154 : : {
4155 : : xl_standby_lock *accessExclusiveLocks;
4156 : : PROCLOCK *proclock;
4157 : : HASH_SEQ_STATUS seqstat;
4158 : : int i;
4159 : : int index;
4160 : : int els;
4161 : :
4162 : : /*
4163 : : * Acquire lock on the entire shared lock data structure.
4164 : : *
4165 : : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4166 : : */
4167 [ + + ]: 26129 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4481 rhaas@postgresql.org 4168 : 24592 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4169 : :
4170 : : /* Now we can safely count the number of proclocks */
5981 simon@2ndQuadrant.co 4171 : 1537 : els = hash_get_num_entries(LockMethodProcLockHash);
4172 : :
4173 : : /*
4174 : : * Allocating enough space for all locks in the lock table is overkill,
4175 : : * but it's more convenient and faster than having to enlarge the array.
4176 : : */
4177 : 1537 : accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4178 : :
4179 : : /* Now scan the tables to copy the data */
4865 tgl@sss.pgh.pa.us 4180 : 1537 : hash_seq_init(&seqstat, LockMethodProcLockHash);
4181 : :
4182 : : /*
4183 : : * If lock is a currently granted AccessExclusiveLock then it will have
4184 : : * just one proclock holder, so locks are never accessed twice in this
4185 : : * particular case. Don't copy this code for use elsewhere because in the
4186 : : * general case this will give you duplicate locks when looking at
4187 : : * non-exclusive lock types.
4188 : : */
5981 simon@2ndQuadrant.co 4189 : 1537 : index = 0;
4190 [ + + ]: 8001 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4191 : : {
4192 : : /* make sure this definition matches the one used in LockAcquire */
4193 [ + + ]: 6464 : if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4194 [ + + ]: 3929 : proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4195 : : {
5912 bruce@momjian.us 4196 : 2205 : PGPROC *proc = proclock->tag.myProc;
4197 : 2205 : LOCK *lock = proclock->tag.myLock;
2090 andres@anarazel.de 4198 : 2205 : TransactionId xid = proc->xid;
4199 : :
4200 : : /*
4201 : : * Don't record locks for transactions if we know they have
4202 : : * already issued their WAL record for commit but not yet released
4203 : : * lock. It is still possible that we see locks held by already
4204 : : * complete transactions, if they haven't yet zeroed their xids.
4205 : : */
5216 simon@2ndQuadrant.co 4206 [ + + ]: 2205 : if (!TransactionIdIsValid(xid))
4207 : 4 : continue;
4208 : :
4209 : 2201 : accessExclusiveLocks[index].xid = xid;
5912 bruce@momjian.us 4210 : 2201 : accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
5981 simon@2ndQuadrant.co 4211 : 2201 : accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4212 : :
4213 : 2201 : index++;
4214 : : }
4215 : : }
4216 : :
4718 tgl@sss.pgh.pa.us 4217 [ - + ]: 1537 : Assert(index <= els);
4218 : :
4219 : : /*
4220 : : * And release locks. We do this in reverse order for two reasons: (1)
4221 : : * Anyone else who needs more than one of the locks will be trying to lock
4222 : : * them in increasing order; we don't want to release the other process
4223 : : * until it can get all the locks it needs. (2) This avoids O(N^2)
4224 : : * behavior inside LWLockRelease.
4225 : : */
5981 simon@2ndQuadrant.co 4226 [ + + ]: 26129 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4481 rhaas@postgresql.org 4227 : 24592 : LWLockRelease(LockHashPartitionLockByIndex(i));
4228 : :
5981 simon@2ndQuadrant.co 4229 : 1537 : *nlocks = index;
4230 : 1537 : return accessExclusiveLocks;
4231 : : }
4232 : :
4233 : : /* Provide the textual name of any lock mode */
4234 : : const char *
7452 tgl@sss.pgh.pa.us 4235 : 14413 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4236 : : {
4237 [ + - - + ]: 14413 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4238 [ + - - + ]: 14413 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4239 : 14413 : return LockMethods[lockmethodid]->lockModeNames[mode];
4240 : : }
4241 : :
4242 : : #ifdef LOCK_DEBUG
4243 : : /*
4244 : : * Dump all locks in the given proc's myProcLocks lists.
4245 : : *
4246 : : * Caller is responsible for having acquired appropriate LWLocks.
4247 : : */
4248 : : void
4249 : : DumpLocks(PGPROC *proc)
4250 : : {
4251 : : int i;
4252 : :
4253 : : if (proc == NULL)
4254 : : return;
4255 : :
4256 : : if (proc->waitLock)
4257 : : LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4258 : :
4259 : : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4260 : : {
4261 : : dlist_head *procLocks = &proc->myProcLocks[i];
4262 : : dlist_iter iter;
4263 : :
4264 : : dlist_foreach(iter, procLocks)
4265 : : {
4266 : : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4267 : : LOCK *lock = proclock->tag.myLock;
4268 : :
4269 : : Assert(proclock->tag.myProc == proc);
4270 : : PROCLOCK_PRINT("DumpLocks", proclock);
4271 : : LOCK_PRINT("DumpLocks", lock, 0);
4272 : : }
4273 : : }
4274 : : }
4275 : :
4276 : : /*
4277 : : * Dump all lmgr locks.
4278 : : *
4279 : : * Caller is responsible for having acquired appropriate LWLocks.
4280 : : */
4281 : : void
4282 : : DumpAllLocks(void)
4283 : : {
4284 : : PGPROC *proc;
4285 : : PROCLOCK *proclock;
4286 : : LOCK *lock;
4287 : : HASH_SEQ_STATUS status;
4288 : :
4289 : : proc = MyProc;
4290 : :
4291 : : if (proc && proc->waitLock)
4292 : : LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4293 : :
4294 : : hash_seq_init(&status, LockMethodProcLockHash);
4295 : :
4296 : : while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4297 : : {
4298 : : PROCLOCK_PRINT("DumpAllLocks", proclock);
4299 : :
4300 : : lock = proclock->tag.myLock;
4301 : : if (lock)
4302 : : LOCK_PRINT("DumpAllLocks", lock, 0);
4303 : : else
4304 : : elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4305 : : }
4306 : : }
4307 : : #endif /* LOCK_DEBUG */
4308 : :
4309 : : /*
4310 : : * LOCK 2PC resource manager's routines
4311 : : */
4312 : :
4313 : : /*
4314 : : * Re-acquire a lock belonging to a transaction that was prepared.
4315 : : *
4316 : : * Because this function is run at db startup, re-acquiring the locks should
4317 : : * never conflict with running transactions because there are none. We
4318 : : * assume that the lock state represented by the stored 2PC files is legal.
4319 : : *
4320 : : * When switching from Hot Standby mode to normal operation, the locks will
4321 : : * be already held by the startup process. The locks are acquired for the new
4322 : : * procs without checking for conflicts, so we don't get a conflict between the
4323 : : * startup process and the dummy procs, even though we will momentarily have
4324 : : * a situation where two procs are holding the same AccessExclusiveLock,
4325 : : * which isn't normally possible because the conflict. If we're in standby
4326 : : * mode, but a recovery snapshot hasn't been established yet, it's possible
4327 : : * that some but not all of the locks are already held by the startup process.
4328 : : *
4329 : : * This approach is simple, but also a bit dangerous, because if there isn't
4330 : : * enough shared memory to acquire the locks, an error will be thrown, which
4331 : : * is promoted to FATAL and recovery will abort, bringing down postmaster.
4332 : : * A safer approach would be to transfer the locks like we do in
4333 : : * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4334 : : * read-only backends to use up all the shared lock memory anyway, so that
4335 : : * replaying the WAL record that needs to acquire a lock will throw an error
4336 : : * and PANIC anyway.
4337 : : */
4338 : : void
302 michael@paquier.xyz 4339 :GNC 90 : lock_twophase_recover(FullTransactionId fxid, uint16 info,
4340 : : void *recdata, uint32 len)
4341 : : {
7627 tgl@sss.pgh.pa.us 4342 :CBC 90 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
302 michael@paquier.xyz 4343 :GNC 90 : PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4344 : : LOCKTAG *locktag;
4345 : : LOCKMODE lockmode;
4346 : : LOCKMETHODID lockmethodid;
4347 : : LOCK *lock;
4348 : : PROCLOCK *proclock;
4349 : : PROCLOCKTAG proclocktag;
4350 : : bool found;
4351 : : uint32 hashcode;
4352 : : uint32 proclock_hashcode;
4353 : : int partition;
4354 : : LWLock *partitionLock;
4355 : : LockMethod lockMethodTable;
4356 : :
7627 tgl@sss.pgh.pa.us 4357 [ - + ]:CBC 90 : Assert(len == sizeof(TwoPhaseLockRecord));
4358 : 90 : locktag = &rec->locktag;
4359 : 90 : lockmode = rec->lockmode;
4360 : 90 : lockmethodid = locktag->locktag_lockmethodid;
4361 : :
7452 4362 [ + - - + ]: 90 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
7627 tgl@sss.pgh.pa.us 4363 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
7452 tgl@sss.pgh.pa.us 4364 :CBC 90 : lockMethodTable = LockMethods[lockmethodid];
4365 : :
7226 4366 : 90 : hashcode = LockTagHashCode(locktag);
4367 : 90 : partition = LockHashPartition(hashcode);
4368 : 90 : partitionLock = LockHashPartitionLock(hashcode);
4369 : :
7450 4370 : 90 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4371 : :
4372 : : /*
4373 : : * Find or create a lock with this tag.
4374 : : */
7226 4375 : 90 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4376 : : locktag,
4377 : : hashcode,
4378 : : HASH_ENTER_NULL,
4379 : : &found);
7627 4380 [ - + ]: 90 : if (!lock)
4381 : : {
7450 tgl@sss.pgh.pa.us 4382 :UBC 0 : LWLockRelease(partitionLock);
7627 4383 [ # # ]: 0 : ereport(ERROR,
4384 : : (errcode(ERRCODE_OUT_OF_MEMORY),
4385 : : errmsg("out of shared memory"),
4386 : : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4387 : : }
4388 : :
4389 : : /*
4390 : : * if it's a new lock object, initialize it
4391 : : */
7627 tgl@sss.pgh.pa.us 4392 [ + + ]:CBC 90 : if (!found)
4393 : : {
4394 : 78 : lock->grantMask = 0;
4395 : 78 : lock->waitMask = 0;
1203 andres@anarazel.de 4396 : 78 : dlist_init(&lock->procLocks);
4397 : 78 : dclist_init(&lock->waitProcs);
7627 tgl@sss.pgh.pa.us 4398 : 78 : lock->nRequested = 0;
4399 : 78 : lock->nGranted = 0;
4400 [ + - + - : 468 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
+ - + - +
+ ]
4401 [ - + - - : 78 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
- - - - -
- ]
4402 : : LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4403 : : }
4404 : : else
4405 : : {
4406 : : LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4407 [ + - - + ]: 12 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4408 [ + - - + ]: 12 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4409 [ - + ]: 12 : Assert(lock->nGranted <= lock->nRequested);
4410 : : }
4411 : :
4412 : : /*
4413 : : * Create the hash key for the proclock table.
4414 : : */
7226 4415 : 90 : proclocktag.myLock = lock;
4416 : 90 : proclocktag.myProc = proc;
4417 : :
4418 : 90 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4419 : :
4420 : : /*
4421 : : * Find or create a proclock entry with this tag
4422 : : */
4423 : 90 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4424 : : &proclocktag,
4425 : : proclock_hashcode,
4426 : : HASH_ENTER_NULL,
4427 : : &found);
7627 4428 [ - + ]: 90 : if (!proclock)
4429 : : {
4430 : : /* Oops, not enough shmem for the proclock */
7627 tgl@sss.pgh.pa.us 4431 [ # # ]:UBC 0 : if (lock->nRequested == 0)
4432 : : {
4433 : : /*
4434 : : * There are no other requestors of this lock, so garbage-collect
4435 : : * the lock object. We *must* do this to avoid a permanent leak
4436 : : * of shared memory, because there won't be anything to cause
4437 : : * anyone to release the lock object later.
4438 : : */
1203 andres@anarazel.de 4439 [ # # ]: 0 : Assert(dlist_is_empty(&lock->procLocks));
7226 tgl@sss.pgh.pa.us 4440 [ # # ]: 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
1184 peter@eisentraut.org 4441 : 0 : &(lock->tag),
4442 : : hashcode,
4443 : : HASH_REMOVE,
4444 : : NULL))
7627 tgl@sss.pgh.pa.us 4445 [ # # ]: 0 : elog(PANIC, "lock table corrupted");
4446 : : }
7450 4447 : 0 : LWLockRelease(partitionLock);
7627 4448 [ # # ]: 0 : ereport(ERROR,
4449 : : (errcode(ERRCODE_OUT_OF_MEMORY),
4450 : : errmsg("out of shared memory"),
4451 : : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4452 : : }
4453 : :
4454 : : /*
4455 : : * If new, initialize the new entry
4456 : : */
7627 tgl@sss.pgh.pa.us 4457 [ + + ]:CBC 90 : if (!found)
4458 : : {
3740 rhaas@postgresql.org 4459 [ - + ]: 82 : Assert(proc->lockGroupLeader == NULL);
4460 : 82 : proclock->groupLeader = proc;
7627 tgl@sss.pgh.pa.us 4461 : 82 : proclock->holdMask = 0;
4462 : 82 : proclock->releaseMask = 0;
4463 : : /* Add proclock to appropriate lists */
1203 andres@anarazel.de 4464 : 82 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4465 : 82 : dlist_push_tail(&proc->myProcLocks[partition],
4466 : : &proclock->procLink);
4467 : : PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4468 : : }
4469 : : else
4470 : : {
4471 : : PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
7627 tgl@sss.pgh.pa.us 4472 [ - + ]: 8 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
4473 : : }
4474 : :
4475 : : /*
4476 : : * lock->nRequested and lock->requested[] count the total number of
4477 : : * requests, whether granted or waiting, so increment those immediately.
4478 : : */
4479 : 90 : lock->nRequested++;
4480 : 90 : lock->requested[lockmode]++;
4481 [ + - - + ]: 90 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4482 : :
4483 : : /*
4484 : : * We shouldn't already hold the desired lock.
4485 : : */
4486 [ - + ]: 90 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
7627 tgl@sss.pgh.pa.us 4487 [ # # ]:UBC 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
4488 : : lockMethodTable->lockModeNames[lockmode],
4489 : : lock->tag.locktag_field1, lock->tag.locktag_field2,
4490 : : lock->tag.locktag_field3);
4491 : :
4492 : : /*
4493 : : * We ignore any possible conflicts and just grant ourselves the lock. Not
4494 : : * only because we don't bother, but also to avoid deadlocks when
4495 : : * switching from standby to normal mode. See function comment.
4496 : : */
7627 tgl@sss.pgh.pa.us 4497 :CBC 90 : GrantLock(lock, proclock, lockmode);
4498 : :
4499 : : /*
4500 : : * Bump strong lock count, to make sure any fast-path lock requests won't
4501 : : * be granted without consulting the primary lock table.
4502 : : */
5088 rhaas@postgresql.org 4503 [ + - + + : 90 : if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
+ - + + ]
4504 : : {
5077 bruce@momjian.us 4505 : 18 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4506 : :
5404 rhaas@postgresql.org 4507 [ - + ]: 18 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4508 : 18 : FastPathStrongRelationLocks->count[fasthashcode]++;
4509 : 18 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4510 : : }
4511 : :
7450 tgl@sss.pgh.pa.us 4512 : 90 : LWLockRelease(partitionLock);
7627 4513 : 90 : }
4514 : :
4515 : : /*
4516 : : * Re-acquire a lock belonging to a transaction that was prepared, when
4517 : : * starting up into hot standby mode.
4518 : : */
4519 : : void
302 michael@paquier.xyz 4520 :UNC 0 : lock_twophase_standby_recover(FullTransactionId fxid, uint16 info,
4521 : : void *recdata, uint32 len)
4522 : : {
5981 simon@2ndQuadrant.co 4523 :UBC 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4524 : : LOCKTAG *locktag;
4525 : : LOCKMODE lockmode;
4526 : : LOCKMETHODID lockmethodid;
4527 : :
4528 [ # # ]: 0 : Assert(len == sizeof(TwoPhaseLockRecord));
4529 : 0 : locktag = &rec->locktag;
4530 : 0 : lockmode = rec->lockmode;
4531 : 0 : lockmethodid = locktag->locktag_lockmethodid;
4532 : :
4533 [ # # # # ]: 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4534 [ # # ]: 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4535 : :
4536 [ # # ]: 0 : if (lockmode == AccessExclusiveLock &&
4537 [ # # ]: 0 : locktag->locktag_type == LOCKTAG_RELATION)
4538 : : {
302 michael@paquier.xyz 4539 :UNC 0 : StandbyAcquireAccessExclusiveLock(XidFromFullTransactionId(fxid),
4540 : : locktag->locktag_field1 /* dboid */ ,
4541 : : locktag->locktag_field2 /* reloid */ );
4542 : : }
5981 simon@2ndQuadrant.co 4543 :UBC 0 : }
4544 : :
4545 : :
4546 : : /*
4547 : : * 2PC processing routine for COMMIT PREPARED case.
4548 : : *
4549 : : * Find and release the lock indicated by the 2PC record.
4550 : : */
4551 : : void
302 michael@paquier.xyz 4552 :GNC 814 : lock_twophase_postcommit(FullTransactionId fxid, uint16 info,
4553 : : void *recdata, uint32 len)
4554 : : {
7627 tgl@sss.pgh.pa.us 4555 :CBC 814 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
302 michael@paquier.xyz 4556 :GNC 814 : PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4557 : : LOCKTAG *locktag;
4558 : : LOCKMETHODID lockmethodid;
4559 : : LockMethod lockMethodTable;
4560 : :
7627 tgl@sss.pgh.pa.us 4561 [ - + ]:CBC 814 : Assert(len == sizeof(TwoPhaseLockRecord));
4562 : 814 : locktag = &rec->locktag;
4563 : 814 : lockmethodid = locktag->locktag_lockmethodid;
4564 : :
7452 4565 [ + - - + ]: 814 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
7627 tgl@sss.pgh.pa.us 4566 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
7452 tgl@sss.pgh.pa.us 4567 :CBC 814 : lockMethodTable = LockMethods[lockmethodid];
4568 : :
5456 rhaas@postgresql.org 4569 : 814 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
7627 tgl@sss.pgh.pa.us 4570 : 814 : }
4571 : :
4572 : : /*
4573 : : * 2PC processing routine for ROLLBACK PREPARED case.
4574 : : *
4575 : : * This is actually just the same as the COMMIT case.
4576 : : */
4577 : : void
302 michael@paquier.xyz 4578 :GNC 168 : lock_twophase_postabort(FullTransactionId fxid, uint16 info,
4579 : : void *recdata, uint32 len)
4580 : : {
4581 : 168 : lock_twophase_postcommit(fxid, info, recdata, len);
7627 tgl@sss.pgh.pa.us 4582 :CBC 168 : }
4583 : :
4584 : : /*
4585 : : * VirtualXactLockTableInsert
4586 : : *
4587 : : * Take vxid lock via the fast-path. There can't be any pre-existing
4588 : : * lockers, as we haven't advertised this vxid via the ProcArray yet.
4589 : : *
4590 : : * Since MyProc->fpLocalTransactionId will normally contain the same data
4591 : : * as MyProc->vxid.lxid, you might wonder if we really need both. The
4592 : : * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4593 : : * examined by procarray.c, while fpLocalTransactionId is protected by
4594 : : * fpInfoLock and is used only by the locking subsystem. Doing it this
4595 : : * way makes it easier to verify that there are no funny race conditions.
4596 : : *
4597 : : * We don't bother recording this lock in the local lock table, since it's
4598 : : * only ever released at the end of a transaction. Instead,
4599 : : * LockReleaseAll() calls VirtualXactLockTableCleanup().
4600 : : */
4601 : : void
5388 rhaas@postgresql.org 4602 : 423432 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
4603 : : {
4604 [ - + ]: 423432 : Assert(VirtualTransactionIdIsValid(vxid));
4605 : :
2181 tgl@sss.pgh.pa.us 4606 : 423432 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4607 : :
793 heikki.linnakangas@i 4608 [ - + ]: 423432 : Assert(MyProc->vxid.procNumber == vxid.procNumber);
5388 rhaas@postgresql.org 4609 [ - + ]: 423432 : Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4610 [ - + ]: 423432 : Assert(MyProc->fpVXIDLock == false);
4611 : :
4612 : 423432 : MyProc->fpVXIDLock = true;
4613 : 423432 : MyProc->fpLocalTransactionId = vxid.localTransactionId;
4614 : :
2181 tgl@sss.pgh.pa.us 4615 : 423432 : LWLockRelease(&MyProc->fpInfoLock);
5388 rhaas@postgresql.org 4616 : 423432 : }
4617 : :
4618 : : /*
4619 : : * VirtualXactLockTableCleanup
4620 : : *
4621 : : * Check whether a VXID lock has been materialized; if so, release it,
4622 : : * unblocking waiters.
4623 : : */
4624 : : void
4541 tgl@sss.pgh.pa.us 4625 : 423954 : VirtualXactLockTableCleanup(void)
4626 : : {
4627 : : bool fastpath;
4628 : : LocalTransactionId lxid;
4629 : :
793 heikki.linnakangas@i 4630 [ - + ]: 423954 : Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4631 : :
4632 : : /*
4633 : : * Clean up shared memory state.
4634 : : */
2181 tgl@sss.pgh.pa.us 4635 : 423954 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4636 : :
5388 rhaas@postgresql.org 4637 : 423954 : fastpath = MyProc->fpVXIDLock;
4638 : 423954 : lxid = MyProc->fpLocalTransactionId;
4639 : 423954 : MyProc->fpVXIDLock = false;
4640 : 423954 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4641 : :
2181 tgl@sss.pgh.pa.us 4642 : 423954 : LWLockRelease(&MyProc->fpInfoLock);
4643 : :
4644 : : /*
4645 : : * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4646 : : * that means someone transferred the lock to the main lock table.
4647 : : */
5388 rhaas@postgresql.org 4648 [ + + + + ]: 423954 : if (!fastpath && LocalTransactionIdIsValid(lxid))
4649 : : {
4650 : : VirtualTransactionId vxid;
4651 : : LOCKTAG locktag;
4652 : :
793 heikki.linnakangas@i 4653 : 300 : vxid.procNumber = MyProcNumber;
5388 rhaas@postgresql.org 4654 : 300 : vxid.localTransactionId = lxid;
4655 : 300 : SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4656 : :
4657 : 300 : LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4658 : : &locktag, ExclusiveLock, false);
4659 : : }
4660 : 423954 : }
4661 : :
4662 : : /*
4663 : : * XactLockForVirtualXact
4664 : : *
4665 : : * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4666 : : * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4667 : : * functions, it assumes "xid" is never a subtransaction and that "xid" is
4668 : : * prepared, committed, or aborted.
4669 : : *
4670 : : * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4671 : : * known as "vxid" before its PREPARE TRANSACTION.
4672 : : */
4673 : : static bool
1655 noah@leadboat.com 4674 : 320 : XactLockForVirtualXact(VirtualTransactionId vxid,
4675 : : TransactionId xid, bool wait)
4676 : : {
4677 : 320 : bool more = false;
4678 : :
4679 : : /* There is no point to wait for 2PCs if you have no 2PCs. */
4680 [ + + ]: 320 : if (max_prepared_xacts == 0)
4681 : 134 : return true;
4682 : :
4683 : : do
4684 : : {
4685 : : LockAcquireResult lar;
4686 : : LOCKTAG tag;
4687 : :
4688 : : /* Clear state from previous iterations. */
4689 [ - + ]: 186 : if (more)
4690 : : {
1655 noah@leadboat.com 4691 :UBC 0 : xid = InvalidTransactionId;
4692 : 0 : more = false;
4693 : : }
4694 : :
4695 : : /* If we have no xid, try to find one. */
1655 noah@leadboat.com 4696 [ + + ]:CBC 186 : if (!TransactionIdIsValid(xid))
4697 : 87 : xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4698 [ + + ]: 186 : if (!TransactionIdIsValid(xid))
4699 : : {
4700 [ - + ]: 70 : Assert(!more);
4701 : 70 : return true;
4702 : : }
4703 : :
4704 : : /* Check or wait for XID completion. */
4705 : 116 : SET_LOCKTAG_TRANSACTION(tag, xid);
4706 : 116 : lar = LockAcquire(&tag, ShareLock, false, !wait);
4707 [ - + ]: 116 : if (lar == LOCKACQUIRE_NOT_AVAIL)
1655 noah@leadboat.com 4708 :UBC 0 : return false;
1655 noah@leadboat.com 4709 :CBC 116 : LockRelease(&tag, ShareLock, false);
4710 [ - + ]: 116 : } while (more);
4711 : :
4712 : 116 : return true;
4713 : : }
4714 : :
4715 : : /*
4716 : : * VirtualXactLock
4717 : : *
4718 : : * If wait = true, wait as long as the given VXID or any XID acquired by the
4719 : : * same transaction is still running. Then, return true.
4720 : : *
4721 : : * If wait = false, just check whether that VXID or one of those XIDs is still
4722 : : * running, and return true or false.
4723 : : */
4724 : : bool
5388 rhaas@postgresql.org 4725 : 373 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
4726 : : {
4727 : : LOCKTAG tag;
4728 : : PGPROC *proc;
1655 noah@leadboat.com 4729 : 373 : TransactionId xid = InvalidTransactionId;
4730 : :
5388 rhaas@postgresql.org 4731 [ - + ]: 373 : Assert(VirtualTransactionIdIsValid(vxid));
4732 : :
1655 noah@leadboat.com 4733 [ + + ]: 373 : if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4734 : : /* no vxid lock; localTransactionId is a normal, locked XID */
4735 : 1 : return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4736 : :
5388 rhaas@postgresql.org 4737 : 372 : SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4738 : :
4739 : : /*
4740 : : * If a lock table entry must be made, this is the PGPROC on whose behalf
4741 : : * it must be done. Note that the transaction might end or the PGPROC
4742 : : * might be reassigned to a new backend before we get around to examining
4743 : : * it, but it doesn't matter. If we find upon examination that the
4744 : : * relevant lxid is no longer running here, that's enough to prove that
4745 : : * it's no longer running anywhere.
4746 : : */
793 heikki.linnakangas@i 4747 : 372 : proc = ProcNumberGetProc(vxid.procNumber);
5320 rhaas@postgresql.org 4748 [ + + ]: 372 : if (proc == NULL)
1655 noah@leadboat.com 4749 : 4 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4750 : :
4751 : : /*
4752 : : * We must acquire this lock before checking the procNumber and lxid
4753 : : * against the ones we're waiting for. The target backend will only set
4754 : : * or clear lxid while holding this lock.
4755 : : */
2181 tgl@sss.pgh.pa.us 4756 : 368 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4757 : :
793 heikki.linnakangas@i 4758 [ + - ]: 368 : if (proc->vxid.procNumber != vxid.procNumber
5388 rhaas@postgresql.org 4759 [ + + ]: 368 : || proc->fpLocalTransactionId != vxid.localTransactionId)
4760 : : {
4761 : : /* VXID ended */
2181 tgl@sss.pgh.pa.us 4762 : 34 : LWLockRelease(&proc->fpInfoLock);
1655 noah@leadboat.com 4763 : 34 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4764 : : }
4765 : :
4766 : : /*
4767 : : * If we aren't asked to wait, there's no need to set up a lock table
4768 : : * entry. The transaction is still in progress, so just return false.
4769 : : */
5388 rhaas@postgresql.org 4770 [ + + ]: 334 : if (!wait)
4771 : : {
2181 tgl@sss.pgh.pa.us 4772 : 28 : LWLockRelease(&proc->fpInfoLock);
5388 rhaas@postgresql.org 4773 : 28 : return false;
4774 : : }
4775 : :
4776 : : /*
4777 : : * OK, we're going to need to sleep on the VXID. But first, we must set
4778 : : * up the primary lock table entry, if needed (ie, convert the proc's
4779 : : * fast-path lock on its VXID to a regular lock).
4780 : : */
4781 [ + + ]: 306 : if (proc->fpVXIDLock)
4782 : : {
4783 : : PROCLOCK *proclock;
4784 : : uint32 hashcode;
4785 : : LWLock *partitionLock;
4786 : :
4787 : 300 : hashcode = LockTagHashCode(&tag);
4788 : :
4865 tgl@sss.pgh.pa.us 4789 : 300 : partitionLock = LockHashPartitionLock(hashcode);
4790 : 300 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4791 : :
5388 rhaas@postgresql.org 4792 : 300 : proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4793 : : &tag, hashcode, ExclusiveLock);
4794 [ - + ]: 300 : if (!proclock)
4795 : : {
4865 tgl@sss.pgh.pa.us 4796 :UBC 0 : LWLockRelease(partitionLock);
2181 4797 : 0 : LWLockRelease(&proc->fpInfoLock);
5388 rhaas@postgresql.org 4798 [ # # ]: 0 : ereport(ERROR,
4799 : : (errcode(ERRCODE_OUT_OF_MEMORY),
4800 : : errmsg("out of shared memory"),
4801 : : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4802 : : }
5388 rhaas@postgresql.org 4803 :CBC 300 : GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4804 : :
4865 tgl@sss.pgh.pa.us 4805 : 300 : LWLockRelease(partitionLock);
4806 : :
5388 rhaas@postgresql.org 4807 : 300 : proc->fpVXIDLock = false;
4808 : : }
4809 : :
4810 : : /*
4811 : : * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4812 : : * search. The proc might have assigned this XID but not yet locked it,
4813 : : * in which case the proc will lock this XID before releasing the VXID.
4814 : : * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4815 : : * so we won't save an XID of a different VXID. It doesn't matter whether
4816 : : * we save this before or after setting up the primary lock table entry.
4817 : : */
1655 noah@leadboat.com 4818 : 306 : xid = proc->xid;
4819 : :
4820 : : /* Done with proc->fpLockBits */
2181 tgl@sss.pgh.pa.us 4821 : 306 : LWLockRelease(&proc->fpInfoLock);
4822 : :
4823 : : /* Time to wait. */
5388 rhaas@postgresql.org 4824 : 306 : (void) LockAcquire(&tag, ShareLock, false, false);
4825 : :
4826 : 281 : LockRelease(&tag, ShareLock, false);
1655 noah@leadboat.com 4827 : 281 : return XactLockForVirtualXact(vxid, xid, wait);
4828 : : }
4829 : :
4830 : : /*
4831 : : * LockWaiterCount
4832 : : *
4833 : : * Find the number of lock requester on this locktag
4834 : : */
4835 : : int
3679 rhaas@postgresql.org 4836 : 92877 : LockWaiterCount(const LOCKTAG *locktag)
4837 : : {
4838 : 92877 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4839 : : LOCK *lock;
4840 : : bool found;
4841 : : uint32 hashcode;
4842 : : LWLock *partitionLock;
4843 : 92877 : int waiters = 0;
4844 : :
4845 [ + - - + ]: 92877 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
3679 rhaas@postgresql.org 4846 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4847 : :
3679 rhaas@postgresql.org 4848 :CBC 92877 : hashcode = LockTagHashCode(locktag);
4849 : 92877 : partitionLock = LockHashPartitionLock(hashcode);
4850 : 92877 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4851 : :
4852 : 92877 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4853 : : locktag,
4854 : : hashcode,
4855 : : HASH_FIND,
4856 : : &found);
4857 [ + + ]: 92877 : if (found)
4858 : : {
4859 [ - + ]: 21 : Assert(lock != NULL);
4860 : 21 : waiters = lock->nRequested;
4861 : : }
4862 : 92877 : LWLockRelease(partitionLock);
4863 : :
4864 : 92877 : return waiters;
4865 : : }
|