Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * lock.c
4 : : * POSTGRES primary lock mechanism
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/storage/lmgr/lock.c
12 : : *
13 : : * NOTES
14 : : * A lock table is a shared memory hash table. When
15 : : * a process tries to acquire a lock of a type that conflicts
16 : : * with existing locks, it is put to sleep using the routines
17 : : * in storage/lmgr/proc.c.
18 : : *
19 : : * For the most part, this code should be invoked via lmgr.c
20 : : * or another lock-management module, not directly.
21 : : *
22 : : * Interface:
23 : : *
24 : : * LockManagerShmemInit(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 : : * LockAcquire(), LockRelease(), LockReleaseAll(),
26 : : * LockCheckConflicts(), GrantLock()
27 : : *
28 : : *-------------------------------------------------------------------------
29 : : */
30 : : #include "postgres.h"
31 : :
32 : : #include <signal.h>
33 : : #include <unistd.h>
34 : :
35 : : #include "access/transam.h"
36 : : #include "access/twophase.h"
37 : : #include "access/twophase_rmgr.h"
38 : : #include "access/xlog.h"
39 : : #include "access/xlogutils.h"
40 : : #include "miscadmin.h"
41 : : #include "pg_trace.h"
42 : : #include "storage/lmgr.h"
43 : : #include "storage/proc.h"
44 : : #include "storage/procarray.h"
45 : : #include "storage/spin.h"
46 : : #include "storage/standby.h"
47 : : #include "utils/memutils.h"
48 : : #include "utils/ps_status.h"
49 : : #include "utils/resowner.h"
50 : :
51 : :
52 : : /* GUC variables */
53 : : int max_locks_per_xact; /* used to set the lock table size */
54 : : bool log_lock_failures = false;
55 : :
56 : : #define NLOCKENTS() \
57 : : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
58 : :
59 : :
60 : : /*
61 : : * Data structures defining the semantics of the standard lock methods.
62 : : *
63 : : * The conflict table defines the semantics of the various lock modes.
64 : : */
static const LOCKMASK LockConflicts[] = {
	/* Slot 0 is not a valid lock mode (modes start at 1); conflicts with nothing. */
	0,

	/* AccessShareLock */
	LOCKBIT_ON(AccessExclusiveLock),

	/* RowShareLock */
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* RowExclusiveLock */
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareUpdateExclusiveLock (note: self-conflicting) */
	LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareLock */
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareRowExclusiveLock (note: self-conflicting) */
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ExclusiveLock (conflicts with everything except AccessShareLock) */
	LOCKBIT_ON(RowShareLock) |
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* AccessExclusiveLock (conflicts with every mode, including itself) */
	LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)

};
106 : :
107 : : /* Names of lock modes, for debug printouts */
static const char *const lock_mode_names[] =
{
	/* Indexed by LOCKMODE number; slot 0 is not a valid lock mode. */
	"INVALID",
	"AccessShareLock",
	"RowShareLock",
	"RowExclusiveLock",
	"ShareUpdateExclusiveLock",
	"ShareLock",
	"ShareRowExclusiveLock",
	"ExclusiveLock",
	"AccessExclusiveLock"
};
120 : :
121 : : #ifndef LOCK_DEBUG
122 : : static bool Dummy_trace = false;
123 : : #endif
124 : :
/* Standard lock method: ordinary table/object locks. */
static const LockMethodData default_lockmethod = {
	MaxLockMode,
	LockConflicts,
	lock_mode_names,
#ifdef LOCK_DEBUG
	&Trace_locks
#else
	&Dummy_trace
#endif
};

/*
 * User (advisory) lock method: same modes and conflict rules as the default
 * method, but traced via a separate flag.
 */
static const LockMethodData user_lockmethod = {
	MaxLockMode,
	LockConflicts,
	lock_mode_names,
#ifdef LOCK_DEBUG
	&Trace_userlocks
#else
	&Dummy_trace
#endif
};

/*
 * map from lock method id to the lock table data structures
 */
static const LockMethod LockMethods[] = {
	NULL,						/* lock method ids start at 1 */
	&default_lockmethod,
	&user_lockmethod
};
155 : :
156 : :
157 : : /* Record that's written to 2PC state file when a lock is persisted */
158 : : typedef struct TwoPhaseLockRecord
159 : : {
160 : : LOCKTAG locktag;
161 : : LOCKMODE lockmode;
162 : : } TwoPhaseLockRecord;
163 : :
164 : :
165 : : /*
166 : : * Count of the number of fast path lock slots we believe to be used. This
167 : : * might be higher than the real number if another backend has transferred
168 : : * our locks to the primary lock table, but it can never be lower than the
169 : : * real value, since only we can acquire locks on our own behalf.
170 : : *
171 : : * XXX Allocate a static array of the maximum size. We could use a pointer
172 : : * and then allocate just the right size to save a couple kB, but then we
173 : : * would have to initialize that, while for the static array that happens
174 : : * automatically. Doesn't seem worth the extra complexity.
175 : : */
176 : : static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX];
177 : :
178 : : /*
179 : : * Flag to indicate if the relation extension lock is held by this backend.
180 : : * This flag is used to ensure that while holding the relation extension lock
181 : : * we don't try to acquire a heavyweight lock on any other object. This
182 : : * restriction implies that the relation extension lock won't ever participate
183 : : * in the deadlock cycle because we can never wait for any other heavyweight
184 : : * lock after acquiring this lock.
185 : : *
186 : : * Such a restriction is okay for relation extension locks as unlike other
187 : : * heavyweight locks these are not held till the transaction end. These are
188 : : * taken for a short duration to extend a particular relation and then
189 : : * released.
190 : : */
191 : : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
192 : :
193 : : /*
194 : : * Number of fast-path locks per backend - size of the arrays in PGPROC.
195 : : * This is set only once during start, before initializing shared memory,
196 : : * and remains constant after that.
197 : : *
198 : : * We set the limit based on max_locks_per_transaction GUC, because that's
199 : : * the best information about expected number of locks per backend we have.
200 : : * See InitializeFastPathLocks() for details.
201 : : */
202 : : int FastPathLockGroupsPerBackend = 0;
203 : :
204 : : /*
205 : : * Macros to calculate the fast-path group and index for a relation.
206 : : *
207 : : * The formula is a simple hash function, designed to spread the OIDs a bit,
208 : : * so that even contiguous values end up in different groups. In most cases
209 : : * there will be gaps anyway, but the multiplication should help a bit.
210 : : *
211 : : * The selected constant (49157) is a prime not too close to 2^k, and it's
212 : : * small enough to not cause overflows (in 64-bit).
213 : : *
214 : : * We can assume that FastPathLockGroupsPerBackend is a power-of-two per
215 : : * InitializeFastPathLocks().
216 : : */
217 : : #define FAST_PATH_REL_GROUP(rel) \
218 : : (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
219 : :
220 : : /*
221 : : * Given the group/slot indexes, calculate the slot index in the whole array
222 : : * of fast-path lock slots.
223 : : */
224 : : #define FAST_PATH_SLOT(group, index) \
225 : : (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
226 : : AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
227 : : ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
228 : :
229 : : /*
230 : : * Given a slot index (into the whole per-backend array), calculated using
231 : : * the FAST_PATH_SLOT macro, split it into group and index (in the group).
232 : : */
233 : : #define FAST_PATH_GROUP(index) \
234 : : (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
235 : : ((index) / FP_LOCK_SLOTS_PER_GROUP))
236 : : #define FAST_PATH_INDEX(index) \
237 : : (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
238 : : ((index) % FP_LOCK_SLOTS_PER_GROUP))
239 : :
240 : : /* Macros for manipulating proc->fpLockBits */
/* Each fast-path slot stores one bit per eligible lock mode. */
#define FAST_PATH_BITS_PER_SLOT 3
/* Lock mode numbers start at 1, so subtract this offset to get a bit index. */
#define FAST_PATH_LOCKNUMBER_OFFSET 1
/* Mask selecting the FAST_PATH_BITS_PER_SLOT bits of a single slot. */
#define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
/* The uint64 bit-group word holding slot n's bits. */
#define FAST_PATH_BITS(proc, n) (proc)->fpLockBits[FAST_PATH_GROUP(n)]
/* Extract slot n's mode bits, right-justified. */
#define FAST_PATH_GET_BITS(proc, n) \
	((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
/* Bit position of lock mode l within slot n's group word (asserts range). */
#define FAST_PATH_BIT_POSITION(n, l) \
	(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
	 AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
	 AssertMacro((n) < FastPathLockSlotsPerBackend()), \
	 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
/* Set / clear / test the bit recording mode l held via slot n. */
#define FAST_PATH_SET_LOCKMODE(proc, n, l) \
	 FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
	 FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
	 (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
258 : :
259 : : /*
260 : : * The fast-path lock mechanism is concerned only with relation locks on
261 : : * unshared relations by backends bound to a database. The fast-path
262 : : * mechanism exists mostly to accelerate acquisition and release of locks
263 : : * that rarely conflict. Because ShareUpdateExclusiveLock is
264 : : * self-conflicting, it can't use the fast-path mechanism; but it also does
265 : : * not conflict with any of the locks that do, so we can ignore it completely.
266 : : */
267 : : #define EligibleForRelationFastPath(locktag, mode) \
268 : : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
269 : : (locktag)->locktag_type == LOCKTAG_RELATION && \
270 : : (locktag)->locktag_field1 == MyDatabaseId && \
271 : : MyDatabaseId != InvalidOid && \
272 : : (mode) < ShareUpdateExclusiveLock)
273 : : #define ConflictsWithRelationFastPath(locktag, mode) \
274 : : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
275 : : (locktag)->locktag_type == LOCKTAG_RELATION && \
276 : : (locktag)->locktag_field1 != InvalidOid && \
277 : : (mode) > ShareUpdateExclusiveLock)
278 : :
279 : : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
280 : : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
281 : : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
282 : : const LOCKTAG *locktag, uint32 hashcode);
283 : : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
284 : :
285 : : /*
286 : : * To make the fast-path lock mechanism work, we must have some way of
287 : : * preventing the use of the fast-path when a conflicting lock might be present.
288 : : * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
289 : : * and maintain an integer count of the number of "strong" lockers
290 : : * in each partition. When any "strong" lockers are present (which is
291 : : * hopefully not very often), the fast-path mechanism can't be used, and we
292 : : * must fall back to the slower method of pushing matching locks directly
293 : : * into the main lock tables.
294 : : *
295 : : * The deadlock detector does not know anything about the fast path mechanism,
296 : : * so any locks that might be involved in a deadlock must be transferred from
297 : : * the fast-path queues to the main lock table.
298 : : */
299 : :
300 : : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
301 : : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
302 : : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
303 : : #define FastPathStrongLockHashPartition(hashcode) \
304 : : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
305 : :
306 : : typedef struct
307 : : {
308 : : slock_t mutex;
309 : : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
310 : : } FastPathStrongRelationLockData;
311 : :
312 : : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
313 : :
314 : :
315 : : /*
316 : : * Pointers to hash tables containing lock state
317 : : *
318 : : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
319 : : * shared memory; LockMethodLocalHash is local to each backend.
320 : : */
321 : : static HTAB *LockMethodLockHash;
322 : : static HTAB *LockMethodProcLockHash;
323 : : static HTAB *LockMethodLocalHash;
324 : :
325 : :
326 : : /* private state for error cleanup */
327 : : static LOCALLOCK *StrongLockInProgress;
328 : : static LOCALLOCK *awaitedLock;
329 : : static ResourceOwner awaitedOwner;
330 : :
331 : :
332 : : #ifdef LOCK_DEBUG
333 : :
334 : : /*------
335 : : * The following configuration options are available for lock debugging:
336 : : *
337 : : * TRACE_LOCKS -- give a bunch of output what's going on in this file
338 : : * TRACE_USERLOCKS -- same but for user locks
339 : : * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
340 : : * (use to avoid output on system tables)
341 : : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
342 : : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
343 : : *
344 : : * Furthermore, but in storage/lmgr/lwlock.c:
345 : : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
346 : : *
347 : : * Define LOCK_DEBUG at compile time to get all these enabled.
348 : : * --------
349 : : */
350 : :
351 : : int Trace_lock_oidmin = FirstNormalObjectId;
352 : : bool Trace_locks = false;
353 : : bool Trace_userlocks = false;
354 : : int Trace_lock_table = 0;
355 : : bool Debug_deadlocks = false;
356 : :
357 : :
358 : : inline static bool
359 : : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
360 : : {
361 : : return
362 : : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
363 : : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
364 : : || (Trace_lock_table &&
365 : : (tag->locktag_field2 == Trace_lock_table));
366 : : }
367 : :
368 : :
/*
 * Emit a LOG line describing a shared LOCK entry, gated on
 * LOCK_DEBUG_ENABLED for the lock's tag.  Slots 1..7 of the requested[]
 * and granted[] per-mode counters are printed, along with the totals,
 * the grant mask, the wait-queue length, and the name of 'type'.
 */
inline static void
LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
{
	if (LOCK_DEBUG_ENABLED(&lock->tag))
		elog(LOG,
			 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
			 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
			 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
			 where, lock,
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3, lock->tag.locktag_field4,
			 lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
			 lock->grantMask,
			 lock->requested[1], lock->requested[2], lock->requested[3],
			 lock->requested[4], lock->requested[5], lock->requested[6],
			 lock->requested[7], lock->nRequested,
			 lock->granted[1], lock->granted[2], lock->granted[3],
			 lock->granted[4], lock->granted[5], lock->granted[6],
			 lock->granted[7], lock->nGranted,
			 dclist_count(&lock->waitProcs),
			 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
}
391 : :
392 : :
/*
 * Emit a LOG line describing a PROCLOCK entry, gated on the same
 * LOCK_DEBUG_ENABLED test applied to the parent lock's tag.
 */
inline static void
PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
	if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
		elog(LOG,
			 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
			 where, proclockP, proclockP->tag.myLock,
			 PROCLOCK_LOCKMETHOD(*(proclockP)),
			 proclockP->tag.myProc, (int) proclockP->holdMask);
}
403 : : #else /* not LOCK_DEBUG */
404 : :
405 : : #define LOCK_PRINT(where, lock, type) ((void) 0)
406 : : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
407 : : #endif /* not LOCK_DEBUG */
408 : :
409 : :
410 : : static uint32 proclock_hash(const void *key, Size keysize);
411 : : static void RemoveLocalLock(LOCALLOCK *locallock);
412 : : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
413 : : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
414 : : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
415 : : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
416 : : static void FinishStrongLockAcquire(void);
417 : : static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
418 : : static void waitonlock_error_callback(void *arg);
419 : : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
420 : : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
421 : : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
422 : : PROCLOCK *proclock, LockMethod lockMethodTable);
423 : : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
424 : : LockMethod lockMethodTable, uint32 hashcode,
425 : : bool wakeupNeeded);
426 : : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
427 : : LOCKTAG *locktag, LOCKMODE lockmode,
428 : : bool decrement_strong_lock_count);
429 : : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
430 : : BlockedProcsData *data);
431 : :
432 : :
433 : : /*
434 : : * Initialize the lock manager's shmem data structures.
435 : : *
436 : : * This is called from CreateSharedMemoryAndSemaphores(), which see for more
437 : : * comments. In the normal postmaster case, the shared hash tables are
438 : : * created here, and backends inherit pointers to them via fork(). In the
439 : : * EXEC_BACKEND case, each backend re-executes this code to obtain pointers to
440 : : * the already existing shared hash tables. In either case, each backend must
441 : : * also call InitLockManagerAccess() to create the locallock hash table.
442 : : */
443 : : void
373 heikki.linnakangas@i 444 :CBC 1029 : LockManagerShmemInit(void)
445 : : {
446 : : HASHCTL info;
447 : : int64 init_table_size,
448 : : max_table_size;
449 : : bool found;
450 : :
451 : : /*
452 : : * Compute init/max size to request for lock hashtables. Note these
453 : : * calculations must agree with LockManagerShmemSize!
454 : : */
7386 tgl@sss.pgh.pa.us 455 : 1029 : max_table_size = NLOCKENTS();
7648 456 : 1029 : init_table_size = max_table_size / 2;
457 : :
458 : : /*
459 : : * Allocate hash table for LOCK structs. This stores per-locked-object
460 : : * information.
461 : : */
8741 462 : 1029 : info.keysize = sizeof(LOCKTAG);
463 : 1029 : info.entrysize = sizeof(LOCK);
6985 464 : 1029 : info.num_partitions = NUM_LOCK_PARTITIONS;
465 : :
466 : 1029 : LockMethodLockHash = ShmemInitHash("LOCK hash",
467 : : init_table_size,
468 : : max_table_size,
469 : : &info,
470 : : HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
471 : :
472 : : /* Assume an average of 2 holders per lock */
7209 473 : 1029 : max_table_size *= 2;
474 : 1029 : init_table_size *= 2;
475 : :
476 : : /*
477 : : * Allocate hash table for PROCLOCK structs. This stores
478 : : * per-lock-per-holder information.
479 : : */
8450 bruce@momjian.us 480 : 1029 : info.keysize = sizeof(PROCLOCKTAG);
481 : 1029 : info.entrysize = sizeof(PROCLOCK);
6985 tgl@sss.pgh.pa.us 482 : 1029 : info.hash = proclock_hash;
483 : 1029 : info.num_partitions = NUM_LOCK_PARTITIONS;
484 : :
485 : 1029 : LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
486 : : init_table_size,
487 : : max_table_size,
488 : : &info,
489 : : HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
490 : :
491 : : /*
492 : : * Allocate fast-path structures.
493 : : */
5163 rhaas@postgresql.org 494 : 1029 : FastPathStrongRelationLocks =
495 : 1029 : ShmemInitStruct("Fast Path Strong Relation Lock Data",
496 : : sizeof(FastPathStrongRelationLockData), &found);
5215 497 [ + - ]: 1029 : if (!found)
5163 498 : 1029 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
373 heikki.linnakangas@i 499 : 1029 : }
500 : :
501 : : /*
502 : : * Initialize the lock manager's backend-private data structures.
503 : : */
504 : : void
505 : 18766 : InitLockManagerAccess(void)
506 : : {
507 : : /*
508 : : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
509 : : * counts and resource owner information.
510 : : */
511 : : HASHCTL info;
512 : :
7680 tgl@sss.pgh.pa.us 513 : 18766 : info.keysize = sizeof(LOCALLOCKTAG);
514 : 18766 : info.entrysize = sizeof(LOCALLOCK);
515 : :
7211 516 : 18766 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
517 : : 16,
518 : : &info,
519 : : HASH_ELEM | HASH_BLOBS);
10651 scrappy@hub.org 520 : 18766 : }
521 : :
522 : :
523 : : /*
524 : : * Fetch the lock method table associated with a given lock
525 : : */
526 : : LockMethod
7211 tgl@sss.pgh.pa.us 527 : 103 : GetLocksMethodTable(const LOCK *lock)
528 : : {
529 : 103 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
530 : :
531 [ + - - + ]: 103 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
532 : 103 : return LockMethods[lockmethodid];
533 : : }
534 : :
535 : : /*
536 : : * Fetch the lock method table associated with a given locktag
537 : : */
538 : : LockMethod
3484 539 : 1135 : GetLockTagsMethodTable(const LOCKTAG *locktag)
540 : : {
541 : 1135 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
542 : :
543 [ + - - + ]: 1135 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
544 : 1135 : return LockMethods[lockmethodid];
545 : : }
546 : :
547 : :
548 : : /*
549 : : * Compute the hash code associated with a LOCKTAG.
550 : : *
551 : : * To avoid unnecessary recomputations of the hash code, we try to do this
552 : : * just once per function, and then pass it around as needed. Aside from
553 : : * passing the hashcode to hash_search_with_hash_value(), we can extract
554 : : * the lock partition number from the hashcode.
555 : : */
556 : : uint32
6985 557 : 17197138 : LockTagHashCode(const LOCKTAG *locktag)
558 : : {
274 peter@eisentraut.org 559 : 17197138 : return get_hash_value(LockMethodLockHash, locktag);
560 : : }
561 : :
562 : : /*
563 : : * Compute the hash code associated with a PROCLOCKTAG.
564 : : *
565 : : * Because we want to use just one set of partition locks for both the
566 : : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
567 : : * fall into the same partition number as their associated LOCKs.
568 : : * dynahash.c expects the partition number to be the low-order bits of
569 : : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
570 : : * same low-order bits as the associated LOCKTAG's hash code. We achieve
571 : : * this with this specialized hash function.
572 : : */
573 : : static uint32
6985 tgl@sss.pgh.pa.us 574 : 653 : proclock_hash(const void *key, Size keysize)
575 : : {
576 : 653 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
577 : : uint32 lockhash;
578 : : Datum procptr;
579 : :
580 [ - + ]: 653 : Assert(keysize == sizeof(PROCLOCKTAG));
581 : :
582 : : /* Look into the associated LOCK object, and compute its hash code */
583 : 653 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
584 : :
585 : : /*
586 : : * To make the hash code also depend on the PGPROC, we xor the proc
587 : : * struct's address into the hash code, left-shifted so that the
588 : : * partition-number bits don't change. Since this is only a hash, we
589 : : * don't care if we lose high-order bits of the address; use an
590 : : * intermediate variable to suppress cast-pointer-to-int warnings.
591 : : */
592 : 653 : procptr = PointerGetDatum(proclocktag->myProc);
29 peter@eisentraut.org 593 :GNC 653 : lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
594 : :
6985 tgl@sss.pgh.pa.us 595 :CBC 653 : return lockhash;
596 : : }
597 : :
598 : : /*
599 : : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
600 : : * for its underlying LOCK.
601 : : *
602 : : * We use this just to avoid redundant calls of LockTagHashCode().
603 : : */
604 : : static inline uint32
605 : 4061573 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
606 : : {
6912 bruce@momjian.us 607 : 4061573 : uint32 lockhash = hashcode;
608 : : Datum procptr;
609 : :
610 : : /*
611 : : * This must match proclock_hash()!
612 : : */
6985 tgl@sss.pgh.pa.us 613 : 4061573 : procptr = PointerGetDatum(proclocktag->myProc);
29 peter@eisentraut.org 614 :GNC 4061573 : lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
615 : :
6985 tgl@sss.pgh.pa.us 616 :CBC 4061573 : return lockhash;
617 : : }
618 : :
619 : : /*
620 : : * Given two lock modes, return whether they would conflict.
621 : : */
622 : : bool
4609 alvherre@alvh.no-ip. 623 : 143683 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
624 : : {
625 : 143683 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
626 : :
627 [ + + ]: 143683 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
628 : 143581 : return true;
629 : :
630 : 102 : return false;
631 : : }
632 : :
633 : : /*
634 : : * LockHeldByMe -- test whether lock 'locktag' is held by the current
635 : : * transaction
636 : : *
637 : : * Returns true if current transaction holds a lock on 'tag' of mode
638 : : * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
639 : : * ("Stronger" is defined as "numerically higher", which is a bit
640 : : * semantically dubious but is OK for the purposes we use this for.)
641 : : */
642 : : bool
436 noah@leadboat.com 643 : 4604339 : LockHeldByMe(const LOCKTAG *locktag,
644 : : LOCKMODE lockmode, bool orstronger)
645 : : {
646 : : LOCALLOCKTAG localtag;
647 : : LOCALLOCK *locallock;
648 : :
649 : : /*
650 : : * See if there is a LOCALLOCK entry for this lock and lockmode
651 : : */
2532 tgl@sss.pgh.pa.us 652 [ + - - + : 4604339 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
- - - - -
- ]
653 : 4604339 : localtag.lock = *locktag;
654 : 4604339 : localtag.mode = lockmode;
655 : :
656 : 4604339 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
657 : : &localtag,
658 : : HASH_FIND, NULL);
659 : :
436 noah@leadboat.com 660 [ + + + - ]: 4604339 : if (locallock && locallock->nLocks > 0)
661 : 2334515 : return true;
662 : :
663 [ + + ]: 2269824 : if (orstronger)
664 : : {
665 : : LOCKMODE slockmode;
666 : :
667 : 754775 : for (slockmode = lockmode + 1;
668 [ + - ]: 2196597 : slockmode <= MaxLockMode;
669 : 1441822 : slockmode++)
670 : : {
671 [ + + ]: 2196597 : if (LockHeldByMe(locktag, slockmode, false))
672 : 754775 : return true;
673 : : }
674 : : }
675 : :
676 : 1515049 : return false;
677 : : }
678 : :
679 : : #ifdef USE_ASSERT_CHECKING
680 : : /*
681 : : * GetLockMethodLocalHash -- return the hash of local locks, for modules that
682 : : * evaluate assertions based on all locks held.
683 : : */
HTAB *
GetLockMethodLocalHash(void)
{
	/* Backend-local LOCALLOCK table, created by InitLockManagerAccess(). */
	return LockMethodLocalHash;
}
689 : : #endif
690 : :
691 : : /*
692 : : * LockHasWaiters -- look up 'locktag' and check if releasing this
693 : : * lock would wake up other processes waiting for it.
694 : : */
bool
LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
{
	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
	LockMethod	lockMethodTable;
	LOCALLOCKTAG localtag;
	LOCALLOCK  *locallock;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	LWLock	   *partitionLock;
	bool		hasWaiters = false;

	/*
	 * NOTE(review): sessionLock is never examined in this function; it
	 * appears to exist only for signature symmetry — confirm against
	 * callers before relying on that.
	 */

	/* Validate the method id and mode before using them as array indexes. */
	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];
	if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
		elog(ERROR, "unrecognized lock mode: %d", lockmode);

#ifdef LOCK_DEBUG
	if (LOCK_DEBUG_ENABLED(locktag))
		elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
			 locktag->locktag_field1, locktag->locktag_field2,
			 lockMethodTable->lockModeNames[lockmode]);
#endif

	/*
	 * Find the LOCALLOCK entry for this lock and lockmode
	 */
	MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
	localtag.lock = *locktag;
	localtag.mode = lockmode;

	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  &localtag,
										  HASH_FIND, NULL);

	/*
	 * let the caller print its own error message, too. Do not ereport(ERROR).
	 */
	if (!locallock || locallock->nLocks <= 0)
	{
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		return false;
	}

	/*
	 * Check the shared lock table.  LW_SHARED suffices: we only read shared
	 * state while holding the partition lock.
	 */
	partitionLock = LockHashPartitionLock(locallock->hashcode);

	LWLockAcquire(partitionLock, LW_SHARED);

	/*
	 * We don't need to re-find the lock or proclock, since we kept their
	 * addresses in the locallock table, and they couldn't have been removed
	 * while we were holding a lock on them.
	 */
	lock = locallock->lock;
	LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
	proclock = locallock->proclock;
	PROCLOCK_PRINT("LockHasWaiters: found", proclock);

	/*
	 * Double-check that we are actually holding a lock of the type we want to
	 * release.
	 */
	if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
	{
		PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
		LWLockRelease(partitionLock);
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		/* The stale local entry is useless; discard it. */
		RemoveLocalLock(locallock);
		return false;
	}

	/*
	 * Do the checking: report waiters if the mode we hold conflicts with
	 * any mode currently being waited for (lock->waitMask).
	 */
	if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
		hasWaiters = true;

	LWLockRelease(partitionLock);

	return hasWaiters;
}
782 : :
783 : : /*
784 : : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
785 : : * set lock if/when no conflicts.
786 : : *
787 : : * Inputs:
788 : : * locktag: unique identifier for the lockable object
789 : : * lockmode: lock mode to acquire
790 : : * sessionLock: if true, acquire lock for session not current transaction
791 : : * dontWait: if true, don't wait to acquire lock
792 : : *
793 : : * Returns one of:
794 : : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
795 : : * LOCKACQUIRE_OK lock successfully acquired
796 : : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
797 : : * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
798 : : *
799 : : * In the normal case where dontWait=false and the caller doesn't need to
800 : : * distinguish a freshly acquired lock from one already taken earlier in
801 : : * this same transaction, there is no need to examine the return value.
802 : : *
803 : : * Side Effects: The lock is acquired and recorded in lock tables.
804 : : *
805 : : * NOTE: if we wait for the lock, there is no way to abort the wait
806 : : * short of aborting the transaction.
807 : : */
808 : : LockAcquireResult
7211 tgl@sss.pgh.pa.us 809 :CBC 664742 : LockAcquire(const LOCKTAG *locktag,
810 : : LOCKMODE lockmode,
811 : : bool sessionLock,
812 : : bool dontWait)
813 : : {
2556 814 : 664742 : return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
815 : : true, NULL, false);
816 : : }
817 : :
/*
 * LockAcquireExtended - allows us to specify additional options
 *
 * reportMemoryError specifies whether a lock request that fills the lock
 * table should generate an ERROR or not.  Passing "false" allows the caller
 * to attempt to recover from lock-table-full situations, perhaps by forcibly
 * canceling other lock holders and then retrying.  Note, however, that the
 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
 * in combination with dontWait = true, as the cause of failure couldn't be
 * distinguished.
 *
 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
 * table entry if a lock is successfully acquired, or NULL if not.
 *
 * logLockFailure indicates whether to log details when a lock acquisition
 * fails with dontWait = true.
 */
LockAcquireResult
LockAcquireExtended(const LOCKTAG *locktag,
					LOCKMODE lockmode,
					bool sessionLock,
					bool dontWait,
					bool reportMemoryError,
					LOCALLOCK **locallockp,
					bool logLockFailure)
{
	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
	LockMethod	lockMethodTable;
	LOCALLOCKTAG localtag;
	LOCALLOCK  *locallock;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	bool		found;
	ResourceOwner owner;
	uint32		hashcode;
	LWLock	   *partitionLock;
	bool		found_conflict;
	ProcWaitStatus waitResult;
	bool		log_lock = false;

	/* Sanity-check the lock method and mode before touching any state. */
	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];
	if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
		elog(ERROR, "unrecognized lock mode: %d", lockmode);

	/*
	 * On a standby, refuse lock modes stronger than RowExclusiveLock on
	 * relations/objects; only the startup process (InRecovery) may take
	 * them while replaying WAL.
	 */
	if (RecoveryInProgress() && !InRecovery &&
		(locktag->locktag_type == LOCKTAG_OBJECT ||
		 locktag->locktag_type == LOCKTAG_RELATION) &&
		lockmode > RowExclusiveLock)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
						lockMethodTable->lockModeNames[lockmode]),
				 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));

#ifdef LOCK_DEBUG
	if (LOCK_DEBUG_ENABLED(locktag))
		elog(LOG, "LockAcquire: lock [%u,%u] %s",
			 locktag->locktag_field1, locktag->locktag_field2,
			 lockMethodTable->lockModeNames[lockmode]);
#endif

	/* Identify owner for lock (NULL owner means a session-level lock) */
	if (sessionLock)
		owner = NULL;
	else
		owner = CurrentResourceOwner;

	/*
	 * Find or create a LOCALLOCK entry for this lock and lockmode
	 */
	MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
	localtag.lock = *locktag;
	localtag.mode = lockmode;

	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  &localtag,
										  HASH_ENTER, &found);

	/*
	 * if it's a new locallock object, initialize it
	 */
	if (!found)
	{
		locallock->lock = NULL;
		locallock->proclock = NULL;
		locallock->hashcode = LockTagHashCode(&(localtag.lock));
		locallock->nLocks = 0;
		locallock->holdsStrongLockCount = false;
		locallock->lockCleared = false;
		locallock->numLockOwners = 0;
		locallock->maxLockOwners = 8;
		locallock->lockOwners = NULL;	/* in case next line fails */
		locallock->lockOwners = (LOCALLOCKOWNER *)
			MemoryContextAlloc(TopMemoryContext,
							   locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
	}
	else
	{
		/* Make sure there will be room to remember the lock */
		if (locallock->numLockOwners >= locallock->maxLockOwners)
		{
			int			newsize = locallock->maxLockOwners * 2;

			locallock->lockOwners = (LOCALLOCKOWNER *)
				repalloc(locallock->lockOwners,
						 newsize * sizeof(LOCALLOCKOWNER));
			locallock->maxLockOwners = newsize;
		}
	}
	/* Cache the tag's hash; used for partition and fast-path bookkeeping. */
	hashcode = locallock->hashcode;

	if (locallockp)
		*locallockp = locallock;

	/*
	 * If we already hold the lock, we can just increase the count locally.
	 *
	 * If lockCleared is already set, caller need not worry about absorbing
	 * sinval messages related to the lock's object.
	 */
	if (locallock->nLocks > 0)
	{
		GrantLockLocal(locallock, owner);
		if (locallock->lockCleared)
			return LOCKACQUIRE_ALREADY_CLEAR;
		else
			return LOCKACQUIRE_ALREADY_HELD;
	}

	/*
	 * We don't acquire any other heavyweight lock while holding the relation
	 * extension lock.  We do allow to acquire the same relation extension
	 * lock more than once but that case won't reach here.
	 */
	Assert(!IsRelationExtensionLockHeld);

	/*
	 * Prepare to emit a WAL record if acquisition of this lock needs to be
	 * replayed in a standby server.
	 *
	 * Here we prepare to log; after lock is acquired we'll issue log record.
	 * This arrangement simplifies error recovery in case the preparation step
	 * fails.
	 *
	 * Only AccessExclusiveLocks can conflict with lock types that read-only
	 * transactions can acquire in a standby server. Make sure this definition
	 * matches the one in GetRunningTransactionLocks().
	 */
	if (lockmode >= AccessExclusiveLock &&
		locktag->locktag_type == LOCKTAG_RELATION &&
		!RecoveryInProgress() &&
		XLogStandbyInfoActive())
	{
		LogAccessExclusiveLockPrepare();
		log_lock = true;
	}

	/*
	 * Attempt to take lock via fast path, if eligible.  But if we remember
	 * having filled up the fast path array, we don't attempt to make any
	 * further use of it until we release some locks.  It's possible that some
	 * other backend has transferred some of those locks to the shared hash
	 * table, leaving space free, but it's not worth acquiring the LWLock just
	 * to check.  It's also possible that we're acquiring a second or third
	 * lock type on a relation we have already locked using the fast-path, but
	 * for now we don't worry about that case either.
	 */
	if (EligibleForRelationFastPath(locktag, lockmode) &&
		FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] < FP_LOCK_SLOTS_PER_GROUP)
	{
		uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);
		bool		acquired;

		/*
		 * LWLockAcquire acts as a memory sequencing point, so it's safe to
		 * assume that any strong locker whose increment to
		 * FastPathStrongRelationLocks->counts becomes visible after we test
		 * it has yet to begin to transfer fast-path locks.
		 */
		LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
		if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
			acquired = false;
		else
			acquired = FastPathGrantRelationLock(locktag->locktag_field2,
												 lockmode);
		LWLockRelease(&MyProc->fpInfoLock);
		if (acquired)
		{
			/*
			 * The locallock might contain stale pointers to some old shared
			 * objects; we MUST reset these to null before considering the
			 * lock to be acquired via fast-path.
			 */
			locallock->lock = NULL;
			locallock->proclock = NULL;
			GrantLockLocal(locallock, owner);
			return LOCKACQUIRE_OK;
		}
	}

	/*
	 * If this lock could potentially have been taken via the fast-path by
	 * some other backend, we must (temporarily) disable further use of the
	 * fast-path for this lock tag, and migrate any locks already taken via
	 * this method to the main lock table.
	 */
	if (ConflictsWithRelationFastPath(locktag, lockmode))
	{
		uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);

		BeginStrongLockAcquire(locallock, fasthashcode);
		if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
										   hashcode))
		{
			/* Transfer failed for lack of shared memory: undo and bail out. */
			AbortStrongLockAcquire();
			if (locallock->nLocks == 0)
				RemoveLocalLock(locallock);
			if (locallockp)
				*locallockp = NULL;
			if (reportMemoryError)
				ereport(ERROR,
						(errcode(ERRCODE_OUT_OF_MEMORY),
						 errmsg("out of shared memory"),
						 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
			else
				return LOCKACQUIRE_NOT_AVAIL;
		}
	}

	/*
	 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
	 * take it via the fast-path, either, so we've got to mess with the shared
	 * lock table.
	 */
	partitionLock = LockHashPartitionLock(hashcode);

	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/*
	 * Find or create lock and proclock entries with this tag
	 *
	 * Note: if the locallock object already existed, it might have a pointer
	 * to the lock already ... but we should not assume that that pointer is
	 * valid, since a lock object with zero hold and request counts can go
	 * away anytime.  So we have to use SetupLockInTable() to recompute the
	 * lock and proclock pointers, even if they're already set.
	 */
	proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
								hashcode, lockmode);
	if (!proclock)
	{
		/* Shared lock table is full: undo and bail out. */
		AbortStrongLockAcquire();
		LWLockRelease(partitionLock);
		if (locallock->nLocks == 0)
			RemoveLocalLock(locallock);
		if (locallockp)
			*locallockp = NULL;
		if (reportMemoryError)
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of shared memory"),
					 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
		else
			return LOCKACQUIRE_NOT_AVAIL;
	}
	locallock->proclock = proclock;
	lock = proclock->tag.myLock;
	locallock->lock = lock;

	/*
	 * If lock requested conflicts with locks requested by waiters, must join
	 * wait queue.  Otherwise, check for conflict with already-held locks.
	 * (That's last because most complex check.)
	 */
	if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
		found_conflict = true;
	else
		found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
											lock, proclock);

	if (!found_conflict)
	{
		/* No conflict with held or previously requested locks */
		GrantLock(lock, proclock, lockmode);
		waitResult = PROC_WAIT_STATUS_OK;
	}
	else
	{
		/*
		 * Join the lock's wait queue.  We call this even in the dontWait
		 * case, because JoinWaitQueue() may discover that we can acquire the
		 * lock immediately after all.
		 */
		waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
	}

	if (waitResult == PROC_WAIT_STATUS_ERROR)
	{
		/*
		 * We're not getting the lock because a deadlock was detected already
		 * while trying to join the wait queue, or because we would have to
		 * wait but the caller requested no blocking.
		 *
		 * Undo the changes to shared entries before releasing the partition
		 * lock.
		 */
		AbortStrongLockAcquire();

		if (proclock->holdMask == 0)
		{
			/* We hold nothing on this lock: garbage-collect the proclock. */
			uint32		proclock_hashcode;

			proclock_hashcode = ProcLockHashCode(&proclock->tag,
												 hashcode);
			dlist_delete(&proclock->lockLink);
			dlist_delete(&proclock->procLink);
			if (!hash_search_with_hash_value(LockMethodProcLockHash,
											 &(proclock->tag),
											 proclock_hashcode,
											 HASH_REMOVE,
											 NULL))
				elog(PANIC, "proclock table corrupted");
		}
		else
			PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
		lock->nRequested--;
		lock->requested[lockmode]--;
		LOCK_PRINT("LockAcquire: did not join wait queue",
				   lock, lockmode);
		Assert((lock->nRequested > 0) &&
			   (lock->requested[lockmode] >= 0));
		Assert(lock->nGranted <= lock->nRequested);
		LWLockRelease(partitionLock);
		if (locallock->nLocks == 0)
			RemoveLocalLock(locallock);

		if (dontWait)
		{
			/*
			 * Log lock holders and waiters as a detail log message if
			 * logLockFailure = true and lock acquisition fails with dontWait
			 * = true
			 */
			if (logLockFailure)
			{
				StringInfoData buf,
							lock_waiters_sbuf,
							lock_holders_sbuf;
				const char *modename;
				int			lockHoldersNum = 0;

				initStringInfo(&buf);
				initStringInfo(&lock_waiters_sbuf);
				initStringInfo(&lock_holders_sbuf);

				DescribeLockTag(&buf, &locallock->tag.lock);
				modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
										   lockmode);

				/* Gather a list of all lock holders and waiters */
				LWLockAcquire(partitionLock, LW_SHARED);
				GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
										 &lock_waiters_sbuf, &lockHoldersNum);
				LWLockRelease(partitionLock);

				ereport(LOG,
						(errmsg("process %d could not obtain %s on %s",
								MyProcPid, modename, buf.data),
						 errdetail_log_plural(
											  "Process holding the lock: %s, Wait queue: %s.",
											  "Processes holding the lock: %s, Wait queue: %s.",
											  lockHoldersNum,
											  lock_holders_sbuf.data,
											  lock_waiters_sbuf.data)));

				pfree(buf.data);
				pfree(lock_holders_sbuf.data);
				pfree(lock_waiters_sbuf.data);
			}
			if (locallockp)
				*locallockp = NULL;
			return LOCKACQUIRE_NOT_AVAIL;
		}
		else
		{
			DeadLockReport();
			/* DeadLockReport() will not return */
		}
	}

	/*
	 * We are now in the lock queue, or the lock was already granted.  If
	 * queued, go to sleep.
	 */
	if (waitResult == PROC_WAIT_STATUS_WAITING)
	{
		Assert(!dontWait);
		PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
		LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
		LWLockRelease(partitionLock);

		waitResult = WaitOnLock(locallock, owner);

		/*
		 * NOTE: do not do any material change of state between here and
		 * return.  All required changes in locktable state must have been
		 * done when the lock was granted to us --- see notes in WaitOnLock.
		 */

		if (waitResult == PROC_WAIT_STATUS_ERROR)
		{
			/*
			 * We failed as a result of a deadlock, see CheckDeadLock().  Quit
			 * now.
			 */
			Assert(!dontWait);
			DeadLockReport();
			/* DeadLockReport() will not return */
		}
	}
	else
		LWLockRelease(partitionLock);
	Assert(waitResult == PROC_WAIT_STATUS_OK);

	/* The lock was granted to us.  Update the local lock entry accordingly */
	Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
	GrantLockLocal(locallock, owner);

	/*
	 * Lock state is fully up-to-date now; if we error out after this, no
	 * special error cleanup is required.
	 */
	FinishStrongLockAcquire();

	/*
	 * Emit a WAL record if acquisition of this lock needs to be replayed in a
	 * standby server.
	 */
	if (log_lock)
	{
		/*
		 * Decode the locktag back to the original values, to avoid sending
		 * lots of empty bytes with every message.  See lock.h to check how a
		 * locktag is defined for LOCKTAG_RELATION
		 */
		LogAccessExclusiveLock(locktag->locktag_field1,
							   locktag->locktag_field2);
	}

	return LOCKACQUIRE_OK;
}
/*
 * Find or create LOCK and PROCLOCK objects as needed for a new lock
 * request.
 *
 * Returns the PROCLOCK object, or NULL if we failed to create the objects
 * for lack of shared memory.
 *
 * The appropriate partition lock must be held at entry, and will be
 * held at exit.
 */
static PROCLOCK *
SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
				 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
{
	LOCK	   *lock;
	PROCLOCK   *proclock;
	PROCLOCKTAG proclocktag;
	uint32		proclock_hashcode;
	bool		found;

	/*
	 * Find or create a lock with this tag.
	 */
	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
												locktag,
												hashcode,
												HASH_ENTER_NULL,
												&found);
	if (!lock)
		return NULL;			/* out of shared memory */

	/*
	 * if it's a new lock object, initialize it
	 */
	if (!found)
	{
		lock->grantMask = 0;
		lock->waitMask = 0;
		dlist_init(&lock->procLocks);
		dclist_init(&lock->waitProcs);
		lock->nRequested = 0;
		lock->nGranted = 0;
		MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
		MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
		LOCK_PRINT("LockAcquire: new", lock, lockmode);
	}
	else
	{
		LOCK_PRINT("LockAcquire: found", lock, lockmode);
		Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
		Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
		Assert(lock->nGranted <= lock->nRequested);
	}

	/*
	 * Create the hash key for the proclock table.
	 */
	proclocktag.myLock = lock;
	proclocktag.myProc = proc;

	proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);

	/*
	 * Find or create a proclock entry with this tag
	 */
	proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
														&proclocktag,
														proclock_hashcode,
														HASH_ENTER_NULL,
														&found);
	if (!proclock)
	{
		/* Oops, not enough shmem for the proclock */
		if (lock->nRequested == 0)
		{
			/*
			 * There are no other requestors of this lock, so garbage-collect
			 * the lock object.  We *must* do this to avoid a permanent leak
			 * of shared memory, because there won't be anything to cause
			 * anyone to release the lock object later.
			 */
			Assert(dlist_is_empty(&(lock->procLocks)));
			if (!hash_search_with_hash_value(LockMethodLockHash,
											 &(lock->tag),
											 hashcode,
											 HASH_REMOVE,
											 NULL))
				elog(PANIC, "lock table corrupted");
		}
		return NULL;
	}

	/*
	 * If new, initialize the new entry
	 */
	if (!found)
	{
		uint32		partition = LockHashPartition(hashcode);

		/*
		 * It might seem unsafe to access proclock->groupLeader without a
		 * lock, but it's not really.  Either we are initializing a proclock
		 * on our own behalf, in which case our group leader isn't changing
		 * because the group leader for a process can only ever be changed by
		 * the process itself; or else we are transferring a fast-path lock to
		 * the main lock table, in which case that process can't change its
		 * lock group leader without first releasing all of its locks (and in
		 * particular the one we are currently transferring).
		 */
		proclock->groupLeader = proc->lockGroupLeader != NULL ?
			proc->lockGroupLeader : proc;
		proclock->holdMask = 0;
		proclock->releaseMask = 0;
		/* Add proclock to appropriate lists */
		dlist_push_tail(&lock->procLocks, &proclock->lockLink);
		dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
		PROCLOCK_PRINT("LockAcquire: new", proclock);
	}
	else
	{
		PROCLOCK_PRINT("LockAcquire: found", proclock);
		Assert((proclock->holdMask & ~lock->grantMask) == 0);

#ifdef CHECK_DEADLOCK_RISK

		/*
		 * Issue warning if we already hold a lower-level lock on this object
		 * and do not hold a lock of the requested level or higher. This
		 * indicates a deadlock-prone coding practice (eg, we'd have a
		 * deadlock if another backend were following the same code path at
		 * about the same time).
		 *
		 * This is not enabled by default, because it may generate log entries
		 * about user-level coding practices that are in fact safe in context.
		 * It can be enabled to help find system-level problems.
		 *
		 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
		 * better to use a table.  For now, though, this works.
		 */
		{
			int			i;

			for (i = lockMethodTable->numLockModes; i > 0; i--)
			{
				if (proclock->holdMask & LOCKBIT_ON(i))
				{
					if (i >= (int) lockmode)
						break;	/* safe: we have a lock >= req level */
					elog(LOG, "deadlock risk: raising lock level"
						 " from %s to %s on object %u/%u/%u",
						 lockMethodTable->lockModeNames[i],
						 lockMethodTable->lockModeNames[lockmode],
						 lock->tag.locktag_field1, lock->tag.locktag_field2,
						 lock->tag.locktag_field3);
					break;
				}
			}
		}
#endif							/* CHECK_DEADLOCK_RISK */
	}

	/*
	 * lock->nRequested and lock->requested[] count the total number of
	 * requests, whether granted or waiting, so increment those immediately.
	 * The other counts don't increment till we get the lock.
	 */
	lock->nRequested++;
	lock->requested[lockmode]++;
	Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));

	/*
	 * We shouldn't already hold the desired lock; else locallock table is
	 * broken.
	 */
	if (proclock->holdMask & LOCKBIT_ON(lockmode))
		elog(ERROR, "lock %s on object %u/%u/%u is already held",
			 lockMethodTable->lockModeNames[lockmode],
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3);

	return proclock;
}
1454 : :
/*
 * Check and set/reset the flag that we hold the relation extension lock.
 *
 * It is the caller's responsibility that this function is called after
 * acquiring/releasing the relation extension lock.
 *
 * Pass acquired as true if lock is acquired, false otherwise.
 */
static inline void
CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
{
#ifdef USE_ASSERT_CHECKING
	/* Only track extension-lock ownership in assert-enabled builds. */
	if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
		IsRelationExtensionLockHeld = acquired;
#endif
}
1471 : :
/*
 * Subroutine to free a locallock entry
 *
 * Forgets any ResourceOwner references to the locallock, decrements the
 * strong-lock counter if this entry holds one, and removes the entry from
 * the backend-local hash table.
 */
static void
RemoveLocalLock(LOCALLOCK *locallock)
{
	int			i;

	/* Detach the locallock from each resource owner that remembers it. */
	for (i = locallock->numLockOwners - 1; i >= 0; i--)
	{
		if (locallock->lockOwners[i].owner != NULL)
			ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
	}
	locallock->numLockOwners = 0;
	if (locallock->lockOwners != NULL)
		pfree(locallock->lockOwners);
	locallock->lockOwners = NULL;

	if (locallock->holdsStrongLockCount)
	{
		uint32		fasthashcode;

		fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);

		/* Decrement the shared strong-lock count under its spinlock. */
		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
		Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
		FastPathStrongRelationLocks->count[fasthashcode]--;
		locallock->holdsStrongLockCount = false;
		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
	}

	if (!hash_search(LockMethodLocalHash,
					 &(locallock->tag),
					 HASH_REMOVE, NULL))
		elog(WARNING, "locallock table corrupted");

	/*
	 * Indicate that the lock is released for certain types of locks
	 */
	CheckAndSetLockHeld(locallock, false);
}
1513 : :
/*
 * LockCheckConflicts -- test whether requested lock conflicts
 *		with those already granted
 *
 * Returns true if conflict, false if no conflict.
 *
 * NOTES:
 *		Here's what makes this complicated: one process's locks don't
 * conflict with one another, no matter what purpose they are held for
 * (eg, session and transaction locks do not conflict). Nor do the locks
 * of one process in a lock group conflict with those of another process in
 * the same group. So, we must subtract off these locks when determining
 * whether the requested new lock conflicts with those already held.
 *
 * The caller must hold the lock's partition lock; we read and compare
 * shared LOCK/PROCLOCK state here without further locking.
 */
bool
LockCheckConflicts(LockMethod lockMethodTable,
				   LOCKMODE lockmode,
				   LOCK *lock,
				   PROCLOCK *proclock)
{
	int			numLockModes = lockMethodTable->numLockModes;
	LOCKMASK	myLocks;
	int			conflictMask = lockMethodTable->conflictTab[lockmode];
	int			conflictsRemaining[MAX_LOCKMODES];
	int			totalConflictsRemaining = 0;
	dlist_iter	proclock_iter;
	int			i;

	/*
	 * first check for global conflicts: If no locks conflict with my request,
	 * then I get the lock.
	 *
	 * Checking for conflict: lock->grantMask represents the types of
	 * currently held locks.  conflictTable[lockmode] has a bit set for each
	 * type of lock that conflicts with request.  Bitwise compare tells if
	 * there is a conflict.
	 */
	if (!(conflictMask & lock->grantMask))
	{
		PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
		return false;
	}

	/*
	 * Rats.  Something conflicts.  But it could still be my own lock, or a
	 * lock held by another member of my locking group.  First, figure out how
	 * many conflicts remain after subtracting out any locks I hold myself.
	 */
	myLocks = proclock->holdMask;
	for (i = 1; i <= numLockModes; i++)
	{
		/* Modes that don't conflict with the request need no accounting. */
		if ((conflictMask & LOCKBIT_ON(i)) == 0)
		{
			conflictsRemaining[i] = 0;
			continue;
		}
		/* Start from the total grants in this mode, minus my own (if any). */
		conflictsRemaining[i] = lock->granted[i];
		if (myLocks & LOCKBIT_ON(i))
			--conflictsRemaining[i];
		totalConflictsRemaining += conflictsRemaining[i];
	}

	/* If no conflicts remain, we get the lock. */
	if (totalConflictsRemaining == 0)
	{
		PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
		return false;
	}

	/* If no group locking, it's definitely a conflict. */
	if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
	{
		Assert(proclock->tag.myProc == MyProc);
		PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
					   proclock);
		return true;
	}

	/*
	 * Relation extension locks conflict even between members of the same
	 * lock group, so don't try to subtract out group-mates' holds for them.
	 */
	if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
	{
		PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
					   proclock);
		return true;
	}

	/*
	 * Locks held in conflicting modes by members of our own lock group are
	 * not real conflicts; we can subtract those out and see if we still have
	 * a conflict.  This is O(N) in the number of processes holding or
	 * awaiting locks on this object.  We could improve that by making the
	 * shared memory state more complex (and larger) but it doesn't seem worth
	 * it.
	 */
	dlist_foreach(proclock_iter, &lock->procLocks)
	{
		PROCLOCK   *otherproclock =
			dlist_container(PROCLOCK, lockLink, proclock_iter.cur);

		if (proclock != otherproclock &&
			proclock->groupLeader == otherproclock->groupLeader &&
			(otherproclock->holdMask & conflictMask) != 0)
		{
			int			intersectMask = otherproclock->holdMask & conflictMask;

			for (i = 1; i <= numLockModes; i++)
			{
				if ((intersectMask & LOCKBIT_ON(i)) != 0)
				{
					/* Counts must stay consistent with the proclock list. */
					if (conflictsRemaining[i] <= 0)
						elog(PANIC, "proclocks held do not match lock");
					conflictsRemaining[i]--;
					totalConflictsRemaining--;
				}
			}

			if (totalConflictsRemaining == 0)
			{
				PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
							   proclock);
				return false;
			}
		}
	}

	/* Nope, it's a real conflict. */
	PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
	return true;
}
1645 : :
1646 : : /*
1647 : : * GrantLock -- update the lock and proclock data structures to show
1648 : : * the lock request has been granted.
1649 : : *
1650 : : * NOTE: if proc was blocked, it also needs to be removed from the wait list
1651 : : * and have its waitLock/waitProcLock fields cleared. That's not done here.
1652 : : *
1653 : : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1654 : : * table entry; but since we may be awaking some other process, we can't do
1655 : : * that here; it's done by GrantLockLocal, instead.
1656 : : */
1657 : : void
8236 bruce@momjian.us 1658 : 2179882 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1659 : : {
8999 tgl@sss.pgh.pa.us 1660 : 2179882 : lock->nGranted++;
1661 : 2179882 : lock->granted[lockmode]++;
7950 bruce@momjian.us 1662 : 2179882 : lock->grantMask |= LOCKBIT_ON(lockmode);
8999 tgl@sss.pgh.pa.us 1663 [ + + ]: 2179882 : if (lock->granted[lockmode] == lock->requested[lockmode])
7950 bruce@momjian.us 1664 : 2179498 : lock->waitMask &= LOCKBIT_OFF(lockmode);
7680 tgl@sss.pgh.pa.us 1665 : 2179882 : proclock->holdMask |= LOCKBIT_ON(lockmode);
1666 : : LOCK_PRINT("GrantLock", lock, lockmode);
8999 1667 [ + - - + ]: 2179882 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1668 [ - + ]: 2179882 : Assert(lock->nGranted <= lock->nRequested);
7680 1669 : 2179882 : }
1670 : :
1671 : : /*
1672 : : * UnGrantLock -- opposite of GrantLock.
1673 : : *
1674 : : * Updates the lock and proclock data structures to show that the lock
1675 : : * is no longer held nor requested by the current holder.
1676 : : *
1677 : : * Returns true if there were any waiters waiting on the lock that
1678 : : * should now be woken up with ProcLockWakeup.
1679 : : */
1680 : : static bool
7519 neilc@samurai.com 1681 : 2179817 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1682 : : PROCLOCK *proclock, LockMethod lockMethodTable)
1683 : : {
7266 bruce@momjian.us 1684 : 2179817 : bool wakeupNeeded = false;
1685 : :
7519 neilc@samurai.com 1686 [ + - - + ]: 2179817 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1687 [ + - - + ]: 2179817 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1688 [ - + ]: 2179817 : Assert(lock->nGranted <= lock->nRequested);
1689 : :
1690 : : /*
1691 : : * fix the general lock stats
1692 : : */
1693 : 2179817 : lock->nRequested--;
1694 : 2179817 : lock->requested[lockmode]--;
1695 : 2179817 : lock->nGranted--;
1696 : 2179817 : lock->granted[lockmode]--;
1697 : :
1698 [ + + ]: 2179817 : if (lock->granted[lockmode] == 0)
1699 : : {
1700 : : /* change the conflict mask. No more of this lock type. */
1701 : 2155312 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1702 : : }
1703 : :
1704 : : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1705 : :
1706 : : /*
1707 : : * We need only run ProcLockWakeup if the released lock conflicts with at
1708 : : * least one of the lock types requested by waiter(s). Otherwise whatever
1709 : : * conflict made them wait must still exist. NOTE: before MVCC, we could
1710 : : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1711 : : * not true anymore, because the remaining granted locks might belong to
1712 : : * some waiter, who could now be awakened because he doesn't conflict with
1713 : : * his own locks.
1714 : : */
1715 [ + + ]: 2179817 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1716 : 1227 : wakeupNeeded = true;
1717 : :
1718 : : /*
1719 : : * Now fix the per-proclock state.
1720 : : */
1721 : 2179817 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1722 : : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1723 : :
1724 : 2179817 : return wakeupNeeded;
1725 : : }
1726 : :
/*
 * CleanUpLock -- clean up after releasing a lock.  We garbage-collect the
 * proclock and lock objects if possible, and call ProcLockWakeup if there
 * are remaining requests and the caller says it's OK.  (Normally, this
 * should be called after UnGrantLock, and wakeupNeeded is the result from
 * UnGrantLock.)
 *
 * The appropriate partition lock must be held at entry, and will be
 * held at exit.
 */
static void
CleanUpLock(LOCK *lock, PROCLOCK *proclock,
			LockMethod lockMethodTable, uint32 hashcode,
			bool wakeupNeeded)
{
	/*
	 * If this was my last hold on this lock, delete my entry in the proclock
	 * table.
	 */
	if (proclock->holdMask == 0)
	{
		uint32		proclock_hashcode;

		PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
		/* Unlink from both the per-lock and per-proc proclock chains. */
		dlist_delete(&proclock->lockLink);
		dlist_delete(&proclock->procLink);
		/* The proclock hash is derived from the lock's hashcode. */
		proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
		if (!hash_search_with_hash_value(LockMethodProcLockHash,
										 &(proclock->tag),
										 proclock_hashcode,
										 HASH_REMOVE,
										 NULL))
			elog(PANIC, "proclock table corrupted");
	}

	if (lock->nRequested == 0)
	{
		/*
		 * The caller just released the last lock, so garbage-collect the lock
		 * object.
		 */
		LOCK_PRINT("CleanUpLock: deleting", lock, 0);
		Assert(dlist_is_empty(&lock->procLocks));
		if (!hash_search_with_hash_value(LockMethodLockHash,
										 &(lock->tag),
										 hashcode,
										 HASH_REMOVE,
										 NULL))
			elog(PANIC, "lock table corrupted");
	}
	else if (wakeupNeeded)
	{
		/* There are waiters on this lock, so wake them up. */
		ProcLockWakeup(lockMethodTable, lock);
	}
}
1783 : :
/*
 * GrantLockLocal -- update the locallock data structures to show
 *		the lock request has been granted.
 *
 * We expect that LockAcquire made sure there is room to add a new
 * ResourceOwner entry.
 */
static void
GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
{
	LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
	int			i;

	Assert(locallock->numLockOwners < locallock->maxLockOwners);
	/* Count the total */
	locallock->nLocks++;
	/* Count the per-owner lock */
	for (i = 0; i < locallock->numLockOwners; i++)
	{
		if (lockOwners[i].owner == owner)
		{
			lockOwners[i].nLocks++;
			return;
		}
	}
	/* No existing entry for this owner; i is now the first free slot. */
	lockOwners[i].owner = owner;
	lockOwners[i].nLocks = 1;
	locallock->numLockOwners++;
	/* owner == NULL means a session lock, which no ResourceOwner tracks */
	if (owner != NULL)
		ResourceOwnerRememberLock(owner, locallock);

	/* Indicate that the lock is acquired for certain types of locks. */
	CheckAndSetLockHeld(locallock, true);
}
1818 : :
/*
 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
 * and arrange for error cleanup if it fails
 *
 * Bumping the strong-lock count forces other backends to migrate their
 * fastpath entries for this partition into the main lock table.  Setting
 * StrongLockInProgress lets AbortStrongLockAcquire undo the bump if the
 * acquisition errors out before FinishStrongLockAcquire is reached.
 */
static void
BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
{
	Assert(StrongLockInProgress == NULL);
	Assert(locallock->holdsStrongLockCount == false);

	/*
	 * Adding to a memory location is not atomic, so we take a spinlock to
	 * ensure we don't collide with someone else trying to bump the count at
	 * the same time.
	 *
	 * XXX: It might be worth considering using an atomic fetch-and-add
	 * instruction here, on architectures where that is supported.
	 */

	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
	FastPathStrongRelationLocks->count[fasthashcode]++;
	locallock->holdsStrongLockCount = true;
	StrongLockInProgress = locallock;
	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
1844 : :
/*
 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
 * acquisition once it's no longer needed
 *
 * The strong-lock count bump made by BeginStrongLockAcquire becomes
 * permanent; AbortStrongLockAcquire will now be a no-op.
 */
static void
FinishStrongLockAcquire(void)
{
	StrongLockInProgress = NULL;
}
1854 : :
/*
 * AbortStrongLockAcquire - undo strong lock state changes performed by
 * BeginStrongLockAcquire.
 *
 * Safe to call even when no strong-lock acquisition is in progress
 * (it just returns), so error-cleanup paths can call it unconditionally.
 */
void
AbortStrongLockAcquire(void)
{
	uint32		fasthashcode;
	LOCALLOCK  *locallock = StrongLockInProgress;

	/* Nothing to undo unless BeginStrongLockAcquire is pending. */
	if (locallock == NULL)
		return;

	fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
	Assert(locallock->holdsStrongLockCount == true);
	/* Reverse the count bump and bookkeeping under the same spinlock. */
	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
	Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
	FastPathStrongRelationLocks->count[fasthashcode]--;
	locallock->holdsStrongLockCount = false;
	StrongLockInProgress = NULL;
	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
1877 : :
/*
 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
 *		WaitOnLock on.
 *
 * proc.c needs this for the case where we are booted off the lock by
 * timeout, but discover that someone granted us the lock anyway.
 *
 * We could just export GrantLockLocal, but that would require including
 * resowner.h in lock.h, which creates circularity.
 */
void
GrantAwaitedLock(void)
{
	/* awaitedLock/awaitedOwner were saved by WaitOnLock before sleeping */
	GrantLockLocal(awaitedLock, awaitedOwner);
}
1893 : :
/*
 * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
 *
 * Returns NULL if this backend is not currently waiting for a lock.
 */
LOCALLOCK *
GetAwaitedLock(void)
{
	return awaitedLock;
}
1902 : :
/*
 * ResetAwaitedLock -- Forget that we are waiting on a lock.
 *
 * After this, LockErrorCleanup will take no action for the former wait.
 */
void
ResetAwaitedLock(void)
{
	awaitedLock = NULL;
}
1911 : :
/*
 * MarkLockClear -- mark an acquired lock as "clear"
 *
 * This means that we know we have absorbed all sinval messages that other
 * sessions generated before we acquired this lock, and so we can confidently
 * assume we know about any catalog changes protected by this lock.
 */
void
MarkLockClear(LOCALLOCK *locallock)
{
	/* Only makes sense for a lock we actually hold */
	Assert(locallock->nLocks > 0);
	locallock->lockCleared = true;
}
1925 : :
/*
 * WaitOnLock -- wait to acquire a lock
 *
 * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
 *
 * Sets up an error-context callback (so lock-wait errors identify the lock),
 * adjusts the ps display, and records awaitedLock/awaitedOwner so that
 * LockErrorCleanup can undo wait state on cancel/die.
 */
static ProcWaitStatus
WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
{
	ProcWaitStatus result;
	ErrorContextCallback waiterrcontext;

	TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
									 locallock->tag.lock.locktag_field2,
									 locallock->tag.lock.locktag_field3,
									 locallock->tag.lock.locktag_field4,
									 locallock->tag.lock.locktag_type,
									 locallock->tag.mode);

	/* Setup error traceback support for ereport() */
	waiterrcontext.callback = waitonlock_error_callback;
	waiterrcontext.arg = (void *) locallock;
	waiterrcontext.previous = error_context_stack;
	error_context_stack = &waiterrcontext;

	/* adjust the process title to indicate that it's waiting */
	set_ps_display_suffix("waiting");

	/*
	 * Record the fact that we are waiting for a lock, so that
	 * LockErrorCleanup will clean up if cancel/die happens.
	 */
	awaitedLock = locallock;
	awaitedOwner = owner;

	/*
	 * NOTE: Think not to put any shared-state cleanup after the call to
	 * ProcSleep, in either the normal or failure path.  The lock state must
	 * be fully set by the lock grantor, or by CheckDeadLock if we give up
	 * waiting for the lock.  This is necessary because of the possibility
	 * that a cancel/die interrupt will interrupt ProcSleep after someone else
	 * grants us the lock, but before we've noticed it. Hence, after granting,
	 * the locktable state must fully reflect the fact that we own the lock;
	 * we can't do additional work on return.
	 *
	 * We can and do use a PG_TRY block to try to clean up after failure, but
	 * this still has a major limitation: elog(FATAL) can occur while waiting
	 * (eg, a "die" interrupt), and then control won't come back here.  So all
	 * cleanup of essential state should happen in LockErrorCleanup, not here.
	 * We can use PG_TRY to clear the "waiting" status flags, since doing that
	 * is unimportant if the process exits.
	 */
	PG_TRY();
	{
		result = ProcSleep(locallock);
	}
	PG_CATCH();
	{
		/* In this path, awaitedLock remains set until LockErrorCleanup */

		/* reset ps display to remove the suffix */
		set_ps_display_remove_suffix();

		/* and propagate the error */
		PG_RE_THROW();
	}
	PG_END_TRY();

	/*
	 * We no longer want LockErrorCleanup to do anything.
	 */
	awaitedLock = NULL;

	/* reset ps display to remove the suffix */
	set_ps_display_remove_suffix();

	/* pop our error-context frame */
	error_context_stack = waiterrcontext.previous;

	TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
									locallock->tag.lock.locktag_field2,
									locallock->tag.lock.locktag_field3,
									locallock->tag.lock.locktag_field4,
									locallock->tag.lock.locktag_type,
									locallock->tag.mode);

	return result;
}
2012 : :
2013 : : /*
2014 : : * error context callback for failures in WaitOnLock
2015 : : *
2016 : : * We report which lock was being waited on, in the same style used in
2017 : : * deadlock reports. This helps with lock timeout errors in particular.
2018 : : */
2019 : : static void
8 tgl@sss.pgh.pa.us 2020 :GNC 65 : waitonlock_error_callback(void *arg)
2021 : : {
2022 : 65 : LOCALLOCK *locallock = (LOCALLOCK *) arg;
2023 : 65 : const LOCKTAG *tag = &locallock->tag.lock;
2024 : 65 : LOCKMODE mode = locallock->tag.mode;
2025 : : StringInfoData locktagbuf;
2026 : :
2027 : 65 : initStringInfo(&locktagbuf);
2028 : 65 : DescribeLockTag(&locktagbuf, tag);
2029 : :
2030 : 130 : errcontext("waiting for %s on %s",
2031 : 65 : GetLockmodeName(tag->locktag_lockmethodid, mode),
2032 : : locktagbuf.data);
2033 : 65 : }
2034 : :
/*
 * Remove a proc from the wait-queue it is on (caller must know it is on one).
 * This is only used when the proc has failed to get the lock, so we set its
 * waitStatus to PROC_WAIT_STATUS_ERROR.
 *
 * Appropriate partition lock must be held by caller.  Also, caller is
 * responsible for signaling the proc if needed.
 *
 * NB: this does not clean up any locallock object that may exist for the lock.
 */
void
RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
{
	LOCK	   *waitLock = proc->waitLock;
	PROCLOCK   *proclock = proc->waitProcLock;
	LOCKMODE	lockmode = proc->waitLockMode;
	LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);

	/* Make sure proc is waiting */
	Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
	Assert(proc->links.next != NULL);
	Assert(waitLock);
	Assert(!dclist_is_empty(&waitLock->waitProcs));
	Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));

	/* Remove proc from lock's wait queue */
	dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);

	/* Undo increments of request counts by waiting process */
	Assert(waitLock->nRequested > 0);
	Assert(waitLock->nRequested > proc->waitLock->nGranted);
	waitLock->nRequested--;
	Assert(waitLock->requested[lockmode] > 0);
	waitLock->requested[lockmode]--;
	/* don't forget to clear waitMask bit if appropriate */
	if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
		waitLock->waitMask &= LOCKBIT_OFF(lockmode);

	/* Clean up the proc's own state, and pass it the ok/fail signal */
	proc->waitLock = NULL;
	proc->waitProcLock = NULL;
	proc->waitStatus = PROC_WAIT_STATUS_ERROR;

	/*
	 * Delete the proclock immediately if it represents no already-held locks.
	 * (This must happen now because if the owner of the lock decides to
	 * release it, and the requested/granted counts then go to zero,
	 * LockRelease expects there to be no remaining proclocks.)  Then see if
	 * any other waiters for the lock can be woken up now.
	 */
	CleanUpLock(waitLock, proclock,
				LockMethods[lockmethodid], hashcode,
				true);
}
2089 : :
/*
 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
 *		Release a session lock if 'sessionLock' is true, else release a
 *		regular transaction lock.
 *
 * Returns true on success, false (with a WARNING) if we don't actually
 * hold such a lock.
 *
 * Side Effects: find any waiting processes that are now wakable,
 *		grant them their requested locks and awaken them.
 *		(We have to grant the lock here to avoid a race between
 *		the waking process and any new process to
 *		come along and request the lock.)
 */
bool
LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
{
	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
	LockMethod	lockMethodTable;
	LOCALLOCKTAG localtag;
	LOCALLOCK  *locallock;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	LWLock	   *partitionLock;
	bool		wakeupNeeded;

	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];
	if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
		elog(ERROR, "unrecognized lock mode: %d", lockmode);

#ifdef LOCK_DEBUG
	if (LOCK_DEBUG_ENABLED(locktag))
		elog(LOG, "LockRelease: lock [%u,%u] %s",
			 locktag->locktag_field1, locktag->locktag_field2,
			 lockMethodTable->lockModeNames[lockmode]);
#endif

	/*
	 * Find the LOCALLOCK entry for this lock and lockmode
	 */
	MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
	localtag.lock = *locktag;
	localtag.mode = lockmode;

	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  &localtag,
										  HASH_FIND, NULL);

	/*
	 * let the caller print its own error message, too. Do not ereport(ERROR).
	 */
	if (!locallock || locallock->nLocks <= 0)
	{
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		return false;
	}

	/*
	 * Decrease the count for the resource owner.
	 */
	{
		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
		ResourceOwner owner;
		int			i;

		/* Identify owner for lock (NULL owner denotes a session lock) */
		if (sessionLock)
			owner = NULL;
		else
			owner = CurrentResourceOwner;

		for (i = locallock->numLockOwners - 1; i >= 0; i--)
		{
			if (lockOwners[i].owner == owner)
			{
				Assert(lockOwners[i].nLocks > 0);
				if (--lockOwners[i].nLocks == 0)
				{
					if (owner != NULL)
						ResourceOwnerForgetLock(owner, locallock);
					/* compact out unused slot */
					locallock->numLockOwners--;
					if (i < locallock->numLockOwners)
						lockOwners[i] = lockOwners[locallock->numLockOwners];
				}
				break;
			}
		}
		if (i < 0)
		{
			/* don't release a lock belonging to another owner */
			elog(WARNING, "you don't own a lock of type %s",
				 lockMethodTable->lockModeNames[lockmode]);
			return false;
		}
	}

	/*
	 * Decrease the total local count.  If we're still holding the lock, we're
	 * done.
	 */
	locallock->nLocks--;

	if (locallock->nLocks > 0)
		return true;

	/*
	 * At this point we can no longer suppose we are clear of invalidation
	 * messages related to this lock.  Although we'll delete the LOCALLOCK
	 * object before any intentional return from this routine, it seems worth
	 * the trouble to explicitly reset lockCleared right now, just in case
	 * some error prevents us from deleting the LOCALLOCK.
	 */
	locallock->lockCleared = false;

	/* Attempt fast release of any lock eligible for the fast path. */
	if (EligibleForRelationFastPath(locktag, lockmode) &&
		FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
	{
		bool		released;

		/*
		 * We might not find the lock here, even if we originally entered it
		 * here.  Another backend may have moved it to the main table.
		 */
		LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
		released = FastPathUnGrantRelationLock(locktag->locktag_field2,
											   lockmode);
		LWLockRelease(&MyProc->fpInfoLock);
		if (released)
		{
			RemoveLocalLock(locallock);
			return true;
		}
	}

	/*
	 * Otherwise we've got to mess with the shared lock table.
	 */
	partitionLock = LockHashPartitionLock(locallock->hashcode);

	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/*
	 * Normally, we don't need to re-find the lock or proclock, since we kept
	 * their addresses in the locallock table, and they couldn't have been
	 * removed while we were holding a lock on them.  But it's possible that
	 * the lock was taken fast-path and has since been moved to the main hash
	 * table by another backend, in which case we will need to look up the
	 * objects here.  We assume the lock field is NULL if so.
	 */
	lock = locallock->lock;
	if (!lock)
	{
		PROCLOCKTAG proclocktag;

		Assert(EligibleForRelationFastPath(locktag, lockmode));
		lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
													locktag,
													locallock->hashcode,
													HASH_FIND,
													NULL);
		if (!lock)
			elog(ERROR, "failed to re-find shared lock object");
		locallock->lock = lock;

		proclocktag.myLock = lock;
		proclocktag.myProc = MyProc;
		locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
													   &proclocktag,
													   HASH_FIND,
													   NULL);
		if (!locallock->proclock)
			elog(ERROR, "failed to re-find shared proclock object");
	}
	LOCK_PRINT("LockRelease: found", lock, lockmode);
	proclock = locallock->proclock;
	PROCLOCK_PRINT("LockRelease: found", proclock);

	/*
	 * Double-check that we are actually holding a lock of the type we want to
	 * release.
	 */
	if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
	{
		PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
		LWLockRelease(partitionLock);
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		RemoveLocalLock(locallock);
		return false;
	}

	/*
	 * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
	 */
	wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);

	CleanUpLock(lock, proclock,
				lockMethodTable, locallock->hashcode,
				wakeupNeeded);

	LWLockRelease(partitionLock);

	RemoveLocalLock(locallock);
	return true;
}
2297 : :
/*
 * LockReleaseAll -- Release all locks of the specified lock method that
 *		are held by the current process.
 *
 * Well, not necessarily *all* locks.  The available behaviors are:
 *		allLocks == true: release all locks including session locks.
 *		allLocks == false: release all non-session locks.
 *
 * This runs in two passes: first we clean out the backend-local locallock
 * table (releasing fast-path locks directly as we encounter them, and
 * marking each shared PROCLOCK's releaseMask for the rest); then we walk
 * our per-partition PROCLOCK lists and actually release what was marked.
 * The two-pass design is needed because multiple locallock entries can
 * reference the same PROCLOCK, and we must not leave dangling pointers.
 */
void
LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
{
	HASH_SEQ_STATUS status;
	LockMethod	lockMethodTable;
	int			i,
				numLockModes;
	LOCALLOCK  *locallock;
	LOCK	   *lock;
	int			partition;
	bool		have_fast_path_lwlock = false;

	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];

#ifdef LOCK_DEBUG
	if (*(lockMethodTable->trace_flag))
		elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
#endif

	/*
	 * Get rid of our fast-path VXID lock, if appropriate.  Note that this is
	 * the only way that the lock we hold on our own VXID can ever get
	 * released: it is always and only released when a toplevel transaction
	 * ends.
	 */
	if (lockmethodid == DEFAULT_LOCKMETHOD)
		VirtualXactLockTableCleanup();

	numLockModes = lockMethodTable->numLockModes;

	/*
	 * First we run through the locallock table and get rid of unwanted
	 * entries, then we scan the process's proclocks and get rid of those. We
	 * do this separately because we may have multiple locallock entries
	 * pointing to the same proclock, and we daren't end up with any dangling
	 * pointers.  Fast-path locks are cleaned up during the locallock table
	 * scan, though.
	 */
	hash_seq_init(&status, LockMethodLocalHash);

	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
	{
		/*
		 * If the LOCALLOCK entry is unused, something must've gone wrong
		 * while trying to acquire this lock.  Just forget the local entry.
		 */
		if (locallock->nLocks == 0)
		{
			RemoveLocalLock(locallock);
			continue;
		}

		/* Ignore items that are not of the lockmethod to be removed */
		if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
			continue;

		/*
		 * If we are asked to release all locks, we can just zap the entry.
		 * Otherwise, must scan to see if there are session locks. We assume
		 * there is at most one lockOwners entry for session locks.
		 */
		if (!allLocks)
		{
			LOCALLOCKOWNER *lockOwners = locallock->lockOwners;

			/* If session lock is above array position 0, move it down to 0 */
			for (i = 0; i < locallock->numLockOwners; i++)
			{
				/* session locks are recorded with a NULL ResourceOwner */
				if (lockOwners[i].owner == NULL)
					lockOwners[0] = lockOwners[i];
				else
					ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
			}

			if (locallock->numLockOwners > 0 &&
				lockOwners[0].owner == NULL &&
				lockOwners[0].nLocks > 0)
			{
				/* Fix the locallock to show just the session locks */
				locallock->nLocks = lockOwners[0].nLocks;
				locallock->numLockOwners = 1;
				/* We aren't deleting this locallock, so done */
				continue;
			}
			else
				locallock->numLockOwners = 0;
		}

#ifdef USE_ASSERT_CHECKING

		/*
		 * Tuple locks are currently held only for short durations within a
		 * transaction. Check that we didn't forget to release one.
		 */
		if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
			elog(WARNING, "tuple lock held at commit");
#endif

		/*
		 * If the lock or proclock pointers are NULL, this lock was taken via
		 * the relation fast-path (and is not known to have been transferred).
		 */
		if (locallock->proclock == NULL || locallock->lock == NULL)
		{
			LOCKMODE	lockmode = locallock->tag.mode;
			Oid			relid;

			/* Verify that a fast-path lock is what we've got. */
			if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
				elog(PANIC, "locallock table corrupted");

			/*
			 * If we don't currently hold the LWLock that protects our
			 * fast-path data structures, we must acquire it before attempting
			 * to release the lock via the fast-path.  We will continue to
			 * hold the LWLock until we're done scanning the locallock table,
			 * unless we hit a transferred fast-path lock.  (XXX is this
			 * really such a good idea?  There could be a lot of entries ...)
			 */
			if (!have_fast_path_lwlock)
			{
				LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
				have_fast_path_lwlock = true;
			}

			/* Attempt fast-path release. */
			relid = locallock->tag.lock.locktag_field2;
			if (FastPathUnGrantRelationLock(relid, lockmode))
			{
				RemoveLocalLock(locallock);
				continue;
			}

			/*
			 * Our lock, originally taken via the fast path, has been
			 * transferred to the main lock table.  That's going to require
			 * some extra work, so release our fast-path lock before starting.
			 */
			LWLockRelease(&MyProc->fpInfoLock);
			have_fast_path_lwlock = false;

			/*
			 * Now dump the lock.  We haven't got a pointer to the LOCK or
			 * PROCLOCK in this case, so we have to handle this a bit
			 * differently than a normal lock release.  Unfortunately, this
			 * requires an extra LWLock acquire-and-release cycle on the
			 * partitionLock, but hopefully it shouldn't happen often.
			 */
			LockRefindAndRelease(lockMethodTable, MyProc,
								 &locallock->tag.lock, lockmode, false);
			RemoveLocalLock(locallock);
			continue;
		}

		/*
		 * Mark the proclock to show we need to release this lockmode.
		 * (nLocks is necessarily > 0 here; zero-count entries were discarded
		 * at the top of the loop.)
		 */
		if (locallock->nLocks > 0)
			locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);

		/* And remove the locallock hashtable entry */
		RemoveLocalLock(locallock);
	}

	/* Done with the fast-path data structures */
	if (have_fast_path_lwlock)
		LWLockRelease(&MyProc->fpInfoLock);

	/*
	 * Now, scan each lock partition separately.
	 */
	for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
	{
		LWLock	   *partitionLock;
		dlist_head *procLocks = &MyProc->myProcLocks[partition];
		dlist_mutable_iter proclock_iter;

		partitionLock = LockHashPartitionLockByIndex(partition);

		/*
		 * If the proclock list for this partition is empty, we can skip
		 * acquiring the partition lock.  This optimization is trickier than
		 * it looks, because another backend could be in process of adding
		 * something to our proclock list due to promoting one of our
		 * fast-path locks.  However, any such lock must be one that we
		 * decided not to delete above, so it's okay to skip it again now;
		 * we'd just decide not to delete it again.  We must, however, be
		 * careful to re-fetch the list header once we've acquired the
		 * partition lock, to be sure we have a valid, up-to-date pointer.
		 * (There is probably no significant risk if pointer fetch/store is
		 * atomic, but we don't wish to assume that.)
		 *
		 * XXX This argument assumes that the locallock table correctly
		 * represents all of our fast-path locks.  While allLocks mode
		 * guarantees to clean up all of our normal locks regardless of the
		 * locallock situation, we lose that guarantee for fast-path locks.
		 * This is not ideal.
		 */
		if (dlist_is_empty(procLocks))
			continue;			/* needn't examine this partition */

		LWLockAcquire(partitionLock, LW_EXCLUSIVE);

		dlist_foreach_modify(proclock_iter, procLocks)
		{
			PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
			bool		wakeupNeeded = false;

			Assert(proclock->tag.myProc == MyProc);

			lock = proclock->tag.myLock;

			/* Ignore items that are not of the lockmethod to be removed */
			if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
				continue;

			/*
			 * In allLocks mode, force release of all locks even if locallock
			 * table had problems
			 */
			if (allLocks)
				proclock->releaseMask = proclock->holdMask;
			else
				Assert((proclock->releaseMask & ~proclock->holdMask) == 0);

			/*
			 * Ignore items that have nothing to be released, unless they have
			 * holdMask == 0 and are therefore recyclable
			 */
			if (proclock->releaseMask == 0 && proclock->holdMask != 0)
				continue;

			PROCLOCK_PRINT("LockReleaseAll", proclock);
			LOCK_PRINT("LockReleaseAll", lock, 0);
			Assert(lock->nRequested >= 0);
			Assert(lock->nGranted >= 0);
			Assert(lock->nGranted <= lock->nRequested);
			Assert((proclock->holdMask & ~lock->grantMask) == 0);

			/*
			 * Release the previously-marked lock modes
			 */
			for (i = 1; i <= numLockModes; i++)
			{
				if (proclock->releaseMask & LOCKBIT_ON(i))
					wakeupNeeded |= UnGrantLock(lock, i, proclock,
												lockMethodTable);
			}
			Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
			Assert(lock->nGranted <= lock->nRequested);
			LOCK_PRINT("LockReleaseAll: updated", lock, 0);

			proclock->releaseMask = 0;

			/* CleanUpLock will wake up waiters if needed. */
			CleanUpLock(lock, proclock,
						lockMethodTable,
						LockTagHashCode(&lock->tag),
						wakeupNeeded);
		}						/* loop over PROCLOCKs within this partition */

		LWLockRelease(partitionLock);
	}							/* loop over partitions */

#ifdef LOCK_DEBUG
	if (*(lockMethodTable->trace_flag))
		elog(LOG, "LockReleaseAll done");
#endif
}
2575 : :
2576 : : /*
2577 : : * LockReleaseSession -- Release all session locks of the specified lock method
2578 : : * that are held by the current process.
2579 : : */
2580 : : void
4873 tgl@sss.pgh.pa.us 2581 : 119 : LockReleaseSession(LOCKMETHODID lockmethodid)
2582 : : {
2583 : : HASH_SEQ_STATUS status;
2584 : : LOCALLOCK *locallock;
2585 : :
2586 [ + - - + ]: 119 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4873 tgl@sss.pgh.pa.us 2587 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2588 : :
4873 tgl@sss.pgh.pa.us 2589 :CBC 119 : hash_seq_init(&status, LockMethodLocalHash);
2590 : :
2591 [ + + ]: 226 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2592 : : {
2593 : : /* Ignore items that are not of the specified lock method */
2594 [ + + ]: 107 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2595 : 10 : continue;
2596 : :
2597 : 97 : ReleaseLockIfHeld(locallock, true);
2598 : : }
2599 : 119 : }
2600 : :
2601 : : /*
2602 : : * LockReleaseCurrentOwner
2603 : : * Release all locks belonging to CurrentResourceOwner
2604 : : *
2605 : : * If the caller knows what those locks are, it can pass them as an array.
2606 : : * That speeds up the call significantly, when a lot of locks are held.
2607 : : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2608 : : * table to find them.
2609 : : */
2610 : : void
4825 heikki.linnakangas@i 2611 : 5330 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2612 : : {
2613 [ + + ]: 5330 : if (locallocks == NULL)
2614 : : {
2615 : : HASH_SEQ_STATUS status;
2616 : : LOCALLOCK *locallock;
2617 : :
2618 : 4 : hash_seq_init(&status, LockMethodLocalHash);
2619 : :
2620 [ + + ]: 272 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2621 : 268 : ReleaseLockIfHeld(locallock, false);
2622 : : }
2623 : : else
2624 : : {
2625 : : int i;
2626 : :
2627 [ + + ]: 7858 : for (i = nlocks - 1; i >= 0; i--)
2628 : 2532 : ReleaseLockIfHeld(locallocks[i], false);
2629 : : }
5314 itagaki.takahiro@gma 2630 : 5330 : }
2631 : :
2632 : : /*
2633 : : * ReleaseLockIfHeld
2634 : : * Release any session-level locks on this lockable object if sessionLock
2635 : : * is true; else, release any locks held by CurrentResourceOwner.
2636 : : *
2637 : : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2638 : : * locks), but without refactoring LockRelease() we cannot support releasing
2639 : : * locks belonging to resource owners other than CurrentResourceOwner.
2640 : : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2641 : : * do a hashtable lookup of the locallock, too. However, currently this
2642 : : * function isn't used heavily enough to justify refactoring for its
2643 : : * convenience.
2644 : : */
2645 : : static void
4873 tgl@sss.pgh.pa.us 2646 : 2897 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2647 : : {
2648 : : ResourceOwner owner;
2649 : : LOCALLOCKOWNER *lockOwners;
2650 : : int i;
2651 : :
2652 : : /* Identify owner for lock (must match LockRelease!) */
2653 [ + + ]: 2897 : if (sessionLock)
2654 : 97 : owner = NULL;
2655 : : else
2656 : 2800 : owner = CurrentResourceOwner;
2657 : :
2658 : : /* Scan to see if there are any locks belonging to the target owner */
5314 itagaki.takahiro@gma 2659 : 2897 : lockOwners = locallock->lockOwners;
2660 [ + + ]: 3090 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2661 : : {
2662 [ + + ]: 2897 : if (lockOwners[i].owner == owner)
2663 : : {
2664 [ - + ]: 2704 : Assert(lockOwners[i].nLocks > 0);
2665 [ + + ]: 2704 : if (lockOwners[i].nLocks < locallock->nLocks)
2666 : : {
2667 : : /*
2668 : : * We will still hold this lock after forgetting this
2669 : : * ResourceOwner.
2670 : : */
2671 : 696 : locallock->nLocks -= lockOwners[i].nLocks;
2672 : : /* compact out unused slot */
2673 : 696 : locallock->numLockOwners--;
4825 heikki.linnakangas@i 2674 [ + - ]: 696 : if (owner != NULL)
2675 : 696 : ResourceOwnerForgetLock(owner, locallock);
5314 itagaki.takahiro@gma 2676 [ - + ]: 696 : if (i < locallock->numLockOwners)
5314 itagaki.takahiro@gma 2677 :UBC 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2678 : : }
2679 : : else
2680 : : {
5314 itagaki.takahiro@gma 2681 [ - + ]:CBC 2008 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2682 : : /* We want to call LockRelease just once */
2683 : 2008 : lockOwners[i].nLocks = 1;
2684 : 2008 : locallock->nLocks = 1;
2685 [ - + ]: 2008 : if (!LockRelease(&locallock->tag.lock,
2686 : : locallock->tag.mode,
2687 : : sessionLock))
4873 tgl@sss.pgh.pa.us 2688 [ # # ]:UBC 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2689 : : }
5314 itagaki.takahiro@gma 2690 :CBC 2704 : break;
2691 : : }
2692 : : }
7680 tgl@sss.pgh.pa.us 2693 : 2897 : }
2694 : :
2695 : : /*
2696 : : * LockReassignCurrentOwner
2697 : : * Reassign all locks belonging to CurrentResourceOwner to belong
2698 : : * to its parent resource owner.
2699 : : *
2700 : : * If the caller knows what those locks are, it can pass them as an array.
2701 : : * That speeds up the call significantly, when a lot of locks are held
2702 : : * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2703 : : * and we'll traverse through our hash table to find them.
2704 : : */
2705 : : void
4825 heikki.linnakangas@i 2706 : 346819 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2707 : : {
7680 tgl@sss.pgh.pa.us 2708 : 346819 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2709 : :
2710 [ - + ]: 346819 : Assert(parent != NULL);
2711 : :
4825 heikki.linnakangas@i 2712 [ + + ]: 346819 : if (locallocks == NULL)
2713 : : {
2714 : : HASH_SEQ_STATUS status;
2715 : : LOCALLOCK *locallock;
2716 : :
2717 : 3462 : hash_seq_init(&status, LockMethodLocalHash);
2718 : :
2719 [ + + ]: 96266 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2720 : 92804 : LockReassignOwner(locallock, parent);
2721 : : }
2722 : : else
2723 : : {
2724 : : int i;
2725 : :
2726 [ + + ]: 738047 : for (i = nlocks - 1; i >= 0; i--)
2727 : 394690 : LockReassignOwner(locallocks[i], parent);
2728 : : }
2729 : 346819 : }
2730 : :
2731 : : /*
2732 : : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2733 : : * CurrentResourceOwner to its parent.
2734 : : */
2735 : : static void
2736 : 487494 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2737 : : {
2738 : : LOCALLOCKOWNER *lockOwners;
2739 : : int i;
2740 : 487494 : int ic = -1;
2741 : 487494 : int ip = -1;
2742 : :
2743 : : /*
2744 : : * Scan to see if there are any locks belonging to current owner or its
2745 : : * parent
2746 : : */
2747 : 487494 : lockOwners = locallock->lockOwners;
2748 [ + + ]: 1138860 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2749 : : {
2750 [ + + ]: 651366 : if (lockOwners[i].owner == CurrentResourceOwner)
2751 : 474088 : ic = i;
2752 [ + + ]: 177278 : else if (lockOwners[i].owner == parent)
2753 : 134868 : ip = i;
2754 : : }
2755 : :
2756 [ + + ]: 487494 : if (ic < 0)
4483 bruce@momjian.us 2757 : 13406 : return; /* no current locks */
2758 : :
4825 heikki.linnakangas@i 2759 [ + + ]: 474088 : if (ip < 0)
2760 : : {
2761 : : /* Parent has no slot, so just give it the child's slot */
2762 : 352595 : lockOwners[ic].owner = parent;
2763 : 352595 : ResourceOwnerRememberLock(parent, locallock);
2764 : : }
2765 : : else
2766 : : {
2767 : : /* Merge child's count with parent's */
2768 : 121493 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2769 : : /* compact out unused slot */
2770 : 121493 : locallock->numLockOwners--;
2771 [ + + ]: 121493 : if (ic < locallock->numLockOwners)
2772 : 775 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2773 : : }
2774 : 474088 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2775 : : }
2776 : :
2777 : : /*
2778 : : * FastPathGrantRelationLock
2779 : : * Grant lock using per-backend fast-path array, if there is space.
2780 : : */
2781 : : static bool
5163 rhaas@postgresql.org 2782 : 14482056 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2783 : : {
2784 : : uint32 i;
186 tomas.vondra@postgre 2785 : 14482056 : uint32 unused_slot = FastPathLockSlotsPerBackend();
2786 : :
2787 : : /* fast-path group the lock belongs to */
350 2788 : 14482056 : uint32 group = FAST_PATH_REL_GROUP(relid);
2789 : :
2790 : : /* Scan for existing entry for this relid, remembering empty slot. */
2791 [ + + ]: 245482793 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2792 : : {
2793 : : /* index into the whole per-backend array */
2794 [ - + - + ]: 231480841 : uint32 f = FAST_PATH_SLOT(group, i);
2795 : :
5215 rhaas@postgresql.org 2796 [ - + - + : 231480841 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
+ + ]
2797 : 223333547 : unused_slot = f;
2798 [ + + ]: 8147294 : else if (MyProc->fpRelId[f] == relid)
2799 : : {
2800 [ - + - + : 480104 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
- + - + -
+ - + ]
2801 [ - + - + : 480104 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
- + - + -
+ ]
2802 : 480104 : return true;
2803 : : }
2804 : : }
2805 : :
2806 : : /* If no existing entry, use any empty slot. */
186 tomas.vondra@postgre 2807 [ + - ]: 14001952 : if (unused_slot < FastPathLockSlotsPerBackend())
2808 : : {
5215 rhaas@postgresql.org 2809 : 14001952 : MyProc->fpRelId[unused_slot] = relid;
2810 [ - + - + : 14001952 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
- + - + -
+ ]
350 tomas.vondra@postgre 2811 : 14001952 : ++FastPathLocalUseCounts[group];
5215 rhaas@postgresql.org 2812 : 14001952 : return true;
2813 : : }
2814 : :
2815 : : /* No existing entry, and no empty slot. */
5215 rhaas@postgresql.org 2816 :UBC 0 : return false;
2817 : : }
2818 : :
2819 : : /*
2820 : : * FastPathUnGrantRelationLock
2821 : : * Release fast-path lock, if present. Update backend-private local
2822 : : * use count, while we're at it.
2823 : : */
2824 : : static bool
5163 rhaas@postgresql.org 2825 :CBC 14682735 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2826 : : {
2827 : : uint32 i;
5215 2828 : 14682735 : bool result = false;
2829 : :
2830 : : /* fast-path group the lock belongs to */
350 tomas.vondra@postgre 2831 : 14682735 : uint32 group = FAST_PATH_REL_GROUP(relid);
2832 : :
2833 : 14682735 : FastPathLocalUseCounts[group] = 0;
2834 [ + + ]: 249606495 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2835 : : {
2836 : : /* index into the whole per-backend array */
2837 [ - + - + ]: 234923760 : uint32 f = FAST_PATH_SLOT(group, i);
2838 : :
5215 rhaas@postgresql.org 2839 [ + + ]: 234923760 : if (MyProc->fpRelId[f] == relid
2840 [ - + - + : 19913350 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
- + - + -
+ + + ]
2841 : : {
2842 [ - + ]: 14480630 : Assert(!result);
2843 [ - + - + : 14480630 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
- + - + -
+ ]
2844 : 14480630 : result = true;
2845 : : /* we continue iterating so as to update FastPathLocalUseCount */
2846 : : }
2847 [ - + - + : 234923760 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
+ + ]
350 tomas.vondra@postgre 2848 : 11079442 : ++FastPathLocalUseCounts[group];
2849 : : }
5215 rhaas@postgresql.org 2850 : 14682735 : return result;
2851 : : }
2852 : :
/*
 * FastPathTransferRelationLocks
 *		Transfer locks matching the given lock tag from per-backend fast-path
 *		arrays to the shared hash table.
 *
 * Returns true if successful, false if ran out of shared memory.
 *
 * Note the lock ordering used here: each backend's fpInfoLock is taken
 * before the lock-table partition lock, and both are released before moving
 * on to the next backend.
 */
static bool
FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
							  uint32 hashcode)
{
	LWLock	   *partitionLock = LockHashPartitionLock(hashcode);
	Oid			relid = locktag->locktag_field2;
	uint32		i;

	/* fast-path group the lock belongs to */
	uint32		group = FAST_PATH_REL_GROUP(relid);

	/*
	 * Every PGPROC that can potentially hold a fast-path lock is present in
	 * ProcGlobal->allProcs.  Prepared transactions are not, but any
	 * outstanding fast-path locks held by prepared transactions are
	 * transferred to the main lock table.
	 */
	for (i = 0; i < ProcGlobal->allProcCount; i++)
	{
		PGPROC	   *proc = &ProcGlobal->allProcs[i];
		uint32		j;

		LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);

		/*
		 * If the target backend isn't referencing the same database as the
		 * lock, then we needn't examine the individual relation IDs at all;
		 * none of them can be relevant.
		 *
		 * proc->databaseId is set at backend startup time and never changes
		 * thereafter, so it might be safe to perform this test before
		 * acquiring &proc->fpInfoLock.  In particular, it's certainly safe to
		 * assume that if the target backend holds any fast-path locks, it
		 * must have performed a memory-fencing operation (in particular, an
		 * LWLock acquisition) since setting proc->databaseId.  However, it's
		 * less clear that our backend is certain to have performed a memory
		 * fencing operation since the other backend set proc->databaseId. So
		 * for now, we test it after acquiring the LWLock just to be safe.
		 *
		 * Also skip groups without any registered fast-path locks.
		 */
		if (proc->databaseId != locktag->locktag_field1 ||
			proc->fpLockBits[group] == 0)
		{
			LWLockRelease(&proc->fpInfoLock);
			continue;
		}

		for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
		{
			uint32		lockmode;

			/* index into the whole per-backend array */
			uint32		f = FAST_PATH_SLOT(group, j);

			/* Look for an allocated slot matching the given relid. */
			if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
				continue;

			/* Find or create lock object. */
			LWLockAcquire(partitionLock, LW_EXCLUSIVE);
			/* Transfer each lock mode recorded in this slot's bits. */
			for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
				 lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
				 ++lockmode)
			{
				PROCLOCK   *proclock;

				if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
					continue;
				proclock = SetupLockInTable(lockMethodTable, proc, locktag,
											hashcode, lockmode);
				if (!proclock)
				{
					/* Out of shared memory: drop both locks and bail out. */
					LWLockRelease(partitionLock);
					LWLockRelease(&proc->fpInfoLock);
					return false;
				}
				GrantLock(proclock->tag.myLock, proclock, lockmode);
				FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
			}
			LWLockRelease(partitionLock);

			/* No need to examine remaining slots. */
			break;
		}
		LWLockRelease(&proc->fpInfoLock);
	}
	return true;
}
2949 : :
2950 : : /*
2951 : : * FastPathGetRelationLockEntry
2952 : : * Return the PROCLOCK for a lock originally taken via the fast-path,
2953 : : * transferring it to the primary lock table if necessary.
2954 : : *
2955 : : * Note: caller takes care of updating the locallock object.
2956 : : */
2957 : : static PROCLOCK *
5163 2958 : 299 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2959 : : {
4836 bruce@momjian.us 2960 : 299 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2961 : 299 : LOCKTAG *locktag = &locallock->tag.lock;
2962 : 299 : PROCLOCK *proclock = NULL;
4240 rhaas@postgresql.org 2963 : 299 : LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
4836 bruce@momjian.us 2964 : 299 : Oid relid = locktag->locktag_field2;
2965 : : uint32 i,
2966 : : group;
2967 : :
2968 : : /* fast-path group the lock belongs to */
350 tomas.vondra@postgre 2969 : 299 : group = FAST_PATH_REL_GROUP(relid);
2970 : :
1940 tgl@sss.pgh.pa.us 2971 : 299 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2972 : :
350 tomas.vondra@postgre 2973 [ + + ]: 4786 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2974 : : {
2975 : : uint32 lockmode;
2976 : :
2977 : : /* index into the whole per-backend array */
2978 [ - + - + ]: 4778 : uint32 f = FAST_PATH_SLOT(group, i);
2979 : :
2980 : : /* Look for an allocated slot matching the given relid. */
5215 rhaas@postgresql.org 2981 [ + + - + : 4778 : if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
- + - + ]
2982 : 4487 : continue;
2983 : :
2984 : : /* If we don't have a lock of the given mode, forget it! */
2985 : 291 : lockmode = locallock->tag.mode;
2986 [ - + - + : 291 : if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
- + - + -
+ - + ]
5215 rhaas@postgresql.org 2987 :UBC 0 : break;
2988 : :
2989 : : /* Find or create lock object. */
5215 rhaas@postgresql.org 2990 :CBC 291 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2991 : :
2992 : 291 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2993 : : locallock->hashcode, lockmode);
2994 [ - + ]: 291 : if (!proclock)
2995 : : {
4624 tgl@sss.pgh.pa.us 2996 :UBC 0 : LWLockRelease(partitionLock);
1940 2997 : 0 : LWLockRelease(&MyProc->fpInfoLock);
5215 rhaas@postgresql.org 2998 [ # # ]: 0 : ereport(ERROR,
2999 : : (errcode(ERRCODE_OUT_OF_MEMORY),
3000 : : errmsg("out of shared memory"),
3001 : : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3002 : : }
5215 rhaas@postgresql.org 3003 :CBC 291 : GrantLock(proclock->tag.myLock, proclock, lockmode);
3004 [ - + - + : 291 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
- + - + -
+ ]
3005 : :
3006 : 291 : LWLockRelease(partitionLock);
3007 : :
3008 : : /* No need to examine remaining slots. */
4301 tgl@sss.pgh.pa.us 3009 : 291 : break;
3010 : : }
3011 : :
1940 3012 : 299 : LWLockRelease(&MyProc->fpInfoLock);
3013 : :
3014 : : /* Lock may have already been transferred by some other backend. */
5215 rhaas@postgresql.org 3015 [ + + ]: 299 : if (proclock == NULL)
3016 : : {
3017 : : LOCK *lock;
3018 : : PROCLOCKTAG proclocktag;
3019 : : uint32 proclock_hashcode;
3020 : :
3021 : 8 : LWLockAcquire(partitionLock, LW_SHARED);
3022 : :
3023 : 8 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3024 : : locktag,
3025 : : locallock->hashcode,
3026 : : HASH_FIND,
3027 : : NULL);
3028 [ - + ]: 8 : if (!lock)
5215 rhaas@postgresql.org 3029 [ # # ]:UBC 0 : elog(ERROR, "failed to re-find shared lock object");
3030 : :
5215 rhaas@postgresql.org 3031 :CBC 8 : proclocktag.myLock = lock;
3032 : 8 : proclocktag.myProc = MyProc;
3033 : :
3034 : 8 : proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
3035 : : proclock = (PROCLOCK *)
3036 : 8 : hash_search_with_hash_value(LockMethodProcLockHash,
3037 : : &proclocktag,
3038 : : proclock_hashcode,
3039 : : HASH_FIND,
3040 : : NULL);
3041 [ - + ]: 8 : if (!proclock)
5215 rhaas@postgresql.org 3042 [ # # ]:UBC 0 : elog(ERROR, "failed to re-find shared proclock object");
5215 rhaas@postgresql.org 3043 :CBC 8 : LWLockRelease(partitionLock);
3044 : : }
3045 : :
3046 : 299 : return proclock;
3047 : : }
3048 : :
3049 : : /*
3050 : : * GetLockConflicts
3051 : : * Get an array of VirtualTransactionIds of xacts currently holding locks
3052 : : * that would conflict with the specified lock/lockmode.
3053 : : * xacts merely awaiting such a lock are NOT reported.
3054 : : *
3055 : : * The result array is palloc'd and is terminated with an invalid VXID.
3056 : : * *countp, if not null, is updated to the number of items set.
3057 : : *
3058 : : * Of course, the result could be out of date by the time it's returned, so
3059 : : * use of this function has to be thought about carefully. Similarly, a
3060 : : * PGPROC with no "lxid" will be considered non-conflicting regardless of any
3061 : : * lock it holds. Existing callers don't care about a locker after that
3062 : : * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
3063 : : * pg_xact updates and before releasing locks.
3064 : : *
3065 : : * Note we never include the current xact's vxid in the result array,
3066 : : * since an xact never blocks itself.
3067 : : */
3068 : : VirtualTransactionId *
2349 alvherre@alvh.no-ip. 3069 : 1306 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3070 : : {
3071 : : static VirtualTransactionId *vxids;
6950 tgl@sss.pgh.pa.us 3072 : 1306 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
3073 : : LockMethod lockMethodTable;
3074 : : LOCK *lock;
3075 : : LOCKMASK conflictMask;
3076 : : dlist_iter proclock_iter;
3077 : : PROCLOCK *proclock;
3078 : : uint32 hashcode;
3079 : : LWLock *partitionLock;
6576 3080 : 1306 : int count = 0;
5215 rhaas@postgresql.org 3081 : 1306 : int fast_count = 0;
3082 : :
6950 tgl@sss.pgh.pa.us 3083 [ + - - + ]: 1306 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
6950 tgl@sss.pgh.pa.us 3084 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
6950 tgl@sss.pgh.pa.us 3085 :CBC 1306 : lockMethodTable = LockMethods[lockmethodid];
3086 [ + - - + ]: 1306 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
6950 tgl@sss.pgh.pa.us 3087 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
3088 : :
3089 : : /*
3090 : : * Allocate memory to store results, and fill with InvalidVXID. We only
3091 : : * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3092 : : * InHotStandby allocate once in TopMemoryContext.
3093 : : */
5699 simon@2ndQuadrant.co 3094 [ + + ]:CBC 1306 : if (InHotStandby)
3095 : : {
5700 3096 [ + + ]: 4 : if (vxids == NULL)
3097 : 1 : vxids = (VirtualTransactionId *)
5699 3098 : 1 : MemoryContextAlloc(TopMemoryContext,
3099 : : sizeof(VirtualTransactionId) *
1243 rhaas@postgresql.org 3100 : 1 : (MaxBackends + max_prepared_xacts + 1));
3101 : : }
3102 : : else
5699 simon@2ndQuadrant.co 3103 : 1302 : vxids = (VirtualTransactionId *)
1680 noah@leadboat.com 3104 : 1302 : palloc0(sizeof(VirtualTransactionId) *
1243 rhaas@postgresql.org 3105 : 1302 : (MaxBackends + max_prepared_xacts + 1));
3106 : :
3107 : : /* Compute hash code and partition lock, and look up conflicting modes. */
6950 tgl@sss.pgh.pa.us 3108 : 1306 : hashcode = LockTagHashCode(locktag);
3109 : 1306 : partitionLock = LockHashPartitionLock(hashcode);
5215 rhaas@postgresql.org 3110 : 1306 : conflictMask = lockMethodTable->conflictTab[lockmode];
3111 : :
3112 : : /*
3113 : : * Fast path locks might not have been entered in the primary lock table.
3114 : : * If the lock we're dealing with could conflict with such a lock, we must
3115 : : * examine each backend's fast-path array for conflicts.
3116 : : */
4847 3117 [ + - + - : 1306 : if (ConflictsWithRelationFastPath(locktag, lockmode))
+ - + - ]
3118 : : {
3119 : : int i;
5215 3120 : 1306 : Oid relid = locktag->locktag_field2;
3121 : : VirtualTransactionId vxid;
3122 : :
3123 : : /* fast-path group the lock belongs to */
176 fujii@postgresql.org 3124 : 1306 : uint32 group = FAST_PATH_REL_GROUP(relid);
3125 : :
3126 : : /*
3127 : : * Iterate over relevant PGPROCs. Anything held by a prepared
3128 : : * transaction will have been transferred to the primary lock table,
3129 : : * so we need not worry about those. This is all a bit fuzzy, because
3130 : : * new locks could be taken after we've visited a particular
3131 : : * partition, but the callers had better be prepared to deal with that
3132 : : * anyway, since the locks could equally well be taken between the
3133 : : * time we return the value and the time the caller does something
3134 : : * with it.
3135 : : */
5215 rhaas@postgresql.org 3136 [ + + ]: 203966 : for (i = 0; i < ProcGlobal->allProcCount; i++)
3137 : : {
3138 : 202660 : PGPROC *proc = &ProcGlobal->allProcs[i];
3139 : : uint32 j;
3140 : :
3141 : : /* A backend never blocks itself */
3142 [ + + ]: 202660 : if (proc == MyProc)
3143 : 1306 : continue;
3144 : :
1940 tgl@sss.pgh.pa.us 3145 : 201354 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3146 : :
3147 : : /*
3148 : : * If the target backend isn't referencing the same database as
3149 : : * the lock, then we needn't examine the individual relation IDs
3150 : : * at all; none of them can be relevant.
3151 : : *
3152 : : * See FastPathTransferRelationLocks() for discussion of why we do
3153 : : * this test after acquiring the lock.
3154 : : *
3155 : : * Also skip groups without any registered fast-path locks.
3156 : : */
176 fujii@postgresql.org 3157 [ + + ]: 201354 : if (proc->databaseId != locktag->locktag_field1 ||
3158 [ + + ]: 86489 : proc->fpLockBits[group] == 0)
3159 : : {
1940 tgl@sss.pgh.pa.us 3160 : 200959 : LWLockRelease(&proc->fpInfoLock);
5215 rhaas@postgresql.org 3161 : 200959 : continue;
3162 : : }
3163 : :
350 tomas.vondra@postgre 3164 [ + + ]: 6505 : for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3165 : : {
3166 : : uint32 lockmask;
3167 : :
3168 : : /* index into the whole per-backend array */
3169 [ - + - + ]: 6308 : uint32 f = FAST_PATH_SLOT(group, j);
3170 : :
3171 : : /* Look for an allocated slot matching the given relid. */
5215 rhaas@postgresql.org 3172 [ + + ]: 6308 : if (relid != proc->fpRelId[f])
3173 : 6110 : continue;
3174 [ - + - + ]: 198 : lockmask = FAST_PATH_GET_BITS(proc, f);
3175 [ - + ]: 198 : if (!lockmask)
5215 rhaas@postgresql.org 3176 :UBC 0 : continue;
5215 rhaas@postgresql.org 3177 :CBC 198 : lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3178 : :
3179 : : /*
3180 : : * There can only be one entry per relation, so if we found it
3181 : : * and it doesn't conflict, we can skip the rest of the slots.
3182 : : */
3183 [ + + ]: 198 : if ((lockmask & conflictMask) == 0)
3184 : 5 : break;
3185 : :
3186 : : /* Conflict! */
3187 : 193 : GET_VXID_FROM_PGPROC(vxid, *proc);
3188 : :
3189 [ + - ]: 193 : if (VirtualTransactionIdIsValid(vxid))
3190 : 193 : vxids[count++] = vxid;
3191 : : /* else, xact already committed or aborted */
3192 : :
3193 : : /* No need to examine remaining slots. */
3194 : 193 : break;
3195 : : }
3196 : :
1940 tgl@sss.pgh.pa.us 3197 : 395 : LWLockRelease(&proc->fpInfoLock);
3198 : : }
3199 : : }
3200 : :
3201 : : /* Remember how many fast-path conflicts we found. */
5215 rhaas@postgresql.org 3202 : 1306 : fast_count = count;
3203 : :
3204 : : /*
3205 : : * Look up the lock object matching the tag.
3206 : : */
6950 tgl@sss.pgh.pa.us 3207 : 1306 : LWLockAcquire(partitionLock, LW_SHARED);
3208 : :
3209 : 1306 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3210 : : locktag,
3211 : : hashcode,
3212 : : HASH_FIND,
3213 : : NULL);
3214 [ + + ]: 1306 : if (!lock)
3215 : : {
3216 : : /*
3217 : : * If the lock object doesn't exist, there is nothing holding a lock
3218 : : * on this lockable object.
3219 : : */
3220 : 70 : LWLockRelease(partitionLock);
552 heikki.linnakangas@i 3221 : 70 : vxids[count].procNumber = INVALID_PROC_NUMBER;
3873 andres@anarazel.de 3222 : 70 : vxids[count].localTransactionId = InvalidLocalTransactionId;
2349 alvherre@alvh.no-ip. 3223 [ - + ]: 70 : if (countp)
2349 alvherre@alvh.no-ip. 3224 :UBC 0 : *countp = count;
6576 tgl@sss.pgh.pa.us 3225 :CBC 70 : return vxids;
3226 : : }
3227 : :
3228 : : /*
3229 : : * Examine each existing holder (or awaiter) of the lock.
3230 : : */
962 andres@anarazel.de 3231 [ + - + + ]: 2495 : dlist_foreach(proclock_iter, &lock->procLocks)
3232 : : {
3233 : 1259 : proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3234 : :
6950 tgl@sss.pgh.pa.us 3235 [ + + ]: 1259 : if (conflictMask & proclock->holdMask)
3236 : : {
6912 bruce@momjian.us 3237 : 1255 : PGPROC *proc = proclock->tag.myProc;
3238 : :
3239 : : /* A backend never blocks itself */
6950 tgl@sss.pgh.pa.us 3240 [ + + ]: 1255 : if (proc != MyProc)
3241 : : {
3242 : : VirtualTransactionId vxid;
3243 : :
6576 3244 : 23 : GET_VXID_FROM_PGPROC(vxid, *proc);
3245 : :
3246 [ + - ]: 23 : if (VirtualTransactionIdIsValid(vxid))
3247 : : {
3248 : : int i;
3249 : :
3250 : : /* Avoid duplicate entries. */
5215 rhaas@postgresql.org 3251 [ + + ]: 36 : for (i = 0; i < fast_count; ++i)
3252 [ + + - + ]: 13 : if (VirtualTransactionIdEquals(vxids[i], vxid))
5215 rhaas@postgresql.org 3253 :UBC 0 : break;
5215 rhaas@postgresql.org 3254 [ + - ]:CBC 23 : if (i >= fast_count)
3255 : 23 : vxids[count++] = vxid;
3256 : : }
3257 : : /* else, xact already committed or aborted */
3258 : : }
3259 : : }
3260 : : }
3261 : :
6950 tgl@sss.pgh.pa.us 3262 : 1236 : LWLockRelease(partitionLock);
3263 : :
1243 rhaas@postgresql.org 3264 [ - + ]: 1236 : if (count > MaxBackends + max_prepared_xacts) /* should never happen */
6576 tgl@sss.pgh.pa.us 3265 [ # # ]:UBC 0 : elog(PANIC, "too many conflicting locks found");
3266 : :
552 heikki.linnakangas@i 3267 :CBC 1236 : vxids[count].procNumber = INVALID_PROC_NUMBER;
3873 andres@anarazel.de 3268 : 1236 : vxids[count].localTransactionId = InvalidLocalTransactionId;
2349 alvherre@alvh.no-ip. 3269 [ + + ]: 1236 : if (countp)
3270 : 1233 : *countp = count;
6576 tgl@sss.pgh.pa.us 3271 : 1236 : return vxids;
3272 : : }
3273 : :
3274 : : /*
3275 : : * Find a lock in the shared lock table and release it. It is the caller's
3276 : : * responsibility to verify that this is a sane thing to do. (For example, it
3277 : : * would be bad to release a lock here if there might still be a LOCALLOCK
3278 : : * object with pointers to it.)
3279 : : *
3280 : : * We currently use this in two situations: first, to release locks held by
3281 : : * prepared transactions on commit (see lock_twophase_postcommit); and second,
3282 : : * to release locks taken via the fast-path, transferred to the main hash
3283 : : * table, and then released (see LockReleaseAll).
3284 : : */
3285 : : static void
5215 rhaas@postgresql.org 3286 : 2083 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3287 : : LOCKTAG *locktag, LOCKMODE lockmode,
3288 : : bool decrement_strong_lock_count)
3289 : : {
3290 : : LOCK *lock;
3291 : : PROCLOCK *proclock;
3292 : : PROCLOCKTAG proclocktag;
3293 : : uint32 hashcode;
3294 : : uint32 proclock_hashcode;
3295 : : LWLock *partitionLock;
3296 : : bool wakeupNeeded;
3297 : :
3298 : 2083 : hashcode = LockTagHashCode(locktag);
3299 : 2083 : partitionLock = LockHashPartitionLock(hashcode);
3300 : :
3301 : 2083 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3302 : :
3303 : : /*
3304 : : * Re-find the lock object (it had better be there).
3305 : : */
3306 : 2083 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3307 : : locktag,
3308 : : hashcode,
3309 : : HASH_FIND,
3310 : : NULL);
3311 [ - + ]: 2083 : if (!lock)
5215 rhaas@postgresql.org 3312 [ # # ]:UBC 0 : elog(PANIC, "failed to re-find shared lock object");
3313 : :
3314 : : /*
3315 : : * Re-find the proclock object (ditto).
3316 : : */
5215 rhaas@postgresql.org 3317 :CBC 2083 : proclocktag.myLock = lock;
3318 : 2083 : proclocktag.myProc = proc;
3319 : :
3320 : 2083 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3321 : :
3322 : 2083 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3323 : : &proclocktag,
3324 : : proclock_hashcode,
3325 : : HASH_FIND,
3326 : : NULL);
3327 [ - + ]: 2083 : if (!proclock)
5215 rhaas@postgresql.org 3328 [ # # ]:UBC 0 : elog(PANIC, "failed to re-find shared proclock object");
3329 : :
3330 : : /*
3331 : : * Double-check that we are actually holding a lock of the type we want to
3332 : : * release.
3333 : : */
5215 rhaas@postgresql.org 3334 [ - + ]:CBC 2083 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3335 : : {
3336 : : PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
5215 rhaas@postgresql.org 3337 :UBC 0 : LWLockRelease(partitionLock);
3338 [ # # ]: 0 : elog(WARNING, "you don't own a lock of type %s",
3339 : : lockMethodTable->lockModeNames[lockmode]);
3340 : 0 : return;
3341 : : }
3342 : :
3343 : : /*
3344 : : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3345 : : */
5215 rhaas@postgresql.org 3346 :CBC 2083 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3347 : :
3348 : 2083 : CleanUpLock(lock, proclock,
3349 : : lockMethodTable, hashcode,
3350 : : wakeupNeeded);
3351 : :
3352 : 2083 : LWLockRelease(partitionLock);
3353 : :
3354 : : /*
3355 : : * Decrement strong lock count. This logic is needed only for 2PC.
3356 : : */
3357 [ + + ]: 2083 : if (decrement_strong_lock_count
4062 3358 [ + - + + : 715 : && ConflictsWithRelationFastPath(locktag, lockmode))
+ - + + ]
3359 : : {
4836 bruce@momjian.us 3360 : 71 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3361 : :
5163 rhaas@postgresql.org 3362 [ - + ]: 71 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4170 3363 [ - + ]: 71 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
5163 3364 : 71 : FastPathStrongRelationLocks->count[fasthashcode]--;
3365 : 71 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3366 : : }
3367 : : }
3368 : :
3369 : : /*
3370 : : * CheckForSessionAndXactLocks
3371 : : * Check to see if transaction holds both session-level and xact-level
3372 : : * locks on the same object; if so, throw an error.
3373 : : *
3374 : : * If we have both session- and transaction-level locks on the same object,
3375 : : * PREPARE TRANSACTION must fail. This should never happen with regular
3376 : : * locks, since we only take those at session level in some special operations
3377 : : * like VACUUM. It's possible to hit this with advisory locks, though.
3378 : : *
3379 : : * It would be nice if we could keep the session hold and give away the
3380 : : * transactional hold to the prepared xact. However, that would require two
3381 : : * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3382 : : * available when it comes time for PostPrepare_Locks to do the deed.
3383 : : * So for now, we error out while we can still do so safely.
3384 : : *
3385 : : * Since the LOCALLOCK table stores a separate entry for each lockmode,
3386 : : * we can't implement this check by examining LOCALLOCK entries in isolation.
3387 : : * We must build a transient hashtable that is indexed by locktag only.
3388 : : */
3389 : : static void
1505 tgl@sss.pgh.pa.us 3390 : 290 : CheckForSessionAndXactLocks(void)
3391 : : {
3392 : : typedef struct
3393 : : {
3394 : : LOCKTAG lock; /* identifies the lockable object */
3395 : : bool sessLock; /* is any lockmode held at session level? */
3396 : : bool xactLock; /* is any lockmode held at xact level? */
3397 : : } PerLockTagEntry;
3398 : :
3399 : : HASHCTL hash_ctl;
3400 : : HTAB *lockhtab;
3401 : : HASH_SEQ_STATUS status;
3402 : : LOCALLOCK *locallock;
3403 : :
3404 : : /* Create a local hash table keyed by LOCKTAG only */
3405 : 290 : hash_ctl.keysize = sizeof(LOCKTAG);
3406 : 290 : hash_ctl.entrysize = sizeof(PerLockTagEntry);
3407 : 290 : hash_ctl.hcxt = CurrentMemoryContext;
3408 : :
3409 : 290 : lockhtab = hash_create("CheckForSessionAndXactLocks table",
3410 : : 256, /* arbitrary initial size */
3411 : : &hash_ctl,
3412 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3413 : :
3414 : : /* Scan local lock table to find entries for each LOCKTAG */
3415 : 290 : hash_seq_init(&status, LockMethodLocalHash);
3416 : :
3417 [ + + ]: 990 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3418 : : {
3419 : 702 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3420 : : PerLockTagEntry *hentry;
3421 : : bool found;
3422 : : int i;
3423 : :
3424 : : /*
3425 : : * Ignore VXID locks. We don't want those to be held by prepared
3426 : : * transactions, since they aren't meaningful after a restart.
3427 : : */
3428 [ - + ]: 702 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
1505 tgl@sss.pgh.pa.us 3429 :UBC 0 : continue;
3430 : :
3431 : : /* Ignore it if we don't actually hold the lock */
1505 tgl@sss.pgh.pa.us 3432 [ - + ]:CBC 702 : if (locallock->nLocks <= 0)
1505 tgl@sss.pgh.pa.us 3433 :UBC 0 : continue;
3434 : :
3435 : : /* Otherwise, find or make an entry in lockhtab */
1505 tgl@sss.pgh.pa.us 3436 :CBC 702 : hentry = (PerLockTagEntry *) hash_search(lockhtab,
943 peter@eisentraut.org 3437 : 702 : &locallock->tag.lock,
3438 : : HASH_ENTER, &found);
1505 tgl@sss.pgh.pa.us 3439 [ + + ]: 702 : if (!found) /* initialize, if newly created */
3440 : 653 : hentry->sessLock = hentry->xactLock = false;
3441 : :
3442 : : /* Scan to see if we hold lock at session or xact level or both */
3443 [ + + ]: 1404 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3444 : : {
3445 [ + + ]: 702 : if (lockOwners[i].owner == NULL)
3446 : 9 : hentry->sessLock = true;
3447 : : else
3448 : 693 : hentry->xactLock = true;
3449 : : }
3450 : :
3451 : : /*
3452 : : * We can throw error immediately when we see both types of locks; no
3453 : : * need to wait around to see if there are more violations.
3454 : : */
3455 [ + + + + ]: 702 : if (hentry->sessLock && hentry->xactLock)
3456 [ + - ]: 2 : ereport(ERROR,
3457 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3458 : : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3459 : : }
3460 : :
3461 : : /* Success, so clean up */
3462 : 288 : hash_destroy(lockhtab);
3463 : 288 : }
3464 : :
3465 : : /*
3466 : : * AtPrepare_Locks
3467 : : * Do the preparatory work for a PREPARE: make 2PC state file records
3468 : : * for all locks currently held.
3469 : : *
3470 : : * Session-level locks are ignored, as are VXID locks.
3471 : : *
3472 : : * For the most part, we don't need to touch shared memory for this ---
3473 : : * all the necessary state information is in the locallock table.
3474 : : * Fast-path locks are an exception, however: we move any such locks to
3475 : : * the main table before allowing PREPARE TRANSACTION to succeed.
3476 : : */
3477 : : void
7386 3478 : 290 : AtPrepare_Locks(void)
3479 : : {
3480 : : HASH_SEQ_STATUS status;
3481 : : LOCALLOCK *locallock;
3482 : :
3483 : : /* First, verify there aren't locks of both xact and session level */
1505 3484 : 290 : CheckForSessionAndXactLocks();
3485 : :
3486 : : /* Now do the per-locallock cleanup work */
7211 3487 : 288 : hash_seq_init(&status, LockMethodLocalHash);
3488 : :
7386 3489 [ + + ]: 985 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3490 : : {
3491 : : TwoPhaseLockRecord record;
3492 : 697 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3493 : : bool haveSessionLock;
3494 : : bool haveXactLock;
3495 : : int i;
3496 : :
3497 : : /*
3498 : : * Ignore VXID locks. We don't want those to be held by prepared
3499 : : * transactions, since they aren't meaningful after a restart.
3500 : : */
6576 3501 [ - + ]: 697 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3502 : 7 : continue;
3503 : :
3504 : : /* Ignore it if we don't actually hold the lock */
7386 3505 [ - + ]: 697 : if (locallock->nLocks <= 0)
7386 tgl@sss.pgh.pa.us 3506 :UBC 0 : continue;
3507 : :
3508 : : /* Scan to see whether we hold it at session or transaction level */
4873 tgl@sss.pgh.pa.us 3509 :CBC 697 : haveSessionLock = haveXactLock = false;
7386 3510 [ + + ]: 1394 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3511 : : {
3512 [ + + ]: 697 : if (lockOwners[i].owner == NULL)
4873 3513 : 7 : haveSessionLock = true;
3514 : : else
3515 : 690 : haveXactLock = true;
3516 : : }
3517 : :
3518 : : /* Ignore it if we have only session lock */
3519 [ + + ]: 697 : if (!haveXactLock)
3520 : 7 : continue;
3521 : :
3522 : : /* This can't happen, because we already checked it */
3523 [ - + ]: 690 : if (haveSessionLock)
4873 tgl@sss.pgh.pa.us 3524 [ # # ]:UBC 0 : ereport(ERROR,
3525 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3526 : : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3527 : :
3528 : : /*
3529 : : * If the local lock was taken via the fast-path, we need to move it
3530 : : * to the primary lock table, or just get a pointer to the existing
3531 : : * primary lock table entry if by chance it's already been
3532 : : * transferred.
3533 : : */
5215 rhaas@postgresql.org 3534 [ + + ]:CBC 690 : if (locallock->proclock == NULL)
3535 : : {
5163 3536 : 299 : locallock->proclock = FastPathGetRelationLockEntry(locallock);
5215 3537 : 299 : locallock->lock = locallock->proclock->tag.myLock;
3538 : : }
3539 : :
3540 : : /*
3541 : : * Arrange to not release any strong lock count held by this lock
3542 : : * entry. We must retain the count until the prepared transaction is
3543 : : * committed or rolled back.
3544 : : */
2943 peter_e@gmx.net 3545 : 690 : locallock->holdsStrongLockCount = false;
3546 : :
3547 : : /*
3548 : : * Create a 2PC record.
3549 : : */
7386 tgl@sss.pgh.pa.us 3550 : 690 : memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3551 : 690 : record.lockmode = locallock->tag.mode;
3552 : :
3553 : 690 : RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3554 : : &record, sizeof(TwoPhaseLockRecord));
3555 : : }
3556 : 288 : }
3557 : :
3558 : : /*
3559 : : * PostPrepare_Locks
3560 : : * Clean up after successful PREPARE
3561 : : *
3562 : : * Here, we want to transfer ownership of our locks to a dummy PGPROC
3563 : : * that's now associated with the prepared transaction, and we want to
3564 : : * clean out the corresponding entries in the LOCALLOCK table.
3565 : : *
3566 : : * Note: by removing the LOCALLOCK entries, we are leaving dangling
3567 : : * pointers in the transaction's resource owner. This is OK at the
3568 : : * moment since resowner.c doesn't try to free locks retail at a toplevel
3569 : : * transaction commit or abort. We could alternatively zero out nLocks
3570 : : * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3571 : : * but that probably costs more cycles.
3572 : : */
3573 : : void
61 michael@paquier.xyz 3574 :GNC 288 : PostPrepare_Locks(FullTransactionId fxid)
3575 : : {
3576 : 288 : PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3577 : : HASH_SEQ_STATUS status;
3578 : : LOCALLOCK *locallock;
3579 : : LOCK *lock;
3580 : : PROCLOCK *proclock;
3581 : : PROCLOCKTAG proclocktag;
3582 : : int partition;
3583 : :
3584 : : /* Can't prepare a lock group follower. */
3499 rhaas@postgresql.org 3585 [ - + - - ]:CBC 288 : Assert(MyProc->lockGroupLeader == NULL ||
3586 : : MyProc->lockGroupLeader == MyProc);
3587 : :
3588 : : /* This is a critical section: any error means big trouble */
7386 tgl@sss.pgh.pa.us 3589 : 288 : START_CRIT_SECTION();
3590 : :
3591 : : /*
3592 : : * First we run through the locallock table and get rid of unwanted
3593 : : * entries, then we scan the process's proclocks and transfer them to the
3594 : : * target proc.
3595 : : *
3596 : : * We do this separately because we may have multiple locallock entries
3597 : : * pointing to the same proclock, and we daren't end up with any dangling
3598 : : * pointers.
3599 : : */
7211 3600 : 288 : hash_seq_init(&status, LockMethodLocalHash);
3601 : :
7386 3602 [ + + ]: 985 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3603 : : {
4873 3604 : 697 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3605 : : bool haveSessionLock;
3606 : : bool haveXactLock;
3607 : : int i;
3608 : :
7386 3609 [ + - - + ]: 697 : if (locallock->proclock == NULL || locallock->lock == NULL)
3610 : : {
3611 : : /*
3612 : : * We must've run out of shared memory while trying to set up this
3613 : : * lock. Just forget the local entry.
3614 : : */
7386 tgl@sss.pgh.pa.us 3615 [ # # ]:UBC 0 : Assert(locallock->nLocks == 0);
3616 : 0 : RemoveLocalLock(locallock);
3617 : 0 : continue;
3618 : : }
3619 : :
3620 : : /* Ignore VXID locks */
6576 tgl@sss.pgh.pa.us 3621 [ - + ]:CBC 697 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
6576 tgl@sss.pgh.pa.us 3622 :UBC 0 : continue;
3623 : :
3624 : : /* Scan to see whether we hold it at session or transaction level */
4873 tgl@sss.pgh.pa.us 3625 :CBC 697 : haveSessionLock = haveXactLock = false;
3626 [ + + ]: 1394 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3627 : : {
3628 [ + + ]: 697 : if (lockOwners[i].owner == NULL)
3629 : 7 : haveSessionLock = true;
3630 : : else
3631 : 690 : haveXactLock = true;
3632 : : }
3633 : :
3634 : : /* Ignore it if we have only session lock */
3635 [ + + ]: 697 : if (!haveXactLock)
3636 : 7 : continue;
3637 : :
3638 : : /* This can't happen, because we already checked it */
3639 [ - + ]: 690 : if (haveSessionLock)
4873 tgl@sss.pgh.pa.us 3640 [ # # ]:UBC 0 : ereport(PANIC,
3641 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3642 : : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3643 : :
3644 : : /* Mark the proclock to show we need to release this lockmode */
7386 tgl@sss.pgh.pa.us 3645 [ + - ]:CBC 690 : if (locallock->nLocks > 0)
3646 : 690 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3647 : :
3648 : : /* And remove the locallock hashtable entry */
3649 : 690 : RemoveLocalLock(locallock);
3650 : : }
3651 : :
3652 : : /*
3653 : : * Now, scan each lock partition separately.
3654 : : */
7209 3655 [ + + ]: 4896 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3656 : : {
3657 : : LWLock *partitionLock;
962 andres@anarazel.de 3658 : 4608 : dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3659 : : dlist_mutable_iter proclock_iter;
3660 : :
4240 rhaas@postgresql.org 3661 : 4608 : partitionLock = LockHashPartitionLockByIndex(partition);
3662 : :
3663 : : /*
3664 : : * If the proclock list for this partition is empty, we can skip
3665 : : * acquiring the partition lock. This optimization is safer than the
3666 : : * situation in LockReleaseAll, because we got rid of any fast-path
3667 : : * locks during AtPrepare_Locks, so there cannot be any case where
3668 : : * another backend is adding something to our lists now. For safety,
3669 : : * though, we code this the same way as in LockReleaseAll.
3670 : : */
962 andres@anarazel.de 3671 [ + + ]: 4608 : if (dlist_is_empty(procLocks))
7209 tgl@sss.pgh.pa.us 3672 : 3958 : continue; /* needn't examine this partition */
3673 : :
3674 : 650 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3675 : :
962 andres@anarazel.de 3676 [ + - + + ]: 1334 : dlist_foreach_modify(proclock_iter, procLocks)
3677 : : {
3678 : 684 : proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3679 : :
6985 tgl@sss.pgh.pa.us 3680 [ - + ]: 684 : Assert(proclock->tag.myProc == MyProc);
3681 : :
3682 : 684 : lock = proclock->tag.myLock;
3683 : :
3684 : : /* Ignore VXID locks */
6576 3685 [ + + ]: 684 : if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
4300 3686 : 34 : continue;
3687 : :
3688 : : PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3689 : : LOCK_PRINT("PostPrepare_Locks", lock, 0);
7209 3690 [ - + ]: 650 : Assert(lock->nRequested >= 0);
3691 [ - + ]: 650 : Assert(lock->nGranted >= 0);
3692 [ - + ]: 650 : Assert(lock->nGranted <= lock->nRequested);
3693 [ - + ]: 650 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
3694 : :
3695 : : /* Ignore it if nothing to release (must be a session lock) */
4873 3696 [ + + ]: 650 : if (proclock->releaseMask == 0)
4300 3697 : 7 : continue;
3698 : :
3699 : : /* Else we should be releasing all locks */
7209 3700 [ - + ]: 643 : if (proclock->releaseMask != proclock->holdMask)
7209 tgl@sss.pgh.pa.us 3701 [ # # ]:UBC 0 : elog(PANIC, "we seem to have dropped a bit somewhere");
3702 : :
3703 : : /*
3704 : : * We cannot simply modify proclock->tag.myProc to reassign
3705 : : * ownership of the lock, because that's part of the hash key and
3706 : : * the proclock would then be in the wrong hash chain. Instead
3707 : : * use hash_update_hash_key. (We used to create a new hash entry,
3708 : : * but that risks out-of-memory failure if other processes are
3709 : : * busy making proclocks too.) We must unlink the proclock from
3710 : : * our procLink chain and put it into the new proc's chain, too.
3711 : : *
3712 : : * Note: the updated proclock hash key will still belong to the
3713 : : * same hash partition, cf proclock_hash(). So the partition lock
3714 : : * we already hold is sufficient for this.
3715 : : */
962 andres@anarazel.de 3716 :CBC 643 : dlist_delete(&proclock->procLink);
3717 : :
3718 : : /*
3719 : : * Create the new hash key for the proclock.
3720 : : */
6985 tgl@sss.pgh.pa.us 3721 : 643 : proclocktag.myLock = lock;
3722 : 643 : proclocktag.myProc = newproc;
3723 : :
3724 : : /*
3725 : : * Update groupLeader pointer to point to the new proc. (We'd
3726 : : * better not be a member of somebody else's lock group!)
3727 : : */
3499 rhaas@postgresql.org 3728 [ - + ]: 643 : Assert(proclock->groupLeader == proclock->tag.myProc);
3729 : 643 : proclock->groupLeader = newproc;
3730 : :
3731 : : /*
3732 : : * Update the proclock. We should not find any existing entry for
3733 : : * the same hash key, since there can be only one entry for any
3734 : : * given lock with my own proc.
3735 : : */
4619 tgl@sss.pgh.pa.us 3736 [ - + ]: 643 : if (!hash_update_hash_key(LockMethodProcLockHash,
3737 : : proclock,
3738 : : &proclocktag))
4619 tgl@sss.pgh.pa.us 3739 [ # # ]:UBC 0 : elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3740 : :
3741 : : /* Re-link into the new proc's proclock list */
962 andres@anarazel.de 3742 :CBC 643 : dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3743 : :
3744 : : PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3745 : : } /* loop over PROCLOCKs within this partition */
3746 : :
7209 tgl@sss.pgh.pa.us 3747 : 650 : LWLockRelease(partitionLock);
3748 : : } /* loop over partitions */
3749 : :
7386 3750 [ - + ]: 288 : END_CRIT_SECTION();
3751 : 288 : }
3752 : :
3753 : :
3754 : : /*
3755 : : * Estimate shared-memory space used for lock tables
3756 : : */
3757 : : Size
373 heikki.linnakangas@i 3758 : 1909 : LockManagerShmemSize(void)
3759 : : {
7211 tgl@sss.pgh.pa.us 3760 : 1909 : Size size = 0;
3761 : : long max_table_size;
3762 : :
3763 : : /* lock hash table */
7209 3764 : 1909 : max_table_size = NLOCKENTS();
6985 3765 : 1909 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3766 : :
3767 : : /* proclock hash table */
7209 3768 : 1909 : max_table_size *= 2;
6985 3769 : 1909 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3770 : :
3771 : : /*
3772 : : * Since NLOCKENTS is only an estimate, add 10% safety margin.
3773 : : */
7322 3774 : 1909 : size = add_size(size, size / 10);
3775 : :
10226 bruce@momjian.us 3776 : 1909 : return size;
3777 : : }
3778 : :
/*
 * GetLockStatusData - Return a summary of the lock manager's internal
 * status, for use in a user-level reporting function.
 *
 * The return data consists of an array of LockInstanceData objects,
 * which are a lightly abstracted version of the PROCLOCK data structures,
 * i.e. there is one entry for each unique lock and interested PGPROC.
 * It is the caller's responsibility to match up related items (such as
 * references to the same lockable object or PGPROC) if wanted.
 *
 * The design goal is to hold the LWLocks for as short a time as possible;
 * thus, this function simply makes a copy of the necessary data and releases
 * the locks, allowing the caller to contemplate and format the data for as
 * long as it pleases.
 */
LockData *
GetLockStatusData(void)
{
	LockData   *data;
	PROCLOCK   *proclock;
	HASH_SEQ_STATUS seqstat;
	int			els;			/* allocated capacity of data->locks[] */
	int			el;				/* number of entries filled so far */
	int			i;

	data = (LockData *) palloc(sizeof(LockData));

	/* Guess how much space we'll need. */
	els = MaxBackends;
	el = 0;
	data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);

	/*
	 * First, we iterate through the per-backend fast-path arrays, locking
	 * them one at a time. This might produce an inconsistent picture of the
	 * system state, but taking all of those LWLocks at the same time seems
	 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
	 * matter too much, because none of these locks can be involved in lock
	 * conflicts anyway - anything that might must be present in the main lock
	 * table. (For the same reason, we don't sweat about making leaderPid
	 * completely valid. We cannot safely dereference another backend's
	 * lockGroupLeader field without holding all lock partition locks, and
	 * it's not worth that.)
	 */
	for (i = 0; i < ProcGlobal->allProcCount; ++i)
	{
		PGPROC	   *proc = &ProcGlobal->allProcs[i];

		/* Skip backends with pid=0, as they don't hold fast-path locks */
		if (proc->pid == 0)
			continue;

		LWLockAcquire(&proc->fpInfoLock, LW_SHARED);

		for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
		{
			/* Skip groups without registered fast-path locks */
			if (proc->fpLockBits[g] == 0)
				continue;

			for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
			{
				LockInstanceData *instance;
				uint32		f = FAST_PATH_SLOT(g, j);
				uint32		lockbits = FAST_PATH_GET_BITS(proc, f);

				/* Skip unallocated slots */
				if (!lockbits)
					continue;

				/* Enlarge the output array if it has filled up. */
				if (el >= els)
				{
					els += MaxBackends;
					data->locks = (LockInstanceData *)
						repalloc(data->locks, sizeof(LockInstanceData) * els);
				}

				instance = &data->locks[el];
				SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
									 proc->fpRelId[f]);
				instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
				instance->waitLockMode = NoLock;
				instance->vxid.procNumber = proc->vxid.procNumber;
				instance->vxid.localTransactionId = proc->vxid.lxid;
				instance->pid = proc->pid;
				instance->leaderPid = proc->pid;
				instance->fastpath = true;

				/*
				 * Successfully taking fast path lock means there were no
				 * conflicting locks.
				 */
				instance->waitStart = 0;

				el++;
			}
		}

		/* Also report the backend's fast-path virtual transaction lock. */
		if (proc->fpVXIDLock)
		{
			VirtualTransactionId vxid;
			LockInstanceData *instance;

			if (el >= els)
			{
				els += MaxBackends;
				data->locks = (LockInstanceData *)
					repalloc(data->locks, sizeof(LockInstanceData) * els);
			}

			vxid.procNumber = proc->vxid.procNumber;
			vxid.localTransactionId = proc->fpLocalTransactionId;

			instance = &data->locks[el];
			SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
			/* VXID locks are always held in ExclusiveLock mode */
			instance->holdMask = LOCKBIT_ON(ExclusiveLock);
			instance->waitLockMode = NoLock;
			instance->vxid.procNumber = proc->vxid.procNumber;
			instance->vxid.localTransactionId = proc->vxid.lxid;
			instance->pid = proc->pid;
			instance->leaderPid = proc->pid;
			instance->fastpath = true;
			instance->waitStart = 0;

			el++;
		}

		LWLockRelease(&proc->fpInfoLock);
	}

	/*
	 * Next, acquire lock on the entire shared lock data structure. We do
	 * this so that, at least for locks in the primary lock table, the state
	 * will be self-consistent.
	 *
	 * Since this is a read-only operation, we take shared instead of
	 * exclusive lock. There's not a whole lot of point to this, because all
	 * the normal operations require exclusive lock, but it doesn't hurt
	 * anything either. It will at least allow two backends to do
	 * GetLockStatusData in parallel.
	 *
	 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

	/*
	 * Now we can safely count the number of proclocks.  The total is the
	 * fast-path entries already collected plus everything in the shared
	 * proclock hash, which can't change while we hold all partition locks.
	 */
	data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
	if (data->nelements > els)
	{
		els = data->nelements;
		data->locks = (LockInstanceData *)
			repalloc(data->locks, sizeof(LockInstanceData) * els);
	}

	/* Now scan the tables to copy the data */
	hash_seq_init(&seqstat, LockMethodProcLockHash);

	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
	{
		PGPROC	   *proc = proclock->tag.myProc;
		LOCK	   *lock = proclock->tag.myLock;
		LockInstanceData *instance = &data->locks[el];

		memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
		instance->holdMask = proclock->holdMask;
		/* Report a wait mode only if the proc is waiting on this very lock */
		if (proc->waitLock == proclock->tag.myLock)
			instance->waitLockMode = proc->waitLockMode;
		else
			instance->waitLockMode = NoLock;
		instance->vxid.procNumber = proc->vxid.procNumber;
		instance->vxid.localTransactionId = proc->vxid.lxid;
		instance->pid = proc->pid;
		instance->leaderPid = proclock->groupLeader->pid;
		instance->fastpath = false;
		instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);

		el++;
	}

	/*
	 * And release locks. We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to lock
	 * them in increasing order; we don't want to release the other process
	 * until it can get all the locks it needs. (2) This avoids O(N^2)
	 * behavior inside LWLockRelease.
	 */
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(LockHashPartitionLockByIndex(i));

	Assert(el == data->nelements);

	return data;
}
3973 : :
/*
 * GetBlockerStatusData - Return a summary of the lock manager's state
 * concerning locks that are blocking the specified PID or any member of
 * the PID's lock group, for use in a user-level reporting function.
 *
 * For each PID within the lock group that is awaiting some heavyweight lock,
 * the return data includes an array of LockInstanceData objects, which are
 * the same data structure used by GetLockStatusData; but unlike that function,
 * this one reports only the PROCLOCKs associated with the lock that that PID
 * is blocked on. (Hence, all the locktags should be the same for any one
 * blocked PID.) In addition, we return an array of the PIDs of those backends
 * that are ahead of the blocked PID in the lock's wait queue. These can be
 * compared with the PIDs in the LockInstanceData objects to determine which
 * waiters are ahead of or behind the blocked PID in the queue.
 *
 * If blocked_pid isn't a valid backend PID or nothing in its lock group is
 * waiting on any heavyweight lock, return empty arrays.
 *
 * The design goal is to hold the LWLocks for as short a time as possible;
 * thus, this function simply makes a copy of the necessary data and releases
 * the locks, allowing the caller to contemplate and format the data for as
 * long as it pleases.
 */
BlockedProcsData *
GetBlockerStatusData(int blocked_pid)
{
	BlockedProcsData *data;
	PGPROC	   *proc;
	int			i;

	data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));

	/*
	 * Guess how much space we'll need, and preallocate.  Most of the time
	 * this will avoid needing to do repalloc while holding the LWLocks.  (We
	 * assume, but check with an Assert, that MaxBackends is enough entries
	 * for the procs[] array; the other two could need enlargement, though.)
	 */
	data->nprocs = data->nlocks = data->npids = 0;
	data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
	data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
	data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
	data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);

	/*
	 * In order to search the ProcArray for blocked_pid and assume that that
	 * entry won't immediately disappear under us, we must hold ProcArrayLock.
	 * In addition, to examine the lock grouping fields of any other backend,
	 * we must hold all the hash partition locks.  (Only one of those locks is
	 * actually relevant for any one lock group, but we can't know which one
	 * ahead of time.)  It's fairly annoying to hold all those locks
	 * throughout this, but it's no worse than GetLockStatusData(), and it
	 * does have the advantage that we're guaranteed to return a
	 * self-consistent instantaneous state.
	 */
	LWLockAcquire(ProcArrayLock, LW_SHARED);

	proc = BackendPidGetProcWithLock(blocked_pid);

	/* Nothing to do if it's gone */
	if (proc != NULL)
	{
		/*
		 * Acquire lock on the entire shared lock data structure.  See notes
		 * in GetLockStatusData().
		 */
		for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
			LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

		if (proc->lockGroupLeader == NULL)
		{
			/* Easy case, proc is not a lock group member */
			GetSingleProcBlockerStatusData(proc, data);
		}
		else
		{
			/* Examine all procs in proc's lock group */
			dlist_iter	iter;

			dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
			{
				PGPROC	   *memberProc;

				memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
				GetSingleProcBlockerStatusData(memberProc, data);
			}
		}

		/*
		 * And release locks.  See notes in GetLockStatusData().
		 */
		for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
			LWLockRelease(LockHashPartitionLockByIndex(i));

		/* procs[] was sized at MaxBackends; verify that was indeed enough */
		Assert(data->nprocs <= data->maxprocs);
	}

	LWLockRelease(ProcArrayLock);

	return data;
}
4075 : :
4076 : : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4077 : : static void
4078 : 1957 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
4079 : : {
4080 : 1957 : LOCK *theLock = blocked_proc->waitLock;
4081 : : BlockedProcData *bproc;
4082 : : dlist_iter proclock_iter;
4083 : : dlist_iter proc_iter;
4084 : : dclist_head *waitQueue;
4085 : : int queue_size;
4086 : :
4087 : : /* Nothing to do if this proc is not blocked */
4088 [ + + ]: 1957 : if (theLock == NULL)
4089 : 822 : return;
4090 : :
4091 : : /* Set up a procs[] element */
4092 : 1135 : bproc = &data->procs[data->nprocs++];
4093 : 1135 : bproc->pid = blocked_proc->pid;
4094 : 1135 : bproc->first_lock = data->nlocks;
4095 : 1135 : bproc->first_waiter = data->npids;
4096 : :
4097 : : /*
4098 : : * We may ignore the proc's fast-path arrays, since nothing in those could
4099 : : * be related to a contended lock.
4100 : : */
4101 : :
4102 : : /* Collect all PROCLOCKs associated with theLock */
962 andres@anarazel.de 4103 [ + - + + ]: 3455 : dlist_foreach(proclock_iter, &theLock->procLocks)
4104 : : {
4105 : 2320 : PROCLOCK *proclock =
4106 : 2320 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3484 tgl@sss.pgh.pa.us 4107 : 2320 : PGPROC *proc = proclock->tag.myProc;
4108 : 2320 : LOCK *lock = proclock->tag.myLock;
4109 : : LockInstanceData *instance;
4110 : :
4111 [ - + ]: 2320 : if (data->nlocks >= data->maxlocks)
4112 : : {
1243 rhaas@postgresql.org 4113 :UBC 0 : data->maxlocks += MaxBackends;
3484 tgl@sss.pgh.pa.us 4114 : 0 : data->locks = (LockInstanceData *)
4115 : 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4116 : : }
4117 : :
3484 tgl@sss.pgh.pa.us 4118 :CBC 2320 : instance = &data->locks[data->nlocks];
4119 : 2320 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4120 : 2320 : instance->holdMask = proclock->holdMask;
4121 [ + + ]: 2320 : if (proc->waitLock == lock)
4122 : 1180 : instance->waitLockMode = proc->waitLockMode;
4123 : : else
4124 : 1140 : instance->waitLockMode = NoLock;
552 heikki.linnakangas@i 4125 : 2320 : instance->vxid.procNumber = proc->vxid.procNumber;
4126 : 2320 : instance->vxid.localTransactionId = proc->vxid.lxid;
3484 tgl@sss.pgh.pa.us 4127 : 2320 : instance->pid = proc->pid;
4128 : 2320 : instance->leaderPid = proclock->groupLeader->pid;
4129 : 2320 : instance->fastpath = false;
4130 : 2320 : data->nlocks++;
4131 : : }
4132 : :
4133 : : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4134 : 1135 : waitQueue = &(theLock->waitProcs);
962 andres@anarazel.de 4135 : 1135 : queue_size = dclist_count(waitQueue);
4136 : :
3484 tgl@sss.pgh.pa.us 4137 [ - + ]: 1135 : if (queue_size > data->maxpids - data->npids)
4138 : : {
1243 rhaas@postgresql.org 4139 :UBC 0 : data->maxpids = Max(data->maxpids + MaxBackends,
4140 : : data->npids + queue_size);
3484 tgl@sss.pgh.pa.us 4141 : 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
4142 : 0 : sizeof(int) * data->maxpids);
4143 : : }
4144 : :
4145 : : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
962 andres@anarazel.de 4146 [ + - + - ]:CBC 1157 : dclist_foreach(proc_iter, waitQueue)
4147 : : {
4148 : 1157 : PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4149 : :
1067 drowley@postgresql.o 4150 [ + + ]: 1157 : if (queued_proc == blocked_proc)
3484 tgl@sss.pgh.pa.us 4151 : 1135 : break;
1067 drowley@postgresql.o 4152 : 22 : data->waiter_pids[data->npids++] = queued_proc->pid;
4153 : 22 : queued_proc = (PGPROC *) queued_proc->links.next;
4154 : : }
4155 : :
3484 tgl@sss.pgh.pa.us 4156 : 1135 : bproc->num_locks = data->nlocks - bproc->first_lock;
4157 : 1135 : bproc->num_waiters = data->npids - bproc->first_waiter;
4158 : : }
4159 : :
/*
 * Returns a list of currently held AccessExclusiveLocks, for use by
 * LogStandbySnapshot().  The result is a palloc'd array,
 * with the number of elements returned into *nlocks.
 *
 * XXX This currently takes a lock on all partitions of the lock table,
 * but it's possible to do better.  By reference counting locks and storing
 * the value in the ProcArray entry for each backend we could tell if any
 * locks need recording without having to acquire the partition locks and
 * scan the lock table.  Whether that's worth the additional overhead
 * is pretty dubious though.
 */
xl_standby_lock *
GetRunningTransactionLocks(int *nlocks)
{
	xl_standby_lock *accessExclusiveLocks;
	PROCLOCK   *proclock;
	HASH_SEQ_STATUS seqstat;
	int			i;
	int			index;			/* number of result entries filled */
	int			els;			/* total proclock count = array capacity */

	/*
	 * Acquire lock on the entire shared lock data structure.
	 *
	 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

	/* Now we can safely count the number of proclocks */
	els = hash_get_num_entries(LockMethodProcLockHash);

	/*
	 * Allocating enough space for all locks in the lock table is overkill,
	 * but it's more convenient and faster than having to enlarge the array.
	 */
	accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));

	/* Now scan the tables to copy the data */
	hash_seq_init(&seqstat, LockMethodProcLockHash);

	/*
	 * If lock is a currently granted AccessExclusiveLock then it will have
	 * just one proclock holder, so locks are never accessed twice in this
	 * particular case. Don't copy this code for use elsewhere because in the
	 * general case this will give you duplicate locks when looking at
	 * non-exclusive lock types.
	 */
	index = 0;
	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
	{
		/* make sure this definition matches the one used in LockAcquire */
		if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
			proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
		{
			PGPROC	   *proc = proclock->tag.myProc;
			LOCK	   *lock = proclock->tag.myLock;
			TransactionId xid = proc->xid;

			/*
			 * Don't record locks for transactions if we know they have
			 * already issued their WAL record for commit but not yet released
			 * lock. It is still possible that we see locks held by already
			 * complete transactions, if they haven't yet zeroed their xids.
			 */
			if (!TransactionIdIsValid(xid))
				continue;

			/* For a relation locktag, field1/field2 are the DB and rel OIDs */
			accessExclusiveLocks[index].xid = xid;
			accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
			accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;

			index++;
		}
	}

	Assert(index <= els);

	/*
	 * And release locks. We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to lock
	 * them in increasing order; we don't want to release the other process
	 * until it can get all the locks it needs. (2) This avoids O(N^2)
	 * behavior inside LWLockRelease.
	 */
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(LockHashPartitionLockByIndex(i));

	*nlocks = index;
	return accessExclusiveLocks;
}
4252 : :
4253 : : /* Provide the textual name of any lock mode */
4254 : : const char *
7211 tgl@sss.pgh.pa.us 4255 : 10648 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4256 : : {
4257 [ + - - + ]: 10648 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4258 [ + - - + ]: 10648 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4259 : 10648 : return LockMethods[lockmethodid]->lockModeNames[mode];
4260 : : }
4261 : :
#ifdef LOCK_DEBUG
/*
 * Dump all locks in the given proc's myProcLocks lists.
 *
 * Caller is responsible for having acquired appropriate LWLocks.
 */
void
DumpLocks(PGPROC *proc)
{
	int			i;

	if (proc == NULL)
		return;

	/* If the proc is sleeping on a lock, show that first */
	if (proc->waitLock)
		LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);

	/* Walk the proc's held locks, one partition list at a time */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
	{
		dlist_head *procLocks = &proc->myProcLocks[i];
		dlist_iter	iter;

		dlist_foreach(iter, procLocks)
		{
			PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
			LOCK	   *lock = proclock->tag.myLock;

			Assert(proclock->tag.myProc == proc);
			PROCLOCK_PRINT("DumpLocks", proclock);
			LOCK_PRINT("DumpLocks", lock, 0);
		}
	}
}

/*
 * Dump all lmgr locks.
 *
 * Caller is responsible for having acquired appropriate LWLocks.
 */
void
DumpAllLocks(void)
{
	PGPROC	   *proc;
	PROCLOCK   *proclock;
	LOCK	   *lock;
	HASH_SEQ_STATUS status;

	proc = MyProc;

	/* If this backend is itself waiting, show that first */
	if (proc && proc->waitLock)
		LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);

	/* Scan the entire shared proclock hash table */
	hash_seq_init(&status, LockMethodProcLockHash);

	while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
	{
		PROCLOCK_PRINT("DumpAllLocks", proclock);

		/* Defensive: log rather than crash if the back-link is missing */
		lock = proclock->tag.myLock;
		if (lock)
			LOCK_PRINT("DumpAllLocks", lock, 0);
		else
			elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
	}
}
#endif							/* LOCK_DEBUG */
4328 : :
4329 : : /*
4330 : : * LOCK 2PC resource manager's routines
4331 : : */
4332 : :
/*
 * Re-acquire a lock belonging to a transaction that was prepared.
 *
 * Because this function is run at db startup, re-acquiring the locks should
 * never conflict with running transactions because there are none.  We
 * assume that the lock state represented by the stored 2PC files is legal.
 *
 * When switching from Hot Standby mode to normal operation, the locks will
 * be already held by the startup process. The locks are acquired for the new
 * procs without checking for conflicts, so we don't get a conflict between the
 * startup process and the dummy procs, even though we will momentarily have
 * a situation where two procs are holding the same AccessExclusiveLock,
 * which isn't normally possible because of the conflict. If we're in standby
 * mode, but a recovery snapshot hasn't been established yet, it's possible
 * that some but not all of the locks are already held by the startup process.
 *
 * This approach is simple, but also a bit dangerous, because if there isn't
 * enough shared memory to acquire the locks, an error will be thrown, which
 * is promoted to FATAL and recovery will abort, bringing down postmaster.
 * A safer approach would be to transfer the locks like we do in
 * AtPrepare_Locks, but then again, in hot standby mode it's possible for
 * read-only backends to use up all the shared lock memory anyway, so that
 * replaying the WAL record that needs to acquire a lock will throw an error
 * and PANIC anyway.
 */
void
lock_twophase_recover(FullTransactionId fxid, uint16 info,
					  void *recdata, uint32 len)
{
	TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
	/* The lock is re-acquired on behalf of the 2PC dummy proc, not ours */
	PGPROC	   *proc = TwoPhaseGetDummyProc(fxid, false);
	LOCKTAG    *locktag;
	LOCKMODE	lockmode;
	LOCKMETHODID lockmethodid;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	PROCLOCKTAG proclocktag;
	bool		found;
	uint32		hashcode;
	uint32		proclock_hashcode;
	int			partition;
	LWLock	   *partitionLock;
	LockMethod	lockMethodTable;

	Assert(len == sizeof(TwoPhaseLockRecord));
	locktag = &rec->locktag;
	lockmode = rec->lockmode;
	lockmethodid = locktag->locktag_lockmethodid;

	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];

	hashcode = LockTagHashCode(locktag);
	partition = LockHashPartition(hashcode);
	partitionLock = LockHashPartitionLock(hashcode);

	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/*
	 * Find or create a lock with this tag.
	 */
	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
												locktag,
												hashcode,
												HASH_ENTER_NULL,
												&found);
	/* HASH_ENTER_NULL returns NULL instead of erroring on shmem exhaustion */
	if (!lock)
	{
		LWLockRelease(partitionLock);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory"),
				 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
	}

	/*
	 * if it's a new lock object, initialize it
	 */
	if (!found)
	{
		lock->grantMask = 0;
		lock->waitMask = 0;
		dlist_init(&lock->procLocks);
		dclist_init(&lock->waitProcs);
		lock->nRequested = 0;
		lock->nGranted = 0;
		MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
		MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
		LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
	}
	else
	{
		LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
		Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
		Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
		Assert(lock->nGranted <= lock->nRequested);
	}

	/*
	 * Create the hash key for the proclock table.
	 */
	proclocktag.myLock = lock;
	proclocktag.myProc = proc;

	proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);

	/*
	 * Find or create a proclock entry with this tag
	 */
	proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
														&proclocktag,
														proclock_hashcode,
														HASH_ENTER_NULL,
														&found);
	if (!proclock)
	{
		/* Oops, not enough shmem for the proclock */
		if (lock->nRequested == 0)
		{
			/*
			 * There are no other requestors of this lock, so garbage-collect
			 * the lock object.  We *must* do this to avoid a permanent leak
			 * of shared memory, because there won't be anything to cause
			 * anyone to release the lock object later.
			 */
			Assert(dlist_is_empty(&lock->procLocks));
			if (!hash_search_with_hash_value(LockMethodLockHash,
											 &(lock->tag),
											 hashcode,
											 HASH_REMOVE,
											 NULL))
				elog(PANIC, "lock table corrupted");
		}
		LWLockRelease(partitionLock);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory"),
				 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
	}

	/*
	 * If new, initialize the new entry
	 */
	if (!found)
	{
		/* A 2PC dummy proc is never part of a lock group; it leads itself */
		Assert(proc->lockGroupLeader == NULL);
		proclock->groupLeader = proc;
		proclock->holdMask = 0;
		proclock->releaseMask = 0;
		/* Add proclock to appropriate lists */
		dlist_push_tail(&lock->procLocks, &proclock->lockLink);
		dlist_push_tail(&proc->myProcLocks[partition],
						&proclock->procLink);
		PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
	}
	else
	{
		PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
		Assert((proclock->holdMask & ~lock->grantMask) == 0);
	}

	/*
	 * lock->nRequested and lock->requested[] count the total number of
	 * requests, whether granted or waiting, so increment those immediately.
	 */
	lock->nRequested++;
	lock->requested[lockmode]++;
	Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));

	/*
	 * We shouldn't already hold the desired lock.
	 */
	if (proclock->holdMask & LOCKBIT_ON(lockmode))
		elog(ERROR, "lock %s on object %u/%u/%u is already held",
			 lockMethodTable->lockModeNames[lockmode],
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3);

	/*
	 * We ignore any possible conflicts and just grant ourselves the lock. Not
	 * only because we don't bother, but also to avoid deadlocks when
	 * switching from standby to normal mode. See function comment.
	 */
	GrantLock(lock, proclock, lockmode);

	/*
	 * Bump strong lock count, to make sure any fast-path lock requests won't
	 * be granted without consulting the primary lock table.
	 */
	if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
	{
		uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);

		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
		FastPathStrongRelationLocks->count[fasthashcode]++;
		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
	}

	LWLockRelease(partitionLock);
}
4534 : :
4535 : : /*
4536 : : * Re-acquire a lock belonging to a transaction that was prepared, when
4537 : : * starting up into hot standby mode.
4538 : : */
4539 : : void
61 michael@paquier.xyz 4540 :UNC 0 : lock_twophase_standby_recover(FullTransactionId fxid, uint16 info,
4541 : : void *recdata, uint32 len)
4542 : : {
5740 simon@2ndQuadrant.co 4543 :UBC 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4544 : : LOCKTAG *locktag;
4545 : : LOCKMODE lockmode;
4546 : : LOCKMETHODID lockmethodid;
4547 : :
4548 [ # # ]: 0 : Assert(len == sizeof(TwoPhaseLockRecord));
4549 : 0 : locktag = &rec->locktag;
4550 : 0 : lockmode = rec->lockmode;
4551 : 0 : lockmethodid = locktag->locktag_lockmethodid;
4552 : :
4553 [ # # # # ]: 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4554 [ # # ]: 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4555 : :
4556 [ # # ]: 0 : if (lockmode == AccessExclusiveLock &&
4557 [ # # ]: 0 : locktag->locktag_type == LOCKTAG_RELATION)
4558 : : {
61 michael@paquier.xyz 4559 :UNC 0 : StandbyAcquireAccessExclusiveLock(XidFromFullTransactionId(fxid),
4560 : : locktag->locktag_field1 /* dboid */ ,
4561 : : locktag->locktag_field2 /* reloid */ );
4562 : : }
5740 simon@2ndQuadrant.co 4563 :UBC 0 : }
4564 : :
4565 : :
4566 : : /*
4567 : : * 2PC processing routine for COMMIT PREPARED case.
4568 : : *
4569 : : * Find and release the lock indicated by the 2PC record.
4570 : : */
4571 : : void
61 michael@paquier.xyz 4572 :GNC 715 : lock_twophase_postcommit(FullTransactionId fxid, uint16 info,
4573 : : void *recdata, uint32 len)
4574 : : {
7386 tgl@sss.pgh.pa.us 4575 :CBC 715 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
61 michael@paquier.xyz 4576 :GNC 715 : PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4577 : : LOCKTAG *locktag;
4578 : : LOCKMETHODID lockmethodid;
4579 : : LockMethod lockMethodTable;
4580 : :
7386 tgl@sss.pgh.pa.us 4581 [ - + ]:CBC 715 : Assert(len == sizeof(TwoPhaseLockRecord));
4582 : 715 : locktag = &rec->locktag;
4583 : 715 : lockmethodid = locktag->locktag_lockmethodid;
4584 : :
7211 4585 [ + - - + ]: 715 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
7386 tgl@sss.pgh.pa.us 4586 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
7211 tgl@sss.pgh.pa.us 4587 :CBC 715 : lockMethodTable = LockMethods[lockmethodid];
4588 : :
5215 rhaas@postgresql.org 4589 : 715 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
7386 tgl@sss.pgh.pa.us 4590 : 715 : }
4591 : :
4592 : : /*
4593 : : * 2PC processing routine for ROLLBACK PREPARED case.
4594 : : *
4595 : : * This is actually just the same as the COMMIT case.
4596 : : */
4597 : : void
61 michael@paquier.xyz 4598 :GNC 126 : lock_twophase_postabort(FullTransactionId fxid, uint16 info,
4599 : : void *recdata, uint32 len)
4600 : : {
4601 : 126 : lock_twophase_postcommit(fxid, info, recdata, len);
7386 tgl@sss.pgh.pa.us 4602 :CBC 126 : }
4603 : :
/*
 * VirtualXactLockTableInsert
 *
 * Take vxid lock via the fast-path.  There can't be any pre-existing
 * lockers, as we haven't advertised this vxid via the ProcArray yet.
 *
 * Since MyProc->fpLocalTransactionId will normally contain the same data
 * as MyProc->vxid.lxid, you might wonder if we really need both.  The
 * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
 * examined by procarray.c, while fpLocalTransactionId is protected by
 * fpInfoLock and is used only by the locking subsystem.  Doing it this
 * way makes it easier to verify that there are no funny race conditions.
 *
 * We don't bother recording this lock in the local lock table, since it's
 * only ever released at the end of a transaction.  Instead,
 * LockReleaseAll() calls VirtualXactLockTableCleanup().
 */
void
VirtualXactLockTableInsert(VirtualTransactionId vxid)
{
	Assert(VirtualTransactionIdIsValid(vxid));

	/* All reads/writes of the fast-path VXID state happen under fpInfoLock. */
	LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);

	/* Sanity checks: this proc is the vxid's owner and holds no prior VXID lock. */
	Assert(MyProc->vxid.procNumber == vxid.procNumber);
	Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
	Assert(MyProc->fpVXIDLock == false);

	/* Record the fast-path lock: flag plus the lxid it covers. */
	MyProc->fpVXIDLock = true;
	MyProc->fpLocalTransactionId = vxid.localTransactionId;

	LWLockRelease(&MyProc->fpInfoLock);
}
4637 : :
4638 : : /*
4639 : : * VirtualXactLockTableCleanup
4640 : : *
4641 : : * Check whether a VXID lock has been materialized; if so, release it,
4642 : : * unblocking waiters.
4643 : : */
4644 : : void
4300 tgl@sss.pgh.pa.us 4645 : 319177 : VirtualXactLockTableCleanup(void)
4646 : : {
4647 : : bool fastpath;
4648 : : LocalTransactionId lxid;
4649 : :
552 heikki.linnakangas@i 4650 [ - + ]: 319177 : Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4651 : :
4652 : : /*
4653 : : * Clean up shared memory state.
4654 : : */
1940 tgl@sss.pgh.pa.us 4655 : 319177 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4656 : :
5147 rhaas@postgresql.org 4657 : 319177 : fastpath = MyProc->fpVXIDLock;
4658 : 319177 : lxid = MyProc->fpLocalTransactionId;
4659 : 319177 : MyProc->fpVXIDLock = false;
4660 : 319177 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4661 : :
1940 tgl@sss.pgh.pa.us 4662 : 319177 : LWLockRelease(&MyProc->fpInfoLock);
4663 : :
4664 : : /*
4665 : : * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4666 : : * that means someone transferred the lock to the main lock table.
4667 : : */
5147 rhaas@postgresql.org 4668 [ + + + + ]: 319177 : if (!fastpath && LocalTransactionIdIsValid(lxid))
4669 : : {
4670 : : VirtualTransactionId vxid;
4671 : : LOCKTAG locktag;
4672 : :
552 heikki.linnakangas@i 4673 : 251 : vxid.procNumber = MyProcNumber;
5147 rhaas@postgresql.org 4674 : 251 : vxid.localTransactionId = lxid;
4675 : 251 : SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4676 : :
4677 : 251 : LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4678 : : &locktag, ExclusiveLock, false);
4679 : : }
4680 : 319177 : }
4681 : :
/*
 * XactLockForVirtualXact
 *
 * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
 * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid).  Unlike those
 * functions, it assumes "xid" is never a subtransaction and that "xid" is
 * prepared, committed, or aborted.
 *
 * If !TransactionIdIsValid(xid), this locks every prepared XID having been
 * known as "vxid" before its PREPARE TRANSACTION.
 *
 * Returns true once all relevant XIDs have completed, or false only when
 * wait = false and a conditional lock acquisition failed.
 */
static bool
XactLockForVirtualXact(VirtualTransactionId vxid,
					   TransactionId xid, bool wait)
{
	bool		more = false;

	/* There is no point to wait for 2PCs if you have no 2PCs. */
	if (max_prepared_xacts == 0)
		return true;

	/*
	 * Loop: more than one prepared XID may have been known under this vxid,
	 * so TwoPhaseGetXidByVirtualXID() may set "more" and we iterate until
	 * no further matches remain.
	 */
	do
	{
		LockAcquireResult lar;
		LOCKTAG		tag;

		/* Clear state from previous iterations. */
		if (more)
		{
			xid = InvalidTransactionId;
			more = false;
		}

		/* If we have no xid, try to find one. */
		if (!TransactionIdIsValid(xid))
			xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
		if (!TransactionIdIsValid(xid))
		{
			/* No (further) prepared XID under this vxid: nothing to wait for. */
			Assert(!more);
			return true;
		}

		/*
		 * Check or wait for XID completion.  When wait is false the acquire
		 * is conditional (dontWait = true) and may report NOT_AVAIL.
		 */
		SET_LOCKTAG_TRANSACTION(tag, xid);
		lar = LockAcquire(&tag, ShareLock, false, !wait);
		if (lar == LOCKACQUIRE_NOT_AVAIL)
			return false;
		/* Acquired means the XID is complete; drop the lock immediately. */
		LockRelease(&tag, ShareLock, false);
	} while (more);

	return true;
}
4734 : :
/*
 * VirtualXactLock
 *
 * If wait = true, wait as long as the given VXID or any XID acquired by the
 * same transaction is still running.  Then, return true.
 *
 * If wait = false, just check whether that VXID or one of those XIDs is still
 * running, and return true or false.
 */
bool
VirtualXactLock(VirtualTransactionId vxid, bool wait)
{
	LOCKTAG		tag;
	PGPROC	   *proc;
	TransactionId xid = InvalidTransactionId;

	Assert(VirtualTransactionIdIsValid(vxid));

	if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
		/* no vxid lock; localTransactionId is a normal, locked XID */
		return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);

	SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);

	/*
	 * If a lock table entry must be made, this is the PGPROC on whose behalf
	 * it must be done.  Note that the transaction might end or the PGPROC
	 * might be reassigned to a new backend before we get around to examining
	 * it, but it doesn't matter.  If we find upon examination that the
	 * relevant lxid is no longer running here, that's enough to prove that
	 * it's no longer running anywhere.
	 */
	proc = ProcNumberGetProc(vxid.procNumber);
	if (proc == NULL)
		/* Backend slot is empty; fall through to checking prepared XIDs. */
		return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);

	/*
	 * We must acquire this lock before checking the procNumber and lxid
	 * against the ones we're waiting for.  The target backend will only set
	 * or clear lxid while holding this lock.
	 */
	LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);

	if (proc->vxid.procNumber != vxid.procNumber
		|| proc->fpLocalTransactionId != vxid.localTransactionId)
	{
		/* VXID ended */
		LWLockRelease(&proc->fpInfoLock);
		return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
	}

	/*
	 * If we aren't asked to wait, there's no need to set up a lock table
	 * entry.  The transaction is still in progress, so just return false.
	 */
	if (!wait)
	{
		LWLockRelease(&proc->fpInfoLock);
		return false;
	}

	/*
	 * OK, we're going to need to sleep on the VXID.  But first, we must set
	 * up the primary lock table entry, if needed (ie, convert the proc's
	 * fast-path lock on its VXID to a regular lock).
	 */
	if (proc->fpVXIDLock)
	{
		PROCLOCK   *proclock;
		uint32		hashcode;
		LWLock	   *partitionLock;

		hashcode = LockTagHashCode(&tag);

		/* Main lock table manipulation requires the partition LWLock. */
		partitionLock = LockHashPartitionLock(hashcode);
		LWLockAcquire(partitionLock, LW_EXCLUSIVE);

		/* Create the LOCK/PROCLOCK entries on behalf of the target proc. */
		proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
									&tag, hashcode, ExclusiveLock);
		if (!proclock)
		{
			/* Out of shared memory: release both locks before erroring out. */
			LWLockRelease(partitionLock);
			LWLockRelease(&proc->fpInfoLock);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of shared memory"),
					 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
		}
		/* Grant the target proc its ExclusiveLock in the main table. */
		GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);

		LWLockRelease(partitionLock);

		/* The fast-path slot no longer represents the lock. */
		proc->fpVXIDLock = false;
	}

	/*
	 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
	 * search.  The proc might have assigned this XID but not yet locked it,
	 * in which case the proc will lock this XID before releasing the VXID.
	 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
	 * so we won't save an XID of a different VXID.  It doesn't matter whether
	 * we save this before or after setting up the primary lock table entry.
	 */
	xid = proc->xid;

	/* Done with proc->fpLockBits */
	LWLockRelease(&proc->fpInfoLock);

	/* Time to wait. */
	(void) LockAcquire(&tag, ShareLock, false, false);

	LockRelease(&tag, ShareLock, false);
	return XactLockForVirtualXact(vxid, xid, wait);
}
4849 : :
4850 : : /*
4851 : : * LockWaiterCount
4852 : : *
4853 : : * Find the number of lock requester on this locktag
4854 : : */
4855 : : int
3438 rhaas@postgresql.org 4856 : 63632 : LockWaiterCount(const LOCKTAG *locktag)
4857 : : {
4858 : 63632 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4859 : : LOCK *lock;
4860 : : bool found;
4861 : : uint32 hashcode;
4862 : : LWLock *partitionLock;
4863 : 63632 : int waiters = 0;
4864 : :
4865 [ + - - + ]: 63632 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
3438 rhaas@postgresql.org 4866 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4867 : :
3438 rhaas@postgresql.org 4868 :CBC 63632 : hashcode = LockTagHashCode(locktag);
4869 : 63632 : partitionLock = LockHashPartitionLock(hashcode);
4870 : 63632 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4871 : :
4872 : 63632 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4873 : : locktag,
4874 : : hashcode,
4875 : : HASH_FIND,
4876 : : &found);
4877 [ + + ]: 63632 : if (found)
4878 : : {
4879 [ - + ]: 15 : Assert(lock != NULL);
4880 : 15 : waiters = lock->nRequested;
4881 : : }
4882 : 63632 : LWLockRelease(partitionLock);
4883 : :
4884 : 63632 : return waiters;
4885 : : }
|