Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * lwlock.c
4 : : * Lightweight lock manager
5 : : *
6 : : * Lightweight locks are intended primarily to provide mutual exclusion of
7 : : * access to shared-memory data structures. Therefore, they offer both
8 : : * exclusive and shared lock modes (to support read/write and read-only
9 : : * access to a shared object). There are few other frammishes. User-level
10 : : * locking should be done with the full lock manager --- which depends on
11 : : * LWLocks to protect its shared state.
12 : : *
13 : : * In addition to exclusive and shared modes, lightweight locks can be used to
14 : : * wait until a variable changes value. Acquiring the lock with LWLockAcquire
15 : : * does not reset the variable; it keeps the value it had when the lock was
16 : : * last released, and it can be updated without releasing the lock by calling
17 : : * LWLockUpdateVar. LWLockWaitForVar waits for the variable to be updated,
18 : : * or until the lock is free. When releasing the lock with
19 : : * LWLockReleaseClearVar() the value can be set to a value appropriate for a
20 : : * free lock. The meaning of the variable is up to the caller; the
21 : : * lightweight lock code just assigns and compares it.
22 : : *
23 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
24 : : * Portions Copyright (c) 1994, Regents of the University of California
25 : : *
26 : : * IDENTIFICATION
27 : : * src/backend/storage/lmgr/lwlock.c
28 : : *
29 : : * NOTES:
30 : : *
31 : : * This used to be a pretty straightforward reader-writer lock
32 : : * implementation, in which the internal state was protected by a
33 : : * spinlock. Unfortunately the overhead of taking the spinlock proved to be
34 : : * too high for workloads/locks that were taken in shared mode very
35 : : * frequently. Often we were spinning in the (obviously exclusive) spinlock,
36 : : * while trying to acquire a shared lock that was actually free.
37 : : *
38 : : * Thus a new implementation was devised that provides wait-free shared lock
39 : : * acquisition for locks that aren't exclusively locked.
40 : : *
41 : : * The basic idea is to have a single atomic variable 'lockcount' instead of
42 : : * the formerly separate shared and exclusive counters and to use atomic
43 : : * operations to acquire the lock. That's fairly easy to do for plain
44 : : * rw-spinlocks, but a lot harder for something like LWLocks that want to wait
45 : : * in the OS.
46 : : *
47 : : * For lock acquisition we use an atomic compare-and-exchange on the lockcount
48 : : * variable. For exclusive lock we swap in a sentinel value
49 : : * (LW_VAL_EXCLUSIVE), for shared locks we count the number of holders.
50 : : *
51 : : * To release the lock we use an atomic decrement. If the new value is
52 : : * zero (we get that atomically), we know we can/have to release
53 : : * waiters.
54 : : *
55 : : * Obviously it is important that the sentinel value for exclusive locks
56 : : * doesn't conflict with the maximum number of possible share lockers -
57 : : * luckily MAX_BACKENDS makes that easily possible.
58 : : *
59 : : *
60 : : * The attentive reader might have noticed that naively doing the above has a
61 : : * glaring race condition: We try to lock using the atomic operations and
62 : : * notice that we have to wait. Unfortunately by the time we have finished
63 : : * queuing, the former locker very well might have already finished its
64 : : * work. That's problematic because we're now stuck waiting inside the OS.
65 : : *
66 : : * To mitigate those races we use a phased attempt at locking:
67 : : * Phase 1: Try to do it atomically, if we succeed, nice
68 : : * Phase 2: Add ourselves to the waitqueue of the lock
69 : : * Phase 3: Try to grab the lock again, if we succeed, remove ourselves from
70 : : * the queue
71 : : * Phase 4: Sleep till wake-up, goto Phase 1
72 : : *
73 : : * This protects us against the problem above: a release cannot slip past us
74 : : * unnoticed, since after Phase 2 we are queued and Phase 3 rechecks the lock.
75 : : * -------------------------------------------------------------------------
76 : : */
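/*
 * Illustrative sketch: the compare-and-exchange acquisition described above,
 * reduced to a minimal standalone form using C11 <stdatomic.h> rather than
 * the pg_atomic API used below. TOY_VAL_EXCLUSIVE and toy_try_shared are
 * hypothetical names; the real logic lives in LWLockAttemptLock() further
 * down in this file.
 */
#if 0							/* sketch only, never compiled */
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_VAL_EXCLUSIVE (1u << 24)	/* stand-in for LW_VAL_EXCLUSIVE */

/* One attempt at taking the lock in shared mode; returns true on success. */
static bool
toy_try_shared(atomic_uint *state)
{
	unsigned int old = atomic_load(state);

	while (true)
	{
		if (old & TOY_VAL_EXCLUSIVE)
			return false;		/* exclusively held; caller must queue and wait */

		/*
		 * Add one shared holder. On failure the CAS refreshes 'old' with the
		 * current value, so just loop and re-check. An exclusive acquisition
		 * would instead require the whole count to be zero and would swap in
		 * the sentinel value.
		 */
		if (atomic_compare_exchange_weak(state, &old, old + 1))
			return true;
	}
}
#endif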
77 : : #include "postgres.h"
78 : :
79 : : #include "miscadmin.h"
80 : : #include "pg_trace.h"
81 : : #include "pgstat.h"
82 : : #include "port/pg_bitutils.h"
83 : : #include "storage/proc.h"
84 : : #include "storage/proclist.h"
85 : : #include "storage/procnumber.h"
86 : : #include "storage/spin.h"
87 : : #include "utils/memutils.h"
88 : :
89 : : #ifdef LWLOCK_STATS
90 : : #include "utils/hsearch.h"
91 : : #endif
92 : :
93 : :
94 : : #define LW_FLAG_HAS_WAITERS ((uint32) 1 << 31)
95 : : #define LW_FLAG_RELEASE_OK ((uint32) 1 << 30)
96 : : #define LW_FLAG_LOCKED ((uint32) 1 << 29)
97 : : #define LW_FLAG_BITS 3
98 : : #define LW_FLAG_MASK (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))
99 : :
100 : : /* assumes MAX_BACKENDS is a (power of 2) - 1, checked below */
101 : : #define LW_VAL_EXCLUSIVE (MAX_BACKENDS + 1)
102 : : #define LW_VAL_SHARED 1
103 : :
104 : : /* already (power of 2)-1, i.e. suitable for a mask */
105 : : #define LW_SHARED_MASK MAX_BACKENDS
106 : : #define LW_LOCK_MASK (MAX_BACKENDS | LW_VAL_EXCLUSIVE)
107 : :
108 : :
109 : : StaticAssertDecl(((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0,
110 : : "MAX_BACKENDS + 1 needs to be a power of 2");
111 : :
112 : : StaticAssertDecl((MAX_BACKENDS & LW_FLAG_MASK) == 0,
113 : : "MAX_BACKENDS and LW_FLAG_MASK overlap");
114 : :
115 : : StaticAssertDecl((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
116 : : "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap");
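/*
 * Worked example (illustrative, assuming MAX_BACKENDS is 0x3FFFF as in
 * recent releases): LW_VAL_EXCLUSIVE is then 0x40000 (bit 18), LW_SHARED_MASK
 * covers bits 0-17, and the three flags occupy bits 29-31. A state word of
 * (LW_FLAG_RELEASE_OK | 3) = 0x40000003 therefore decodes as: no exclusive
 * holder, three shared holders, no waiters queued, wait list not locked, and
 * waiter wakeup allowed on release.
 */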
117 : :
118 : : /*
119 : : * There are three sorts of LWLock "tranches":
120 : : *
121 : : * 1. The individually-named locks defined in lwlocklist.h each have their
122 : : * own tranche. We absorb the names of these tranches from there into
123 : : * BuiltinTrancheNames here.
124 : : *
125 : : * 2. There are some predefined tranches for built-in groups of locks defined
126 : : * in lwlocklist.h. We absorb the names of these tranches, too.
127 : : *
128 : : * 3. Extensions can create new tranches, via either RequestNamedLWLockTranche
129 : : * or LWLockNewTrancheId. These names are stored in shared memory and can be
130 : : * accessed via LWLockTrancheNames.
131 : : *
132 : : * All these names are user-visible as wait event names, so choose with care
133 : : * ... and do not forget to update the documentation's list of wait events.
134 : : */
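/*
 * Illustrative sketch of the named-tranche path described in point 3 above
 * (hypothetical extension-side code, not part of this file): an extension
 * loaded via shared_preload_libraries requests its locks from its
 * shmem_request_hook and looks them up again after shared memory is set up.
 * The extension name and hook function names are assumptions.
 */
#if 0							/* sketch only, never compiled */
static void
my_extension_shmem_request(void)
{
	/* ask for four LWLocks in a tranche named after the extension */
	RequestNamedLWLockTranche("my_extension", 4);
}

static void
my_extension_shmem_startup(void)
{
	/* the locks were created and initialized by CreateLWLocks() */
	LWLockPadded *locks = GetNamedLWLockTranche("my_extension");

	LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
	/* ... manipulate the extension's shared state ... */
	LWLockRelease(&locks[0].lock);
}
#endif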
135 : : static const char *const BuiltinTrancheNames[] = {
136 : : #define PG_LWLOCK(id, lockname) [id] = CppAsString(lockname),
137 : : #define PG_LWLOCKTRANCHE(id, lockname) [LWTRANCHE_##id] = CppAsString(lockname),
138 : : #include "storage/lwlocklist.h"
139 : : #undef PG_LWLOCK
140 : : #undef PG_LWLOCKTRANCHE
141 : : };
142 : :
143 : : StaticAssertDecl(lengthof(BuiltinTrancheNames) ==
144 : : LWTRANCHE_FIRST_USER_DEFINED,
145 : : "missing entries in BuiltinTrancheNames[]");
146 : :
147 : : /*
148 : : * This is indexed by tranche ID minus LWTRANCHE_FIRST_USER_DEFINED, and
149 : : * points to the shared memory locations of the names of all
150 : : * dynamically-created tranches. Backends inherit the pointer by fork from the
151 : : * postmaster (except in the EXEC_BACKEND case, where we have special measures
152 : : * to pass it down).
153 : : */
154 : : char **LWLockTrancheNames = NULL;
155 : :
156 : : /*
157 : : * This points to the main array of LWLocks in shared memory. Backends inherit
158 : : * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
159 : : * where we have special measures to pass it down).
160 : : */
161 : : LWLockPadded *MainLWLockArray = NULL;
162 : :
163 : : /*
164 : : * We use this structure to keep track of locked LWLocks for release
165 : : * during error recovery. Normally, only a few will be held at once, but
166 : : * occasionally the number can be much higher.
167 : : */
168 : : #define MAX_SIMUL_LWLOCKS 200
169 : :
170 : : /* struct representing the LWLocks we're holding */
171 : : typedef struct LWLockHandle
172 : : {
173 : : LWLock *lock;
174 : : LWLockMode mode;
175 : : } LWLockHandle;
176 : :
177 : : static int num_held_lwlocks = 0;
178 : : static LWLockHandle held_lwlocks[MAX_SIMUL_LWLOCKS];
179 : :
180 : : /* struct representing the LWLock tranche request for named tranche */
181 : : typedef struct NamedLWLockTrancheRequest
182 : : {
183 : : char tranche_name[NAMEDATALEN];
184 : : int num_lwlocks;
185 : : } NamedLWLockTrancheRequest;
186 : :
187 : : /*
188 : : * NamedLWLockTrancheRequests is the valid length of the request array. These
189 : : * variables are non-static so that launch_backend.c can copy them to child
190 : : * processes in EXEC_BACKEND builds.
191 : : */
192 : : int NamedLWLockTrancheRequests = 0;
193 : : NamedLWLockTrancheRequest *NamedLWLockTrancheRequestArray = NULL;
194 : :
195 : : /* postmaster's local copy of the request array */
196 : : static NamedLWLockTrancheRequest *LocalNamedLWLockTrancheRequestArray = NULL;
197 : :
198 : : /* shared memory counter of registered tranches */
199 : : int *LWLockCounter = NULL;
200 : :
201 : : /* backend-local counter of registered tranches */
202 : : static int LocalLWLockCounter;
203 : :
204 : : #define MAX_NAMED_TRANCHES 256
205 : :
206 : : static void InitializeLWLocks(void);
207 : : static inline void LWLockReportWaitStart(LWLock *lock);
208 : : static inline void LWLockReportWaitEnd(void);
209 : : static const char *GetLWTrancheName(uint16 trancheId);
210 : :
211 : : #define T_NAME(lock) \
212 : : GetLWTrancheName((lock)->tranche)
213 : :
214 : : #ifdef LWLOCK_STATS
215 : : typedef struct lwlock_stats_key
216 : : {
217 : : int tranche;
218 : : void *instance;
219 : : } lwlock_stats_key;
220 : :
221 : : typedef struct lwlock_stats
222 : : {
223 : : lwlock_stats_key key;
224 : : int sh_acquire_count;
225 : : int ex_acquire_count;
226 : : int block_count;
227 : : int dequeue_self_count;
228 : : int spin_delay_count;
229 : : } lwlock_stats;
230 : :
231 : : static HTAB *lwlock_stats_htab;
232 : : static lwlock_stats lwlock_stats_dummy;
233 : : #endif
234 : :
235 : : #ifdef LOCK_DEBUG
236 : : bool Trace_lwlocks = false;
237 : :
238 : : inline static void
239 : : PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
240 : : {
241 : : /* hide statement & context here, otherwise the log is just too verbose */
242 : : if (Trace_lwlocks)
243 : : {
244 : : uint32 state = pg_atomic_read_u32(&lock->state);
245 : :
246 : : ereport(LOG,
247 : : (errhidestmt(true),
248 : : errhidecontext(true),
249 : : errmsg_internal("%d: %s(%s %p): excl %u shared %u haswaiters %u waiters %u rOK %d",
250 : : MyProcPid,
251 : : where, T_NAME(lock), lock,
252 : : (state & LW_VAL_EXCLUSIVE) != 0,
253 : : state & LW_SHARED_MASK,
254 : : (state & LW_FLAG_HAS_WAITERS) != 0,
255 : : pg_atomic_read_u32(&lock->nwaiters),
256 : : (state & LW_FLAG_RELEASE_OK) != 0)));
257 : : }
258 : : }
259 : :
260 : : inline static void
261 : : LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg)
262 : : {
263 : : /* hide statement & context here, otherwise the log is just too verbose */
264 : : if (Trace_lwlocks)
265 : : {
266 : : ereport(LOG,
267 : : (errhidestmt(true),
268 : : errhidecontext(true),
269 : : errmsg_internal("%s(%s %p): %s", where,
270 : : T_NAME(lock), lock, msg)));
271 : : }
272 : : }
273 : :
274 : : #else /* not LOCK_DEBUG */
275 : : #define PRINT_LWDEBUG(a,b,c) ((void)0)
276 : : #define LOG_LWDEBUG(a,b,c) ((void)0)
277 : : #endif /* LOCK_DEBUG */
278 : :
279 : : #ifdef LWLOCK_STATS
280 : :
281 : : static void init_lwlock_stats(void);
282 : : static void print_lwlock_stats(int code, Datum arg);
283 : : static lwlock_stats * get_lwlock_stats_entry(LWLock *lock);
284 : :
285 : : static void
286 : : init_lwlock_stats(void)
287 : : {
288 : : HASHCTL ctl;
289 : : static MemoryContext lwlock_stats_cxt = NULL;
290 : : static bool exit_registered = false;
291 : :
292 : : if (lwlock_stats_cxt != NULL)
293 : : MemoryContextDelete(lwlock_stats_cxt);
294 : :
295 : : /*
296 : : * The LWLock stats will be updated within a critical section, which
297 : : * requires allocating new hash entries. Allocations within a critical
298 : : * section are normally not allowed because running out of memory would
299 : : * lead to a PANIC, but LWLOCK_STATS is debugging code that's not normally
300 : : * turned on in production, so that's an acceptable risk. The hash entries
301 : : * are small, so the risk of running out of memory is minimal in practice.
302 : : */
303 : : lwlock_stats_cxt = AllocSetContextCreate(TopMemoryContext,
304 : : "LWLock stats",
305 : : ALLOCSET_DEFAULT_SIZES);
306 : : MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);
307 : :
308 : : ctl.keysize = sizeof(lwlock_stats_key);
309 : : ctl.entrysize = sizeof(lwlock_stats);
310 : : ctl.hcxt = lwlock_stats_cxt;
311 : : lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
312 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
313 : : if (!exit_registered)
314 : : {
315 : : on_shmem_exit(print_lwlock_stats, 0);
316 : : exit_registered = true;
317 : : }
318 : : }
319 : :
320 : : static void
321 : : print_lwlock_stats(int code, Datum arg)
322 : : {
323 : : HASH_SEQ_STATUS scan;
324 : : lwlock_stats *lwstats;
325 : :
326 : : hash_seq_init(&scan, lwlock_stats_htab);
327 : :
328 : : /* Grab an LWLock to keep different backends from mixing reports */
329 : : LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE);
330 : :
331 : : while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
332 : : {
333 : : fprintf(stderr,
334 : : "PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
335 : : MyProcPid, GetLWTrancheName(lwstats->key.tranche),
336 : : lwstats->key.instance, lwstats->sh_acquire_count,
337 : : lwstats->ex_acquire_count, lwstats->block_count,
338 : : lwstats->spin_delay_count, lwstats->dequeue_self_count);
339 : : }
340 : :
341 : : LWLockRelease(&MainLWLockArray[0].lock);
342 : : }
343 : :
344 : : static lwlock_stats *
345 : : get_lwlock_stats_entry(LWLock *lock)
346 : : {
347 : : lwlock_stats_key key;
348 : : lwlock_stats *lwstats;
349 : : bool found;
350 : :
351 : : /*
352 : : * During shared memory initialization, the hash table doesn't exist yet.
353 : : * Stats of that phase aren't very interesting, so just collect operations
354 : : * on all locks in a single dummy entry.
355 : : */
356 : : if (lwlock_stats_htab == NULL)
357 : : return &lwlock_stats_dummy;
358 : :
359 : : /* Fetch or create the entry. */
360 : : MemSet(&key, 0, sizeof(key));
361 : : key.tranche = lock->tranche;
362 : : key.instance = lock;
363 : : lwstats = hash_search(lwlock_stats_htab, &key, HASH_ENTER, &found);
364 : : if (!found)
365 : : {
366 : : lwstats->sh_acquire_count = 0;
367 : : lwstats->ex_acquire_count = 0;
368 : : lwstats->block_count = 0;
369 : : lwstats->dequeue_self_count = 0;
370 : : lwstats->spin_delay_count = 0;
371 : : }
372 : : return lwstats;
373 : : }
374 : : #endif /* LWLOCK_STATS */
375 : :
376 : :
377 : : /*
378 : : * Compute number of LWLocks required by named tranches. These will be
379 : : * allocated in the main array.
380 : : */
381 : : static int
1993 tgl@sss.pgh.pa.us 382 :CBC 2998 : NumLWLocksForNamedTranches(void)
383 : : {
3554 rhaas@postgresql.org 384 : 2998 : int numLocks = 0;
385 : : int i;
386 : :
387 [ + + ]: 3115 : for (i = 0; i < NamedLWLockTrancheRequests; i++)
388 : 117 : numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
389 : :
390 : 2998 : return numLocks;
391 : : }
392 : :
393 : : /*
394 : : * Compute shmem space needed for LWLocks and named tranches.
395 : : */
396 : : Size
8795 tgl@sss.pgh.pa.us 397 : 2998 : LWLockShmemSize(void)
398 : : {
399 : : Size size;
3548 rhaas@postgresql.org 400 : 2998 : int numLocks = NUM_FIXED_LWLOCKS;
401 : :
402 : : /*
403 : : * If re-initializing shared memory, the request array will no longer be
404 : : * accessible, so switch to the copy in postmaster's local memory. We'll
405 : : * copy it back into shared memory later when CreateLWLocks() is called
406 : : * again.
407 : : */
40 nathan@postgresql.or 408 [ - + ]:GNC 2998 : if (LocalNamedLWLockTrancheRequestArray)
40 nathan@postgresql.or 409 :UNC 0 : NamedLWLockTrancheRequestArray = LocalNamedLWLockTrancheRequestArray;
410 : :
411 : : /* Calculate total number of locks needed in the main array. */
1993 tgl@sss.pgh.pa.us 412 :CBC 2998 : numLocks += NumLWLocksForNamedTranches();
413 : :
414 : : /* Space for dynamic allocation counter. */
55 nathan@postgresql.or 415 :GNC 2998 : size = MAXALIGN(sizeof(int));
416 : :
417 : : /* Space for named tranches. */
418 : 2998 : size = add_size(size, mul_size(MAX_NAMED_TRANCHES, sizeof(char *)));
419 : 2998 : size = add_size(size, mul_size(MAX_NAMED_TRANCHES, NAMEDATALEN));
420 : :
421 : : /*
422 : : * Make space for named tranche requests. This is done for the benefit of
423 : : * EXEC_BACKEND builds, which otherwise wouldn't be able to call
424 : : * GetNamedLWLockTranche() outside postmaster.
425 : : */
47 426 : 2998 : size = add_size(size, mul_size(NamedLWLockTrancheRequests,
427 : : sizeof(NamedLWLockTrancheRequest)));
428 : :
429 : : /* Space for the LWLock array, plus room for cache line alignment. */
55 430 : 2998 : size = add_size(size, LWLOCK_PADDED_SIZE);
431 : 2998 : size = add_size(size, mul_size(numLocks, sizeof(LWLockPadded)));
432 : :
7374 tgl@sss.pgh.pa.us 433 :CBC 2998 : return size;
434 : : }
435 : :
436 : : /*
437 : : * Allocate shmem space for the main LWLock array and all tranches and
438 : : * initialize it.
439 : : */
440 : : void
8795 441 : 1049 : CreateLWLocks(void)
442 : : {
4292 rhaas@postgresql.org 443 [ + - ]: 1049 : if (!IsUnderPostmaster)
444 : : {
445 : 1049 : Size spaceLocks = LWLockShmemSize();
446 : : char *ptr;
447 : :
448 : : /* Allocate space */
449 : 1049 : ptr = (char *) ShmemAlloc(spaceLocks);
450 : :
451 : : /* Initialize the dynamic-allocation counter for tranches */
60 nathan@postgresql.or 452 :GNC 1049 : LWLockCounter = (int *) ptr;
453 : 1049 : *LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED;
55 454 : 1049 : ptr += MAXALIGN(sizeof(int));
455 : :
456 : : /* Initialize tranche names */
457 : 1049 : LWLockTrancheNames = (char **) ptr;
458 : 1049 : ptr += MAX_NAMED_TRANCHES * sizeof(char *);
459 [ + + ]: 269593 : for (int i = 0; i < MAX_NAMED_TRANCHES; i++)
460 : : {
461 : 268544 : LWLockTrancheNames[i] = ptr;
462 : 268544 : ptr += NAMEDATALEN;
463 : : }
464 : :
465 : : /*
466 : : * Move named tranche requests to shared memory. This is done for the
467 : : * benefit of EXEC_BACKEND builds, which otherwise wouldn't be able to
468 : : * call GetNamedLWLockTranche() outside postmaster.
469 : : */
47 470 [ + + ]: 1049 : if (NamedLWLockTrancheRequests > 0)
471 : : {
472 : : /*
473 : : * Save the pointer to the request array in postmaster's local
474 : : * memory. We'll need it if we ever need to re-initialize shared
475 : : * memory after a crash.
476 : : */
40 477 : 8 : LocalNamedLWLockTrancheRequestArray = NamedLWLockTrancheRequestArray;
478 : :
47 479 : 8 : memcpy(ptr, NamedLWLockTrancheRequestArray,
480 : : NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest));
481 : 8 : NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *) ptr;
482 : 8 : ptr += NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest);
483 : : }
484 : :
485 : : /* Ensure desired alignment of LWLock array */
4292 rhaas@postgresql.org 486 : 1049 : ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
487 : 1049 : MainLWLockArray = (LWLockPadded *) ptr;
488 : :
489 : : /* Initialize all LWLocks */
3547 rhaas@postgresql.org 490 :CBC 1049 : InitializeLWLocks();
491 : : }
492 : 1049 : }
493 : :
494 : : /*
495 : : * Initialize LWLocks that are fixed and those belonging to named tranches.
496 : : */
497 : : static void
498 : 1049 : InitializeLWLocks(void)
499 : : {
500 : : int id;
501 : : int i;
502 : : int j;
503 : : LWLockPadded *lock;
504 : :
505 : : /* Initialize all individual LWLocks in main array */
506 [ + + ]: 57695 : for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
3238 507 : 56646 : LWLockInitialize(&lock->lock, id);
508 : :
509 : : /* Initialize buffer mapping LWLocks in main array */
1799 michael@paquier.xyz 510 : 1049 : lock = MainLWLockArray + BUFFER_MAPPING_LWLOCK_OFFSET;
3547 rhaas@postgresql.org 511 [ + + ]: 135321 : for (id = 0; id < NUM_BUFFER_PARTITIONS; id++, lock++)
512 : 134272 : LWLockInitialize(&lock->lock, LWTRANCHE_BUFFER_MAPPING);
513 : :
514 : : /* Initialize lmgrs' LWLocks in main array */
1799 michael@paquier.xyz 515 : 1049 : lock = MainLWLockArray + LOCK_MANAGER_LWLOCK_OFFSET;
3547 rhaas@postgresql.org 516 [ + + ]: 17833 : for (id = 0; id < NUM_LOCK_PARTITIONS; id++, lock++)
517 : 16784 : LWLockInitialize(&lock->lock, LWTRANCHE_LOCK_MANAGER);
518 : :
519 : : /* Initialize predicate lmgrs' LWLocks in main array */
1799 michael@paquier.xyz 520 : 1049 : lock = MainLWLockArray + PREDICATELOCK_MANAGER_LWLOCK_OFFSET;
3547 rhaas@postgresql.org 521 [ + + ]: 17833 : for (id = 0; id < NUM_PREDICATELOCK_PARTITIONS; id++, lock++)
522 : 16784 : LWLockInitialize(&lock->lock, LWTRANCHE_PREDICATE_LOCK_MANAGER);
523 : :
524 : : /*
525 : : * Copy the info about any named tranches into shared memory (so that
526 : : * other processes can see it), and initialize the requested LWLocks.
527 : : */
528 [ + + ]: 1049 : if (NamedLWLockTrancheRequests > 0)
529 : : {
530 : 8 : lock = &MainLWLockArray[NUM_FIXED_LWLOCKS];
531 : :
532 [ + + ]: 47 : for (i = 0; i < NamedLWLockTrancheRequests; i++)
533 : : {
534 : : NamedLWLockTrancheRequest *request;
535 : : int tranche;
536 : :
537 : 39 : request = &NamedLWLockTrancheRequestArray[i];
55 nathan@postgresql.or 538 :GNC 39 : tranche = LWLockNewTrancheId(request->tranche_name);
539 : :
3547 rhaas@postgresql.org 540 [ + + ]:CBC 78 : for (j = 0; j < request->num_lwlocks; j++, lock++)
55 nathan@postgresql.or 541 :GNC 39 : LWLockInitialize(&lock->lock, tranche);
542 : : }
543 : : }
3547 rhaas@postgresql.org 544 :CBC 1049 : }
545 : :
546 : : /*
547 : : * InitLWLockAccess - initialize backend-local state needed to hold LWLocks
548 : : */
549 : : void
4138 heikki.linnakangas@i 550 : 19091 : InitLWLockAccess(void)
551 : : {
552 : : #ifdef LWLOCK_STATS
553 : : init_lwlock_stats();
554 : : #endif
555 : 19091 : }
556 : :
557 : : /*
558 : : * GetNamedLWLockTranche - returns the base address of the LWLocks belonging
559 : : * to the specified tranche.
560 : : *
561 : : * The caller needs to retrieve the requested number of LWLocks starting from
562 : : * the base lock address returned by this function. This can be used for
563 : : * tranches that were requested via the RequestNamedLWLockTranche() API.
564 : : */
565 : : LWLockPadded *
3554 rhaas@postgresql.org 566 : 9 : GetNamedLWLockTranche(const char *tranche_name)
567 : : {
568 : : int lock_pos;
569 : : int i;
570 : :
571 : : /*
572 : : * Obtain the position of the base address of the LWLocks belonging to the
573 : : * requested tranche_name in MainLWLockArray. LWLocks for named tranches
574 : : * are placed in MainLWLockArray after the fixed locks.
575 : : */
3548 576 : 9 : lock_pos = NUM_FIXED_LWLOCKS;
3554 577 [ + + ]: 41 : for (i = 0; i < NamedLWLockTrancheRequests; i++)
578 : : {
579 [ + + ]: 40 : if (strcmp(NamedLWLockTrancheRequestArray[i].tranche_name,
580 : : tranche_name) == 0)
581 : 8 : return &MainLWLockArray[lock_pos];
582 : :
3554 rhaas@postgresql.org 583 :GBC 32 : lock_pos += NamedLWLockTrancheRequestArray[i].num_lwlocks;
584 : : }
585 : :
1993 tgl@sss.pgh.pa.us 586 [ + - ]: 1 : elog(ERROR, "requested tranche is not registered");
587 : :
588 : : /* just to keep compiler quiet */
589 : : return NULL;
590 : : }
591 : :
592 : : /*
593 : : * Allocate a new tranche ID with the provided name.
594 : : */
595 : : int
55 nathan@postgresql.or 596 :GNC 276 : LWLockNewTrancheId(const char *name)
597 : : {
598 : : int result;
599 : :
600 [ + + ]: 276 : if (!name)
601 [ + - ]: 1 : ereport(ERROR,
602 : : (errcode(ERRCODE_INVALID_NAME),
603 : : errmsg("tranche name cannot be NULL")));
604 : :
605 [ + + ]: 275 : if (strlen(name) >= NAMEDATALEN)
606 [ + - ]: 1 : ereport(ERROR,
607 : : (errcode(ERRCODE_NAME_TOO_LONG),
608 : : errmsg("tranche name too long"),
609 : : errdetail("LWLock tranche names must be no longer than %d bytes.",
610 : : NAMEDATALEN - 1)));
611 : :
612 : : /*
613 : : * We use the ShmemLock spinlock to protect LWLockCounter and
614 : : * LWLockTrancheNames.
615 : : */
616 [ - + ]: 274 : SpinLockAcquire(ShmemLock);
617 : :
618 [ + + ]: 274 : if (*LWLockCounter - LWTRANCHE_FIRST_USER_DEFINED >= MAX_NAMED_TRANCHES)
619 : : {
620 : 1 : SpinLockRelease(ShmemLock);
621 [ + - ]: 1 : ereport(ERROR,
622 : : (errmsg("maximum number of tranches already registered"),
623 : : errdetail("No more than %d tranches may be registered.",
624 : : MAX_NAMED_TRANCHES)));
625 : : }
626 : :
627 : 273 : result = (*LWLockCounter)++;
628 : 273 : LocalLWLockCounter = *LWLockCounter;
629 : 273 : strlcpy(LWLockTrancheNames[result - LWTRANCHE_FIRST_USER_DEFINED], name, NAMEDATALEN);
630 : :
631 : 273 : SpinLockRelease(ShmemLock);
632 : :
633 : 273 : return result;
634 : : }
635 : :
636 : : /*
637 : : * RequestNamedLWLockTranche
638 : : * Request that extra LWLocks be allocated during postmaster
639 : : * startup.
640 : : *
641 : : * This may only be called via the shmem_request_hook of a library that is
642 : : * loaded into the postmaster via shared_preload_libraries. Calls from
643 : : * elsewhere will fail.
644 : : *
645 : : * The tranche name will be user-visible as a wait event name, so try to
646 : : * use a name that fits the style for those.
647 : : */
648 : : void
3554 rhaas@postgresql.org 649 :CBC 39 : RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
650 : : {
651 : : NamedLWLockTrancheRequest *request;
652 : : static int NamedLWLockTrancheRequestsAllocated;
653 : :
1264 654 [ - + ]: 39 : if (!process_shmem_requests_in_progress)
1264 rhaas@postgresql.org 655 [ # # ]:UBC 0 : elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
656 : :
55 nathan@postgresql.or 657 [ - + ]:GNC 39 : if (!tranche_name)
55 nathan@postgresql.or 658 [ # # ]:UNC 0 : ereport(ERROR,
659 : : (errcode(ERRCODE_INVALID_NAME),
660 : : errmsg("tranche name cannot be NULL")));
661 : :
55 nathan@postgresql.or 662 [ - + ]:GNC 39 : if (strlen(tranche_name) >= NAMEDATALEN)
55 nathan@postgresql.or 663 [ # # ]:UNC 0 : ereport(ERROR,
664 : : (errcode(ERRCODE_NAME_TOO_LONG),
665 : : errmsg("tranche name too long"),
666 : : errdetail("LWLock tranche names must be no longer than %d bytes.",
667 : : NAMEDATALEN - 1)));
668 : :
3554 rhaas@postgresql.org 669 [ + + ]:CBC 39 : if (NamedLWLockTrancheRequestArray == NULL)
670 : : {
54 nathan@postgresql.or 671 : 8 : NamedLWLockTrancheRequestsAllocated = 16;
3554 rhaas@postgresql.org 672 : 8 : NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
673 : 8 : MemoryContextAlloc(TopMemoryContext,
674 : : NamedLWLockTrancheRequestsAllocated
675 : : * sizeof(NamedLWLockTrancheRequest));
676 : : }
677 : :
54 nathan@postgresql.or 678 [ + + ]: 39 : if (NamedLWLockTrancheRequests >= NamedLWLockTrancheRequestsAllocated)
679 : : {
54 nathan@postgresql.or 680 :GBC 1 : int i = pg_nextpower2_32(NamedLWLockTrancheRequests + 1);
681 : :
682 : 1 : NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
683 : 1 : repalloc(NamedLWLockTrancheRequestArray,
684 : : i * sizeof(NamedLWLockTrancheRequest));
685 : 1 : NamedLWLockTrancheRequestsAllocated = i;
686 : : }
687 : :
3554 rhaas@postgresql.org 688 :CBC 39 : request = &NamedLWLockTrancheRequestArray[NamedLWLockTrancheRequests];
1993 tgl@sss.pgh.pa.us 689 : 39 : strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
3554 rhaas@postgresql.org 690 : 39 : request->num_lwlocks = num_lwlocks;
691 : 39 : NamedLWLockTrancheRequests++;
692 : 39 : }
693 : :
694 : : /*
695 : : * LWLockInitialize - initialize a new lwlock; it's initially unlocked
696 : : */
697 : : void
4292 698 : 11796427 : LWLockInitialize(LWLock *lock, int tranche_id)
699 : : {
700 : : /* verify the tranche_id is valid */
55 nathan@postgresql.or 701 :GNC 11796427 : (void) GetLWTrancheName(tranche_id);
702 : :
3960 andres@anarazel.de 703 :CBC 11796426 : pg_atomic_init_u32(&lock->state, LW_FLAG_RELEASE_OK);
704 : : #ifdef LOCK_DEBUG
705 : : pg_atomic_init_u32(&lock->nwaiters, 0);
706 : : #endif
4292 rhaas@postgresql.org 707 : 11796426 : lock->tranche = tranche_id;
3361 708 : 11796426 : proclist_init(&lock->waiters);
4292 709 : 11796426 : }
710 : :
711 : : /*
712 : : * Report start of wait event for light-weight locks.
713 : : *
714 : : * This function will be used by all the light-weight lock calls which
715 : : * need to wait to acquire the lock. It distinguishes the wait event
716 : : * based on the tranche and lock id.
717 : : */
718 : : static inline void
3519 719 : 231776 : LWLockReportWaitStart(LWLock *lock)
720 : : {
3238 721 : 231776 : pgstat_report_wait_start(PG_WAIT_LWLOCK | lock->tranche);
3519 722 : 231776 : }
723 : :
724 : : /*
725 : : * Report end of wait event for light-weight locks.
726 : : */
727 : : static inline void
3502 andres@anarazel.de 728 : 231776 : LWLockReportWaitEnd(void)
729 : : {
3519 rhaas@postgresql.org 730 : 231776 : pgstat_report_wait_end();
731 : 231776 : }
732 : :
733 : : /*
734 : : * Return the name of an LWLock tranche.
735 : : */
736 : : static const char *
1993 tgl@sss.pgh.pa.us 737 : 11796711 : GetLWTrancheName(uint16 trancheId)
738 : : {
739 : : /* Built-in tranche or individual LWLock? */
740 [ + + ]: 11796711 : if (trancheId < LWTRANCHE_FIRST_USER_DEFINED)
643 alvherre@alvh.no-ip. 741 : 11796071 : return BuiltinTrancheNames[trancheId];
742 : :
743 : : /*
744 : : * We only ever add new entries to LWLockTrancheNames, so most lookups can
745 : : * avoid taking the spinlock as long as the backend-local counter
746 : : * (LocalLWLockCounter) is greater than the requested tranche ID. Otherwise,
747 : : * we first need to update the backend-local counter with ShmemLock held
748 : : * before attempting the lookup again. In practice, the latter case is
749 : : * probably rare.
750 : : */
55 nathan@postgresql.or 751 [ + + ]:GNC 640 : if (trancheId >= LocalLWLockCounter)
752 : : {
753 [ - + ]: 1 : SpinLockAcquire(ShmemLock);
754 : 1 : LocalLWLockCounter = *LWLockCounter;
755 : 1 : SpinLockRelease(ShmemLock);
756 : :
757 [ + - ]: 1 : if (trancheId >= LocalLWLockCounter)
758 [ + - ]: 1 : elog(ERROR, "tranche %d is not registered", trancheId);
759 : : }
760 : :
761 : : /*
762 : : * It's an extension tranche, so look in LWLockTrancheNames.
763 : : */
764 : 639 : trancheId -= LWTRANCHE_FIRST_USER_DEFINED;
765 : :
1993 tgl@sss.pgh.pa.us 766 :GBC 639 : return LWLockTrancheNames[trancheId];
767 : : }
768 : :
769 : : /*
770 : : * Return an identifier for an LWLock based on the wait class and event.
771 : : */
772 : : const char *
1993 tgl@sss.pgh.pa.us 773 :CBC 284 : GetLWLockIdentifier(uint32 classId, uint16 eventId)
774 : : {
775 [ - + ]: 284 : Assert(classId == PG_WAIT_LWLOCK);
776 : : /* The event IDs are just tranche numbers. */
777 : 284 : return GetLWTrancheName(eventId);
778 : : }
779 : :
780 : : /*
781 : : * Internal function that tries to atomically acquire the lwlock in the passed
782 : : * in mode.
783 : : *
784 : : * This function will not block waiting for a lock to become free - that's the
785 : : * caller's job.
786 : : *
787 : : * Returns true if the lock isn't free and we need to wait.
788 : : */
789 : : static bool
3811 bruce@momjian.us 790 : 233209591 : LWLockAttemptLock(LWLock *lock, LWLockMode mode)
791 : : {
792 : : uint32 old_state;
793 : :
1096 peter@eisentraut.org 794 [ + + - + ]: 233209591 : Assert(mode == LW_EXCLUSIVE || mode == LW_SHARED);
795 : :
796 : : /*
797 : : * Read once outside the loop, later iterations will get the newer value
798 : : * via compare & exchange.
799 : : */
3742 andres@anarazel.de 800 : 233209591 : old_state = pg_atomic_read_u32(&lock->state);
801 : :
802 : : /* loop until we've determined whether we could acquire the lock or not */
803 : : while (true)
3960 804 : 34272 : {
805 : : uint32 desired_state;
806 : : bool lock_free;
807 : :
3742 808 : 233243863 : desired_state = old_state;
809 : :
3960 810 [ + + ]: 233243863 : if (mode == LW_EXCLUSIVE)
811 : : {
3742 812 : 115429385 : lock_free = (old_state & LW_LOCK_MASK) == 0;
3960 813 [ + + ]: 115429385 : if (lock_free)
814 : 115215591 : desired_state += LW_VAL_EXCLUSIVE;
815 : : }
816 : : else
817 : : {
3742 818 : 117814478 : lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
3960 819 [ + + ]: 117814478 : if (lock_free)
820 : 117795980 : desired_state += LW_VAL_SHARED;
821 : : }
822 : :
823 : : /*
824 : : * Attempt to swap in the state we are expecting. If we didn't see
825 : : * the lock as free, that's just the old value. If we saw it as free,
826 : : * we'll attempt to mark it acquired. The reason that we always swap
827 : : * in the value is that this doubles as a memory barrier. We could try
828 : : * to be smarter and only swap in values if we saw the lock as free,
829 : : * but benchmarks haven't shown it as beneficial so far.
830 : : *
831 : : * Retry if the value changed since we last looked at it.
832 : : */
833 [ + + ]: 233243863 : if (pg_atomic_compare_exchange_u32(&lock->state,
834 : : &old_state, desired_state))
835 : : {
836 [ + + ]: 233209591 : if (lock_free)
837 : : {
838 : : /* Great! Got the lock. */
839 : : #ifdef LOCK_DEBUG
840 : : if (mode == LW_EXCLUSIVE)
841 : : lock->owner = MyProc;
842 : : #endif
843 : 232983158 : return false;
844 : : }
845 : : else
3186 heikki.linnakangas@i 846 : 226433 : return true; /* somebody else has the lock */
847 : : }
848 : : }
849 : : pg_unreachable();
850 : : }
851 : :
852 : : /*
853 : : * Lock the LWLock's wait list against concurrent activity.
854 : : *
855 : : * NB: even though the wait list is locked, non-conflicting lock operations
856 : : * may still happen concurrently.
857 : : *
858 : : * Time spent holding mutex should be short!
859 : : */
860 : : static void
3488 andres@anarazel.de 861 : 2803489 : LWLockWaitListLock(LWLock *lock)
862 : : {
863 : : uint32 old_state;
864 : : #ifdef LWLOCK_STATS
865 : : lwlock_stats *lwstats;
866 : : uint32 delays = 0;
867 : :
868 : : lwstats = get_lwlock_stats_entry(lock);
869 : : #endif
870 : :
871 : : while (true)
872 : : {
873 : : /* always try once to acquire lock directly */
874 : 2811561 : old_state = pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_LOCKED);
875 [ + + ]: 2811561 : if (!(old_state & LW_FLAG_LOCKED))
876 : 2803489 : break; /* got lock */
877 : :
878 : : /* and then spin without atomic operations until lock is released */
879 : : {
880 : : SpinDelayStatus delayStatus;
881 : :
3484 882 : 8072 : init_local_spin_delay(&delayStatus);
883 : :
3488 884 [ + + ]: 77266 : while (old_state & LW_FLAG_LOCKED)
885 : : {
886 : 69194 : perform_spin_delay(&delayStatus);
887 : 69194 : old_state = pg_atomic_read_u32(&lock->state);
888 : : }
889 : : #ifdef LWLOCK_STATS
890 : : delays += delayStatus.delays;
891 : : #endif
892 : 8072 : finish_spin_delay(&delayStatus);
893 : : }
894 : :
895 : : /*
896 : : * Retry. The lock might obviously already be re-acquired by the time
897 : : * we're attempting to get it again.
898 : : */
899 : : }
900 : :
901 : : #ifdef LWLOCK_STATS
902 : : lwstats->spin_delay_count += delays;
903 : : #endif
904 : 2803489 : }
905 : :
906 : : /*
907 : : * Unlock the LWLock's wait list.
908 : : *
909 : : * Note that it can be more efficient to manipulate flags and release the
910 : : * locks in a single atomic operation.
911 : : */
912 : : static void
913 : 2556610 : LWLockWaitListUnlock(LWLock *lock)
914 : : {
915 : : uint32 old_state PG_USED_FOR_ASSERTS_ONLY;
916 : :
917 : 2556610 : old_state = pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_LOCKED);
918 : :
919 [ - + ]: 2556610 : Assert(old_state & LW_FLAG_LOCKED);
920 : 2556610 : }
921 : :
922 : : /*
923 : : * Wakeup all the lockers that currently have a chance to acquire the lock.
924 : : */
925 : : static void
3960 926 : 246879 : LWLockWakeup(LWLock *lock)
927 : : {
928 : : bool new_release_ok;
929 : 246879 : bool wokeup_somebody = false;
930 : : proclist_head wakeup;
931 : : proclist_mutable_iter iter;
932 : :
3361 rhaas@postgresql.org 933 : 246879 : proclist_init(&wakeup);
934 : :
3960 andres@anarazel.de 935 : 246879 : new_release_ok = true;
936 : :
937 : : /* lock wait list while collecting backends to wake up */
3488 938 : 246879 : LWLockWaitListLock(lock);
939 : :
3361 rhaas@postgresql.org 940 [ + + + + : 387461 : proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
+ + ]
941 : : {
942 : 236860 : PGPROC *waiter = GetPGProcByNumber(iter.cur);
943 : :
3960 andres@anarazel.de 944 [ + + + + ]: 236860 : if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
945 : 953 : continue;
946 : :
3361 rhaas@postgresql.org 947 : 235907 : proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
948 : 235907 : proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
949 : :
3960 andres@anarazel.de 950 [ + + ]: 235907 : if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
951 : : {
952 : : /*
953 : : * Prevent additional wakeups until retryer gets to run. Backends
954 : : * that are just waiting for the lock to become free don't retry
955 : : * automatically.
956 : : */
957 : 105383 : new_release_ok = false;
958 : :
959 : : /*
960 : : * Don't wakeup (further) exclusive locks.
961 : : */
962 : 105383 : wokeup_somebody = true;
963 : : }
964 : :
965 : : /*
966 : : * Signal that the process isn't on the wait list anymore. This allows
967 : : * LWLockDequeueSelf() to remove itself of the waitlist with a
968 : : * proclist_delete(), rather than having to check if it has been
969 : : * removed from the list.
970 : : */
1073 971 [ - + ]: 235907 : Assert(waiter->lwWaiting == LW_WS_WAITING);
972 : 235907 : waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
973 : :
974 : : /*
975 : : * Once we've woken up an exclusive lock, there's no point in waking
976 : : * up anybody else.
977 : : */
3811 bruce@momjian.us 978 [ + + ]: 235907 : if (waiter->lwWaitMode == LW_EXCLUSIVE)
3960 andres@anarazel.de 979 : 96278 : break;
980 : : }
981 : :
3361 rhaas@postgresql.org 982 [ + + - + ]: 246879 : Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS);
983 : :
984 : : /* unset required flags, and release lock, in one fell swoop */
985 : : {
986 : : uint32 old_state;
987 : : uint32 desired_state;
988 : :
3488 andres@anarazel.de 989 : 246879 : old_state = pg_atomic_read_u32(&lock->state);
990 : : while (true)
991 : : {
992 : 247252 : desired_state = old_state;
993 : :
994 : : /* compute desired flags */
995 : :
996 [ + + ]: 247252 : if (new_release_ok)
997 : 143236 : desired_state |= LW_FLAG_RELEASE_OK;
998 : : else
999 : 104016 : desired_state &= ~LW_FLAG_RELEASE_OK;
1000 : :
3361 rhaas@postgresql.org 1001 [ + + ]: 247252 : if (proclist_is_empty(&wakeup))
3488 andres@anarazel.de 1002 : 95665 : desired_state &= ~LW_FLAG_HAS_WAITERS;
1003 : :
1004 : 247252 : desired_state &= ~LW_FLAG_LOCKED; /* release lock */
1005 : :
1006 [ + + ]: 247252 : if (pg_atomic_compare_exchange_u32(&lock->state, &old_state,
1007 : : desired_state))
1008 : 246879 : break;
1009 : : }
1010 : : }
1011 : :
1012 : : /* Awaken any waiters I removed from the queue. */
3361 rhaas@postgresql.org 1013 [ + + + + : 482786 : proclist_foreach_modify(iter, &wakeup, lwWaitLink)
+ + ]
1014 : : {
1015 : 235907 : PGPROC *waiter = GetPGProcByNumber(iter.cur);
1016 : :
1017 : : LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
1018 : 235907 : proclist_delete(&wakeup, iter.cur, lwWaitLink);
1019 : :
1020 : : /*
1021 : : * Guarantee that lwWaiting being unset only becomes visible once the
1022 : : * unlink from the list has completed. Otherwise the target backend
1023 : : * could be woken up for another reason and enqueue for a new lock - if
1024 : : * that happens before the list unlink happens, the list would end up
1025 : : * being corrupted.
1026 : : *
1027 : : * The barrier pairs with the LWLockWaitListLock() when enqueuing for
1028 : : * another lock.
1029 : : */
3960 andres@anarazel.de 1030 : 235907 : pg_write_barrier();
1073 1031 : 235907 : waiter->lwWaiting = LW_WS_NOT_WAITING;
3242 tgl@sss.pgh.pa.us 1032 : 235907 : PGSemaphoreUnlock(waiter->sem);
1033 : : }
3960 andres@anarazel.de 1034 : 246879 : }
1035 : :
1036 : : /*
1037 : : * Add ourselves to the end of the queue.
1038 : : *
1039 : : * NB: Mode can be LW_WAIT_UNTIL_FREE here!
1040 : : */
1041 : : static void
1042 : 251726 : LWLockQueueSelf(LWLock *lock, LWLockMode mode)
1043 : : {
1044 : : /*
1045 : : * If we don't have a PGPROC structure, there's no way to wait. This
1046 : : * should never occur, since MyProc should only be null during shared
1047 : : * memory initialization.
1048 : : */
1049 [ - + ]: 251726 : if (MyProc == NULL)
3960 andres@anarazel.de 1050 [ # # ]:UBC 0 : elog(PANIC, "cannot wait without a PGPROC structure");
1051 : :
1073 andres@anarazel.de 1052 [ - + ]:CBC 251726 : if (MyProc->lwWaiting != LW_WS_NOT_WAITING)
3960 andres@anarazel.de 1053 [ # # ]:UBC 0 : elog(PANIC, "queueing for lock while waiting on another one");
1054 : :
3488 andres@anarazel.de 1055 :CBC 251726 : LWLockWaitListLock(lock);
1056 : :
1057 : : /* setting the flag is protected by the spinlock */
3960 1058 : 251726 : pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_HAS_WAITERS);
1059 : :
1073 1060 : 251726 : MyProc->lwWaiting = LW_WS_WAITING;
3960 1061 : 251726 : MyProc->lwWaitMode = mode;
1062 : :
1063 : : /* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
1064 [ + + ]: 251726 : if (mode == LW_WAIT_UNTIL_FREE)
614 heikki.linnakangas@i 1065 : 131608 : proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
1066 : : else
1067 : 120118 : proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);
1068 : :
1069 : : /* Can release the mutex now */
3488 andres@anarazel.de 1070 : 251726 : LWLockWaitListUnlock(lock);
1071 : :
1072 : : #ifdef LOCK_DEBUG
1073 : : pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
1074 : : #endif
3960 1075 : 251726 : }
1076 : :
1077 : : /*
1078 : : * Remove ourselves from the waitlist.
1079 : : *
1080 : : * This is used if we queued ourselves because we thought we needed to sleep
1081 : : * but, after further checking, we discovered that we don't actually need to
1082 : : * do so.
1083 : : */
1084 : : static void
1085 : 19950 : LWLockDequeueSelf(LWLock *lock)
1086 : : {
1087 : : bool on_waitlist;
1088 : :
1089 : : #ifdef LWLOCK_STATS
1090 : : lwlock_stats *lwstats;
1091 : :
1092 : : lwstats = get_lwlock_stats_entry(lock);
1093 : :
1094 : : lwstats->dequeue_self_count++;
1095 : : #endif
1096 : :
3488 1097 : 19950 : LWLockWaitListLock(lock);
1098 : :
1099 : : /*
1100 : : * Remove ourselves from the waitlist, unless we've already been removed.
1101 : : * The removal happens with the wait list lock held, so there's no race in
1102 : : * this check.
1103 : : */
1073 1104 : 19950 : on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
1105 [ + + ]: 19950 : if (on_waitlist)
614 heikki.linnakangas@i 1106 : 15341 : proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);
1107 : :
3361 rhaas@postgresql.org 1108 [ + + ]: 19950 : if (proclist_is_empty(&lock->waiters) &&
3960 andres@anarazel.de 1109 [ + + ]: 19477 : (pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) != 0)
1110 : : {
1111 : 19473 : pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_HAS_WAITERS);
1112 : : }
1113 : :
1114 : : /* XXX: combine with fetch_and above? */
3488 1115 : 19950 : LWLockWaitListUnlock(lock);
1116 : :
1117 : : /* clear waiting state again, nice for debugging */
1073 1118 [ + + ]: 19950 : if (on_waitlist)
1119 : 15341 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
1120 : : else
1121 : : {
3811 bruce@momjian.us 1122 : 4609 : int extraWaits = 0;
1123 : :
1124 : : /*
1125 : : * Somebody else dequeued us and has or will wake us up. Deal with the
1126 : : * superfluous absorption of a wakeup.
1127 : : */
1128 : :
1129 : : /*
1130 : : * Reset RELEASE_OK flag if somebody woke us before we removed
1131 : : * ourselves - they'll have set it to false.
1132 : : */
3960 andres@anarazel.de 1133 : 4609 : pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
1134 : :
1135 : : /*
1136 : : * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
1137 : : * get reset at some inconvenient point later. Most of the time this
1138 : : * will immediately return.
1139 : : */
1140 : : for (;;)
1141 : : {
3242 tgl@sss.pgh.pa.us 1142 : 4609 : PGSemaphoreLock(MyProc->sem);
1073 andres@anarazel.de 1143 [ + - ]: 4609 : if (MyProc->lwWaiting == LW_WS_NOT_WAITING)
3960 1144 : 4609 : break;
3960 andres@anarazel.de 1145 :UBC 0 : extraWaits++;
1146 : : }
1147 : :
1148 : : /*
1149 : : * Fix the process wait semaphore's count for any absorbed wakeups.
1150 : : */
3960 andres@anarazel.de 1151 [ - + ]:CBC 4609 : while (extraWaits-- > 0)
3242 tgl@sss.pgh.pa.us 1152 :UBC 0 : PGSemaphoreUnlock(MyProc->sem);
1153 : : }
1154 : :
1155 : : #ifdef LOCK_DEBUG
1156 : : {
1157 : : /* not waiting anymore */
1158 : : uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1159 : :
1160 : : Assert(nwaiters < MAX_BACKENDS);
1161 : : }
1162 : : #endif
3960 andres@anarazel.de 1163 :CBC 19950 : }
1164 : :
1165 : : /*
1166 : : * LWLockAcquire - acquire a lightweight lock in the specified mode
1167 : : *
1168 : : * If the lock is not available, sleep until it is. Returns true if the lock
1169 : : * was available immediately, false if we had to sleep.
1170 : : *
1171 : : * Side effect: cancel/die interrupts are held off until lock release.
1172 : : */
1173 : : bool
3742 1174 : 230948335 : LWLockAcquire(LWLock *lock, LWLockMode mode)
1175 : : {
8540 JanWieck@Yahoo.com 1176 : 230948335 : PGPROC *proc = MyProc;
4239 heikki.linnakangas@i 1177 : 230948335 : bool result = true;
8695 tgl@sss.pgh.pa.us 1178 : 230948335 : int extraWaits = 0;
1179 : : #ifdef LWLOCK_STATS
1180 : : lwlock_stats *lwstats;
1181 : :
1182 : : lwstats = get_lwlock_stats_entry(lock);
1183 : : #endif
1184 : :
1096 peter@eisentraut.org 1185 [ + + - + ]: 230948335 : Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1186 : :
1187 : : PRINT_LWDEBUG("LWLockAcquire", lock, mode);
1188 : :
1189 : : #ifdef LWLOCK_STATS
1190 : : /* Count lock acquisition attempts */
1191 : : if (mode == LW_EXCLUSIVE)
1192 : : lwstats->ex_acquire_count++;
1193 : : else
1194 : : lwstats->sh_acquire_count++;
1195 : : #endif /* LWLOCK_STATS */
1196 : :
1197 : : /*
1198 : : * We can't wait if we haven't got a PGPROC. This should only occur
1199 : : * during bootstrap or shared memory initialization. Put an Assert here
1200 : : * to catch unsafe coding practices.
1201 : : */
8434 tgl@sss.pgh.pa.us 1202 [ + + - + ]: 230948335 : Assert(!(proc == NULL && IsUnderPostmaster));
1203 : :
1204 : : /* Ensure we will have room to remember the lock */
7508 1205 [ - + ]: 230948335 : if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
7508 tgl@sss.pgh.pa.us 1206 [ # # ]:UBC 0 : elog(ERROR, "too many LWLocks taken");
1207 : :
1208 : : /*
1209 : : * Lock out cancel/die interrupts until we exit the code section protected
1210 : : * by the LWLock. This ensures that interrupts will not interfere with
1211 : : * manipulations of data structures in shared memory.
1212 : : */
8795 tgl@sss.pgh.pa.us 1213 :CBC 230948335 : HOLD_INTERRUPTS();
1214 : :
1215 : : /*
1216 : : * Loop here to try to acquire lock after each time we are signaled by
1217 : : * LWLockRelease.
1218 : : *
1219 : : * NOTE: it might seem better to have LWLockRelease actually grant us the
1220 : : * lock, rather than retrying and possibly having to go back to sleep. But
1221 : : * in practice that is no good because it means a process swap for every
1222 : : * lock acquisition when two or more processes are contending for the same
1223 : : * lock. Since LWLocks are normally used to protect not-very-long
1224 : : * sections of computation, a process needs to be able to acquire and
1225 : : * release the same lock many times during a single CPU time slice, even
1226 : : * in the presence of contention. The efficiency of being able to do that
1227 : : * outweighs the inefficiency of sometimes wasting a process dispatch
1228 : : * cycle because the lock is not free when a released waiter finally gets
1229 : : * to run. See pgsql-hackers archives for 29-Dec-01.
1230 : : */
1231 : : for (;;)
8704 bruce@momjian.us 1232 : 100985 : {
1233 : : bool mustwait;
1234 : :
1235 : : /*
1236 : : * Try to grab the lock the first time, we're not in the waitqueue
1237 : : * yet/anymore.
1238 : : */
3960 andres@anarazel.de 1239 : 231049320 : mustwait = LWLockAttemptLock(lock, mode);
1240 : :
8695 tgl@sss.pgh.pa.us 1241 [ + + ]: 231049320 : if (!mustwait)
1242 : : {
1243 : : LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
1244 : 230929202 : break; /* got the lock */
1245 : : }
1246 : :
1247 : : /*
1248 : : * Ok, at this point we couldn't grab the lock on the first try. We
1249 : : * cannot simply queue ourselves to the end of the list and wait to be
1250 : : * woken up because by now the lock could long have been released.
1251 : : * Instead add us to the queue and try to grab the lock again. If we
1252 : : * succeed we need to revert the queuing and be happy, otherwise we
1253 : : * recheck the lock. If we still couldn't grab it, we know that the
1254 : : * other locker will see our queue entries when releasing since they
1255 : : * existed before we checked for the lock.
1256 : : */
1257 : :
1258 : : /* add to the queue */
3960 andres@anarazel.de 1259 : 120118 : LWLockQueueSelf(lock, mode);
1260 : :
1261 : : /* we're now guaranteed to be woken up if necessary */
1262 : 120118 : mustwait = LWLockAttemptLock(lock, mode);
1263 : :
1264 : : /* ok, grabbed the lock the second time round, need to undo queueing */
1265 [ + + ]: 120118 : if (!mustwait)
1266 : : {
1267 : : LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");
1268 : :
1269 : 19133 : LWLockDequeueSelf(lock);
1270 : 19133 : break;
1271 : : }
1272 : :
1273 : : /*
1274 : : * Wait until awakened.
1275 : : *
1276 : : * It is possible that we get awakened for a reason other than being
1277 : : * signaled by LWLockRelease. If so, loop back and wait again. Once
1278 : : * we've gotten the LWLock, re-increment the sema by the number of
1279 : : * additional signals received.
1280 : : */
1281 : : LOG_LWDEBUG("LWLockAcquire", lock, "waiting");
1282 : :
1283 : : #ifdef LWLOCK_STATS
1284 : : lwstats->block_count++;
1285 : : #endif
1286 : :
3519 rhaas@postgresql.org 1287 : 100985 : LWLockReportWaitStart(lock);
1288 : : if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1289 : : TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
1290 : :
1291 : : for (;;)
1292 : : {
3242 tgl@sss.pgh.pa.us 1293 : 100985 : PGSemaphoreLock(proc->sem);
1073 andres@anarazel.de 1294 [ + - ]: 100985 : if (proc->lwWaiting == LW_WS_NOT_WAITING)
8795 tgl@sss.pgh.pa.us 1295 : 100985 : break;
8795 tgl@sss.pgh.pa.us 1296 :UBC 0 : extraWaits++;
1297 : : }
1298 : :
1299 : : /* Retrying, allow LWLockRelease to release waiters again. */
3960 andres@anarazel.de 1300 :CBC 100985 : pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
1301 : :
1302 : : #ifdef LOCK_DEBUG
1303 : : {
1304 : : /* not waiting anymore */
1305 : : uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1306 : :
1307 : : Assert(nwaiters < MAX_BACKENDS);
1308 : : }
1309 : : #endif
1310 : :
1311 : : if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1312 : : TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
3519 rhaas@postgresql.org 1313 : 100985 : LWLockReportWaitEnd();
1314 : :
1315 : : LOG_LWDEBUG("LWLockAcquire", lock, "awakened");
1316 : :
1317 : : /* Now loop back and try to acquire lock again. */
4239 heikki.linnakangas@i 1318 : 100985 : result = false;
1319 : : }
1320 : :
1321 : : if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_ENABLED())
1322 : : TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);
1323 : :
1324 : : /* Add lock to list of locks held by this backend */
3960 andres@anarazel.de 1325 : 230948335 : held_lwlocks[num_held_lwlocks].lock = lock;
1326 : 230948335 : held_lwlocks[num_held_lwlocks++].mode = mode;
1327 : :
1328 : : /*
1329 : : * Fix the process wait semaphore's count for any absorbed wakeups.
1330 : : */
8695 tgl@sss.pgh.pa.us 1331 [ - + ]: 230948335 : while (extraWaits-- > 0)
3242 tgl@sss.pgh.pa.us 1332 :UBC 0 : PGSemaphoreUnlock(proc->sem);
1333 : :
4239 heikki.linnakangas@i 1334 :CBC 230948335 : return result;
1335 : : }
1336 : :
1337 : : /*
1338 : : * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
1339 : : *
1340 : : * If the lock is not available, return false with no side-effects.
1341 : : *
1342 : : * If successful, cancel/die interrupts are held off until lock release.
1343 : : */
1344 : : bool
4054 rhaas@postgresql.org 1345 : 1906735 : LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
1346 : : {
1347 : : bool mustwait;
1348 : :
1096 peter@eisentraut.org 1349 [ + + - + ]: 1906735 : Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1350 : :
1351 : : PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);
1352 : :
1353 : : /* Ensure we will have room to remember the lock */
7508 tgl@sss.pgh.pa.us 1354 [ - + ]: 1906735 : if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
7508 tgl@sss.pgh.pa.us 1355 [ # # ]:UBC 0 : elog(ERROR, "too many LWLocks taken");
1356 : :
1357 : : /*
1358 : : * Lock out cancel/die interrupts until we exit the code section protected
1359 : : * by the LWLock. This ensures that interrupts will not interfere with
1360 : : * manipulations of data structures in shared memory.
1361 : : */
8795 tgl@sss.pgh.pa.us 1362 :CBC 1906735 : HOLD_INTERRUPTS();
1363 : :
1364 : : /* Check for the lock */
3960 andres@anarazel.de 1365 : 1906735 : mustwait = LWLockAttemptLock(lock, mode);
1366 : :
8795 tgl@sss.pgh.pa.us 1367 [ + + ]: 1906735 : if (mustwait)
1368 : : {
1369 : : /* Failed to get lock, so release interrupt holdoff */
1370 [ - + ]: 1211 : RESUME_INTERRUPTS();
1371 : :
1372 : : LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
1373 : : if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL_ENABLED())
1374 : : TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);
1375 : : }
1376 : : else
1377 : : {
1378 : : /* Add lock to list of locks held by this backend */
3960 andres@anarazel.de 1379 : 1905524 : held_lwlocks[num_held_lwlocks].lock = lock;
1380 : 1905524 : held_lwlocks[num_held_lwlocks++].mode = mode;
1381 : : if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_ENABLED())
1382 : : TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
1383 : : }
8795 tgl@sss.pgh.pa.us 1384 : 1906735 : return !mustwait;
1385 : : }
1386 : :
1387 : : /*
1388 : : * LWLockAcquireOrWait - Acquire lock, or wait until it's free
1389 : : *
1390 : : * The semantics of this function are a bit funky. If the lock is currently
1391 : : * free, it is acquired in the given mode, and the function returns true. If
1392 : : * the lock isn't immediately free, the function waits until it is released
1393 : : * and returns false, but does not acquire the lock.
1394 : : *
1395 : : * This is currently used for WALWriteLock: when a backend flushes the WAL,
1396 : : * holding WALWriteLock, it can flush the commit records of many other
1397 : : * backends as a side-effect. Those other backends need to wait until the
1398 : : * flush finishes, but don't need to acquire the lock anymore. They can just
1399 : : * wake up, observe that their records have already been flushed, and return.
1400 : : */
1401 : : bool
4054 rhaas@postgresql.org 1402 : 131321 : LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
1403 : : {
5020 heikki.linnakangas@i 1404 : 131321 : PGPROC *proc = MyProc;
1405 : : bool mustwait;
1406 : 131321 : int extraWaits = 0;
1407 : : #ifdef LWLOCK_STATS
1408 : : lwlock_stats *lwstats;
1409 : :
1410 : : lwstats = get_lwlock_stats_entry(lock);
1411 : : #endif
1412 : :
3960 andres@anarazel.de 1413 [ + - - + ]: 131321 : Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1414 : :
1415 : : PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);
1416 : :
1417 : : /* Ensure we will have room to remember the lock */
5020 heikki.linnakangas@i 1418 [ - + ]: 131321 : if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
5020 heikki.linnakangas@i 1419 [ # # ]:UBC 0 : elog(ERROR, "too many LWLocks taken");
1420 : :
1421 : : /*
1422 : : * Lock out cancel/die interrupts until we exit the code section protected
1423 : : * by the LWLock. This ensures that interrupts will not interfere with
1424 : : * manipulations of data structures in shared memory.
1425 : : */
5020 heikki.linnakangas@i 1426 :CBC 131321 : HOLD_INTERRUPTS();
1427 : :
1428 : : /*
1429 : : * NB: We're using nearly the same twice-in-a-row lock acquisition
1430 : : * protocol as LWLockAcquire(). Check its comments for details.
1431 : : */
3960 andres@anarazel.de 1432 : 131321 : mustwait = LWLockAttemptLock(lock, mode);
1433 : :
5020 heikki.linnakangas@i 1434 [ + + ]: 131321 : if (mustwait)
1435 : : {
3960 andres@anarazel.de 1436 : 2097 : LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1437 : :
1438 : 2097 : mustwait = LWLockAttemptLock(lock, mode);
1439 : :
1440 [ + + ]: 2097 : if (mustwait)
1441 : : {
1442 : : /*
1443 : : * Wait until awakened. Like in LWLockAcquire, be prepared for
1444 : : * bogus wakeups.
1445 : : */
1446 : : LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
1447 : :
1448 : : #ifdef LWLOCK_STATS
1449 : : lwstats->block_count++;
1450 : : #endif
1451 : :
3519 rhaas@postgresql.org 1452 : 2022 : LWLockReportWaitStart(lock);
1453 : : if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1454 : : TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
1455 : :
1456 : : for (;;)
1457 : : {
3242 tgl@sss.pgh.pa.us 1458 : 2022 : PGSemaphoreLock(proc->sem);
1073 andres@anarazel.de 1459 [ + - ]: 2022 : if (proc->lwWaiting == LW_WS_NOT_WAITING)
3960 1460 : 2022 : break;
3960 andres@anarazel.de 1461 :UBC 0 : extraWaits++;
1462 : : }
1463 : :
1464 : : #ifdef LOCK_DEBUG
1465 : : {
1466 : : /* not waiting anymore */
1467 : : uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1468 : :
1469 : : Assert(nwaiters < MAX_BACKENDS);
1470 : : }
1471 : : #endif
1472 : : if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1473 : : TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
3519 rhaas@postgresql.org 1474 :CBC 2022 : LWLockReportWaitEnd();
1475 : :
1476 : : LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
1477 : : }
1478 : : else
1479 : : {
1480 : : LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
1481 : :
1482 : : /*
1483 : : * Got the lock on the second attempt, so undo queueing. We need to treat
1484 : : * this as having successfully acquired the lock, otherwise we'd
1485 : : * not necessarily wake up people we've prevented from acquiring
1486 : : * the lock.
1487 : : */
3960 andres@anarazel.de 1488 : 75 : LWLockDequeueSelf(lock);
1489 : : }
1490 : : }
1491 : :
1492 : : /*
1493 : : * Fix the process wait semaphore's count for any absorbed wakeups.
1494 : : */
5020 heikki.linnakangas@i 1495 [ - + ]: 131321 : while (extraWaits-- > 0)
3242 tgl@sss.pgh.pa.us 1496 :UBC 0 : PGSemaphoreUnlock(proc->sem);
1497 : :
5020 heikki.linnakangas@i 1498 [ + + ]:CBC 131321 : if (mustwait)
1499 : : {
1500 : : /* Failed to get lock, so release interrupt holdoff */
1501 [ - + ]: 2022 : RESUME_INTERRUPTS();
1502 : : LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
1503 : : if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL_ENABLED())
1504 : : TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);
1505 : : }
1506 : : else
1507 : : {
1508 : : LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
1509 : : /* Add lock to list of locks held by this backend */
3960 andres@anarazel.de 1510 : 129299 : held_lwlocks[num_held_lwlocks].lock = lock;
1511 : 129299 : held_lwlocks[num_held_lwlocks++].mode = mode;
1512 : : if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_ENABLED())
1513 : : TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
1514 : : }
1515 : :
5020 heikki.linnakangas@i 1516 : 131321 : return !mustwait;
1517 : : }
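
/*
 * Editor's note: a hypothetical sketch of the caller pattern described in
 * the comment above, loosely modelled on how WAL flushing uses WALWriteLock.
 * EnsureFlushed(), RecordIsFlushed() and FlushUpTo() are placeholders, not
 * real PostgreSQL symbols; XLogRecPtr would come from xlogdefs.h.
 */
static void
EnsureFlushed(LWLock *flushLock, XLogRecPtr upto)
{
	for (;;)
	{
		/* Someone else may already have flushed our record. */
		if (RecordIsFlushed(upto))
			return;

		if (LWLockAcquireOrWait(flushLock, LW_EXCLUSIVE))
		{
			/* Lock was free and is now held: flush ourselves. */
			FlushUpTo(upto);
			LWLockRelease(flushLock);
			return;
		}

		/*
		 * Lock was busy; we slept until the holder released it, but did not
		 * acquire it.  Loop back: the holder may have flushed our record as
		 * a side effect.
		 */
	}
}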
1518 : :
1519 : : /*
1520 : : * Does the lwlock, in its current state, require the caller to wait for the
1521 : : * variable's value to change?
1522 : : *
1523 : : * If we don't need to wait, and it's because the value of the variable has
1524 : : * changed, store the current value in *newval.
1525 : : *
1526 : : * *result is set to true if the lock was free, and false otherwise.
1527 : : */
1528 : : static bool
826 michael@paquier.xyz 1529 : 4061457 : LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
1530 : : uint64 *newval, bool *result)
1531 : : {
1532 : : bool mustwait;
1533 : : uint64 value;
1534 : :
1535 : : /*
1536 : : * Test first to see if the lock is free right now.
1537 : : *
1538 : : * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
1539 : : * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
1540 : : * this, so we don't need a memory barrier here as far as the current
1541 : : * usage is concerned. But that might not be safe in general.
1542 : : */
3742 andres@anarazel.de 1543 : 4061457 : mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;
1544 : :
1545 [ + + ]: 4061457 : if (!mustwait)
1546 : : {
1547 : 2583311 : *result = true;
1548 : 2583311 : return false;
1549 : : }
1550 : :
1551 : 1478146 : *result = false;
1552 : :
1553 : : /*
1554 : : * Reading this value atomically is safe even on platforms where uint64
1555 : : * cannot be read without observing a torn value.
1556 : : */
826 michael@paquier.xyz 1557 : 1478146 : value = pg_atomic_read_u64(valptr);
1558 : :
3742 andres@anarazel.de 1559 [ + + ]: 1478146 : if (value != oldval)
1560 : : {
1561 : 1219866 : mustwait = false;
1562 : 1219866 : *newval = value;
1563 : : }
1564 : : else
1565 : : {
1566 : 258280 : mustwait = true;
1567 : : }
1568 : :
1569 : 1478146 : return mustwait;
1570 : : }
1571 : :
1572 : : /*
1573 : : * LWLockWaitForVar - Wait until lock is free, or a variable is updated.
1574 : : *
1575 : : * If the lock is held and *valptr equals oldval, waits until the lock is
1576 : : * either freed, or the lock holder updates *valptr by calling
1577 : : * LWLockUpdateVar. If the lock is free on exit (immediately or after
1578 : : * waiting), returns true. If the lock is still held, but *valptr no longer
1579 : : * matches oldval, returns false and sets *newval to the current value in
1580 : : * *valptr.
1581 : : *
1582 : : * Note: this function ignores shared lock holders; if the lock is held
1583 : : * in shared mode, returns 'true'.
1584 : : *
1585 : : * Be aware that LWLockConflictsWithVar() does not include a memory barrier,
1586 : : * hence the caller of this function may want to rely on an explicit barrier or
1587 : : * an implied barrier via spinlock or LWLock to avoid memory ordering issues.
1588 : : */
1589 : : bool
826 michael@paquier.xyz 1590 : 3803177 : LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
1591 : : uint64 *newval)
1592 : : {
4239 heikki.linnakangas@i 1593 : 3803177 : PGPROC *proc = MyProc;
1594 : 3803177 : int extraWaits = 0;
1595 : 3803177 : bool result = false;
1596 : : #ifdef LWLOCK_STATS
1597 : : lwlock_stats *lwstats;
1598 : :
1599 : : lwstats = get_lwlock_stats_entry(lock);
1600 : : #endif
1601 : :
1602 : : PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1603 : :
1604 : : /*
1605 : : * Lock out cancel/die interrupts while we sleep on the lock. There is no
1606 : : * cleanup mechanism to remove us from the wait queue if we got
1607 : : * interrupted.
1608 : : */
1609 : 3803177 : HOLD_INTERRUPTS();
1610 : :
1611 : : /*
1612 : : * Loop here to check the lock's status after each time we are signaled.
1613 : : */
1614 : : for (;;)
1615 : 128769 : {
1616 : : bool mustwait;
1617 : :
3742 andres@anarazel.de 1618 : 3931946 : mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1619 : : &result);
1620 : :
4239 heikki.linnakangas@i 1621 [ + + ]: 3931946 : if (!mustwait)
1622 : 3802435 : break; /* the lock was free or value didn't match */
1623 : :
1624 : : /*
1625 : : * Add myself to wait queue. Note that this is racy: the lock holder
1626 : : * could release the lock and wake up waiters before we're finished
1627 : : * queuing. NB: We're using nearly the same twice-in-a-row lock
1628 : : * acquisition protocol as LWLockAcquire(). Check its comments for
1629 : : * details. The only difference is that we also have to check the
1630 : : * variable's value when checking the state of the lock.
1631 : : */
3960 andres@anarazel.de 1632 : 129511 : LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1633 : :
1634 : : /*
1635 : : * Set RELEASE_OK flag, to make sure we get woken up as soon as the
1636 : : * lock is released.
1637 : : */
1638 : 129511 : pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
1639 : :
1640 : : /*
1641 : : * We're now guaranteed to be woken up if necessary. Recheck the lock
1642 : : * and variable's state.
1643 : : */
3742 1644 : 129511 : mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1645 : : &result);
1646 : :
1647 : : /* Ok, no conflict after we queued ourselves. Undo queueing. */
3960 1648 [ + + ]: 129511 : if (!mustwait)
1649 : : {
1650 : : LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1651 : :
1652 : 742 : LWLockDequeueSelf(lock);
1653 : 742 : break;
1654 : : }
1655 : :
1656 : : /*
1657 : : * Wait until awakened.
1658 : : *
1659 : : * It is possible that we get awakened for a reason other than being
1660 : : * signaled by LWLockRelease. If so, loop back and wait again. Once
1661 : : * we've gotten the LWLock, re-increment the sema by the number of
1662 : : * additional signals received.
1663 : : */
1664 : : LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1665 : :
1666 : : #ifdef LWLOCK_STATS
1667 : : lwstats->block_count++;
1668 : : #endif
1669 : :
3519 rhaas@postgresql.org 1670 : 128769 : LWLockReportWaitStart(lock);
1671 : : if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1672 : : TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);
1673 : :
1674 : : for (;;)
1675 : : {
3242 tgl@sss.pgh.pa.us 1676 : 128769 : PGSemaphoreLock(proc->sem);
1073 andres@anarazel.de 1677 [ + - ]: 128769 : if (proc->lwWaiting == LW_WS_NOT_WAITING)
4239 heikki.linnakangas@i 1678 : 128769 : break;
4239 heikki.linnakangas@i 1679 :UBC 0 : extraWaits++;
1680 : : }
1681 : :
1682 : : #ifdef LOCK_DEBUG
1683 : : {
1684 : : /* not waiting anymore */
1685 : : uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1686 : :
1687 : : Assert(nwaiters < MAX_BACKENDS);
1688 : : }
1689 : : #endif
1690 : :
1691 : : if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1692 : : TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
3519 rhaas@postgresql.org 1693 :CBC 128769 : LWLockReportWaitEnd();
1694 : :
1695 : : LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1696 : :
1697 : : /* Now loop back and check the status of the lock again. */
1698 : : }
1699 : :
1700 : : /*
1701 : : * Fix the process wait semaphore's count for any absorbed wakeups.
1702 : : */
4239 heikki.linnakangas@i 1703 [ - + ]: 3803177 : while (extraWaits-- > 0)
3242 tgl@sss.pgh.pa.us 1704 :UBC 0 : PGSemaphoreUnlock(proc->sem);
1705 : :
1706 : : /*
1707 : : * Now okay to allow cancel/die interrupts.
1708 : : */
4239 heikki.linnakangas@i 1709 [ - + ]:CBC 3803177 : RESUME_INTERRUPTS();
1710 : :
1711 : 3803177 : return result;
1712 : : }
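
/*
 * Editor's note: a minimal, hypothetical sketch of the waiter-side loop,
 * modelled on how WaitXLogInsertionsToFinish() polls the WAL insertion
 * locks.  WaitForProgress(), the 'progress' variable and 'upto' are
 * assumptions for illustration only.
 */
static uint64
WaitForProgress(LWLock *lock, pg_atomic_uint64 *progress, uint64 upto)
{
	uint64		seen = 0;

	while (seen < upto)
	{
		uint64		newval;

		if (LWLockWaitForVar(lock, progress, seen, &newval))
		{
			/* Lock is free: the holder finished; stop waiting. */
			break;
		}

		/* Lock still held, but the variable advanced past 'seen'. */
		seen = newval;
	}

	return seen;
}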
1713 : :
1714 : :
1715 : : /*
1716 : : * LWLockUpdateVar - Update a variable and wake up waiters atomically
1717 : : *
1718 : : * Sets *valptr to 'val', and wakes up all processes waiting for us with
1719 : : * LWLockWaitForVar(). It first sets the value atomically and then wakes up
1720 : : * waiting processes so that any process calling LWLockWaitForVar() on the same
1721 : : * lock is guaranteed to see the new value, and act accordingly.
1722 : : *
1723 : : * The caller must be holding the lock in exclusive mode.
1724 : : */
1725 : : void
826 michael@paquier.xyz 1726 : 2284934 : LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
1727 : : {
1728 : : proclist_head wakeup;
1729 : : proclist_mutable_iter iter;
1730 : :
1731 : : PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1732 : :
1733 : : /*
1734 : : * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1735 : : * that the variable is updated before waking up waiters.
1736 : : */
1737 : 2284934 : pg_atomic_exchange_u64(valptr, val);
1738 : :
3361 rhaas@postgresql.org 1739 : 2284934 : proclist_init(&wakeup);
1740 : :
3488 andres@anarazel.de 1741 : 2284934 : LWLockWaitListLock(lock);
1742 : :
3960 1743 [ - + ]: 2284934 : Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);
1744 : :
1745 : : /*
1746 : : * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1747 : : * up. They are always in the front of the queue.
1748 : : */
3361 rhaas@postgresql.org 1749 [ + + + + ]: 2285439 : proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1750 : : {
1751 : 51906 : PGPROC *waiter = GetPGProcByNumber(iter.cur);
1752 : :
3960 andres@anarazel.de 1753 [ + + ]: 51906 : if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1754 : 51401 : break;
1755 : :
3361 rhaas@postgresql.org 1756 : 505 : proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1757 : 505 : proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1758 : :
1759 : : /* see LWLockWakeup() */
1073 andres@anarazel.de 1760 [ - + ]: 505 : Assert(waiter->lwWaiting == LW_WS_WAITING);
1761 [ + + ]: 505 : waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
1762 : : }
1763 : :
1764 : : /* We are done updating shared state of the lock itself. */
3488 1765 : 2284934 : LWLockWaitListUnlock(lock);
1766 : :
1767 : : /*
1768 : : * Awaken any waiters I removed from the queue.
1769 : : */
3361 rhaas@postgresql.org 1770 [ + + + + : 2285439 : proclist_foreach_modify(iter, &wakeup, lwWaitLink)
+ + ]
1771 : : {
1772 : 505 : PGPROC *waiter = GetPGProcByNumber(iter.cur);
1773 : :
1774 : 505 : proclist_delete(&wakeup, iter.cur, lwWaitLink);
1775 : : /* check comment in LWLockWakeup() about this barrier */
3966 andres@anarazel.de 1776 : 505 : pg_write_barrier();
1073 1777 : 505 : waiter->lwWaiting = LW_WS_NOT_WAITING;
3242 tgl@sss.pgh.pa.us 1778 : 505 : PGSemaphoreUnlock(waiter->sem);
1779 : : }
4239 heikki.linnakangas@i 1780 : 2284934 : }
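
/*
 * Editor's note: a hypothetical holder-side counterpart to the waiter
 * sketch after LWLockWaitForVar() above.  The exclusive holder advertises
 * progress through the variable without releasing the lock, then resets it
 * on release via LWLockReleaseClearVar() (defined below).  DoTwoPhaseWrite(),
 * write_first_half() and write_second_half() are placeholders.
 */
static void
DoTwoPhaseWrite(LWLock *lock, pg_atomic_uint64 *progress)
{
	LWLockAcquire(lock, LW_EXCLUSIVE);

	write_first_half();

	/* Publish intermediate progress; wakes LW_WAIT_UNTIL_FREE waiters. */
	LWLockUpdateVar(lock, progress, 1000);

	write_second_half();

	/* Release the lock and reset the variable to its "free" value. */
	LWLockReleaseClearVar(lock, progress, 0);
}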
1781 : :
1782 : :
1783 : : /*
1784 : : * Stop treating lock as held by current backend.
1785 : : *
1786 : : * This code is shared between actually releasing a lock (LWLockRelease())
1787 : : * and merely ceasing to track ownership of the lock without releasing it
1788 : : * (LWLockDisown()).
1789 : : *
1790 : : * Returns the mode in which the lock was held by the current backend.
1791 : : *
1792 : : * NB: This does not call RESUME_INTERRUPTS(), but leaves that responsibility
1793 : : * to the caller.
1794 : : *
1795 : : * NB: This will leave lock->owner pointing to the current backend (if
1796 : : * LOCK_DEBUG is set). This is somewhat intentional, as it makes it easier to
1797 : : * debug cases of missing wakeups during lock release.
1798 : : */
1799 : : static inline LWLockMode
249 andres@anarazel.de 1800 : 232983158 : LWLockDisownInternal(LWLock *lock)
1801 : : {
1802 : : LWLockMode mode;
1803 : : int i;
1804 : :
1805 : : /*
1806 : : * Remove lock from list of locks held. Usually, but not always, it will
1807 : : * be the latest-acquired lock; so search array backwards.
1808 : : */
8769 bruce@momjian.us 1809 [ + - ]: 258789627 : for (i = num_held_lwlocks; --i >= 0;)
3960 andres@anarazel.de 1810 [ + + ]: 258789627 : if (lock == held_lwlocks[i].lock)
8795 tgl@sss.pgh.pa.us 1811 : 232983158 : break;
1812 : :
1813 [ - + ]: 232983158 : if (i < 0)
3238 rhaas@postgresql.org 1814 [ # # ]:UBC 0 : elog(ERROR, "lock %s is not held", T_NAME(lock));
1815 : :
3248 sfrost@snowman.net 1816 :CBC 232983158 : mode = held_lwlocks[i].mode;
1817 : :
8795 tgl@sss.pgh.pa.us 1818 : 232983158 : num_held_lwlocks--;
1819 [ + + ]: 258789627 : for (; i < num_held_lwlocks; i++)
8769 bruce@momjian.us 1820 : 25806469 : held_lwlocks[i] = held_lwlocks[i + 1];
1821 : :
249 andres@anarazel.de 1822 : 232983158 : return mode;
1823 : : }
1824 : :
1825 : : /*
1826 : : * Helper function to release lock, shared between LWLockRelease() and
1827 : : * LWLockReleaseDisowned().
1828 : : */
1829 : : static void
1830 : 232983158 : LWLockReleaseInternal(LWLock *lock, LWLockMode mode)
1831 : : {
1832 : : uint32 oldstate;
1833 : : bool check_waiters;
1834 : :
1835 : : /*
1836 : : * Release my hold on lock, after that it can immediately be acquired by
1837 : : * others, even if we still have to wakeup other waiters.
1838 : : */
3960 1839 [ + + ]: 232983158 : if (mode == LW_EXCLUSIVE)
1840 : 115207377 : oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_EXCLUSIVE);
1841 : : else
1842 : 117775781 : oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_SHARED);
1843 : :
1844 : : /* nobody else can have that kind of lock */
1845 [ - + ]: 232983158 : Assert(!(oldstate & LW_VAL_EXCLUSIVE));
1846 : :
1847 : : if (TRACE_POSTGRESQL_LWLOCK_RELEASE_ENABLED())
1848 : : TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));
1849 : :
1850 : : /*
1851 : : * We're still waiting for backends to get scheduled, don't wake them up
1852 : : * again.
1853 : : */
1854 [ + + ]: 232983158 : if ((oldstate & (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK)) ==
1855 : 275506 : (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK) &&
1856 [ + + ]: 275506 : (oldstate & LW_LOCK_MASK) == 0)
1857 : 246879 : check_waiters = true;
1858 : : else
1859 : 232736279 : check_waiters = false;
1860 : :
1861 : : /*
1862 : : * As waking up waiters requires the spinlock to be acquired, only do so
1863 : : * if necessary.
1864 : : */
1865 [ + + ]: 232983158 : if (check_waiters)
1866 : : {
1867 : : /* XXX: remove before commit? */
1868 : : LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1869 : 246879 : LWLockWakeup(lock);
1870 : : }
249 1871 : 232983158 : }
1872 : :
1873 : :
1874 : : /*
1875 : : * Stop treating lock as held by current backend.
1876 : : *
1877 : : * After calling this function it's the caller's responsibility to ensure
1878 : : * that the lock gets released (via LWLockReleaseDisowned()), even in case
1879 : : * of an error. This is only desirable if the lock is going to be released
1880 : : * in a different process than the one that acquired it.
1881 : : */
1882 : : void
249 andres@anarazel.de 1883 :UBC 0 : LWLockDisown(LWLock *lock)
1884 : : {
1885 : 0 : LWLockDisownInternal(lock);
1886 : :
1887 [ # # ]: 0 : RESUME_INTERRUPTS();
1888 : 0 : }
1889 : :
1890 : : /*
1891 : : * LWLockRelease - release a previously acquired lock
1892 : : */
1893 : : void
249 andres@anarazel.de 1894 :CBC 232983158 : LWLockRelease(LWLock *lock)
1895 : : {
1896 : : LWLockMode mode;
1897 : :
1898 : 232983158 : mode = LWLockDisownInternal(lock);
1899 : :
1900 : : PRINT_LWDEBUG("LWLockRelease", lock, mode);
1901 : :
1902 : 232983158 : LWLockReleaseInternal(lock, mode);
1903 : :
1904 : : /*
1905 : : * Now okay to allow cancel/die interrupts.
1906 : : */
8795 tgl@sss.pgh.pa.us 1907 [ - + ]: 232983158 : RESUME_INTERRUPTS();
1908 : 232983158 : }
1909 : :
1910 : : /*
1911 : : * Release lock previously disowned with LWLockDisown().
1912 : : */
1913 : : void
249 andres@anarazel.de 1914 :UBC 0 : LWLockReleaseDisowned(LWLock *lock, LWLockMode mode)
1915 : : {
1916 : 0 : LWLockReleaseInternal(lock, mode);
1917 : 0 : }
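
/*
 * Editor's note: a hypothetical sketch of the disown/release handoff
 * described above, where a lock acquired in one backend is released by
 * another.  BeginHandoff() and FinishHandoff() are placeholders; how the
 * second process learns about the lock (e.g. through shared memory) is
 * outside the scope of this sketch.
 */
static void
BeginHandoff(LWLock *lock)
{
	LWLockAcquire(lock, LW_EXCLUSIVE);

	/*
	 * Stop tracking the lock in this backend; from here on, some other
	 * process is responsible for releasing it, even on error.
	 */
	LWLockDisown(lock);
}

static void
FinishHandoff(LWLock *lock)
{
	/* Runs in a different backend: release the disowned lock. */
	LWLockReleaseDisowned(lock, LW_EXCLUSIVE);
}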
1918 : :
1919 : : /*
1920 : : * LWLockReleaseClearVar - release a previously acquired lock, reset variable
1921 : : */
1922 : : void
826 michael@paquier.xyz 1923 :CBC 13939067 : LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
1924 : : {
1925 : : /*
1926 : : * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1927 : : * that the variable is updated before releasing the lock.
1928 : : */
1929 : 13939067 : pg_atomic_exchange_u64(valptr, val);
1930 : :
3742 andres@anarazel.de 1931 : 13939067 : LWLockRelease(lock);
1932 : 13939067 : }
1933 : :
1934 : :
1935 : : /*
1936 : : * LWLockReleaseAll - release all currently-held locks
1937 : : *
1938 : : * Used to clean up after ereport(ERROR). An important difference between this
1939 : : * function and retail LWLockRelease calls is that InterruptHoldoffCount is
1940 : : * unchanged by this operation. This is necessary since InterruptHoldoffCount
1941 : : * has been set to an appropriate level earlier in error recovery. We could
1942 : : * decrement it below zero if we allowed it to drop for each released lock!
1943 : : */
1944 : : void
8795 tgl@sss.pgh.pa.us 1945 : 53130 : LWLockReleaseAll(void)
1946 : : {
1947 [ + + ]: 53333 : while (num_held_lwlocks > 0)
1948 : : {
1949 : 203 : HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
1950 : :
3960 andres@anarazel.de 1951 : 203 : LWLockRelease(held_lwlocks[num_held_lwlocks - 1].lock);
1952 : : }
8795 tgl@sss.pgh.pa.us 1953 : 53130 : }
1954 : :
1955 : :
1956 : : /*
1957 : : * ForEachLWLockHeldByMe - run a callback for each held lock
1958 : : *
1959 : : * This is meant as debug support only.
1960 : : */
1961 : : void
194 noah@leadboat.com 1962 : 103734113 : ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
1963 : : void *context)
1964 : : {
1965 : : int i;
1966 : :
1967 [ + + ]: 103860675 : for (i = 0; i < num_held_lwlocks; i++)
1968 : 126562 : callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
1969 : 103734113 : }
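
/*
 * Editor's note: a hypothetical debugging callback for
 * ForEachLWLockHeldByMe(); print_held_lwlock() and its elog() message are
 * illustrative only.
 */
static void
print_held_lwlock(LWLock *lock, LWLockMode mode, void *context)
{
	elog(DEBUG1, "holding LWLock \"%s\" in %s mode",
		 T_NAME(lock),
		 mode == LW_EXCLUSIVE ? "exclusive" : "shared");
}

/* Usage: ForEachLWLockHeldByMe(print_held_lwlock, NULL); */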
1970 : :
1971 : : /*
1972 : : * LWLockHeldByMe - test whether my process holds a lock in any mode
1973 : : *
1974 : : * This is meant as debug support only.
1975 : : */
1976 : : bool
1135 pg@bowt.ie 1977 : 67437594 : LWLockHeldByMe(LWLock *lock)
1978 : : {
1979 : : int i;
1980 : :
7809 tgl@sss.pgh.pa.us 1981 [ + + ]: 105035598 : for (i = 0; i < num_held_lwlocks; i++)
1982 : : {
1135 pg@bowt.ie 1983 [ + + ]: 49756824 : if (held_lwlocks[i].lock == lock)
7809 tgl@sss.pgh.pa.us 1984 : 12158820 : return true;
1985 : : }
1986 : 55278774 : return false;
1987 : : }
1988 : :
1989 : : /*
1990 : : * LWLockAnyHeldByMe - test whether my process holds any of an array of locks
1991 : : *
1992 : : * This is meant as debug support only.
1993 : : */
1994 : : bool
1135 pg@bowt.ie 1995 : 1171205 : LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
1996 : : {
1997 : : char *held_lock_addr;
1998 : : char *begin;
1999 : : char *end;
2000 : : int i;
2001 : :
2002 : 1171205 : begin = (char *) lock;
1205 tmunro@postgresql.or 2003 : 1171205 : end = begin + nlocks * stride;
2004 [ + + ]: 1174100 : for (i = 0; i < num_held_lwlocks; i++)
2005 : : {
2006 : 2895 : held_lock_addr = (char *) held_lwlocks[i].lock;
2007 [ - + - - ]: 2895 : if (held_lock_addr >= begin &&
1205 tmunro@postgresql.or 2008 :UBC 0 : held_lock_addr < end &&
2009 [ # # ]: 0 : (held_lock_addr - begin) % stride == 0)
2010 : 0 : return true;
2011 : : }
1205 tmunro@postgresql.or 2012 :CBC 1171205 : return false;
2013 : : }
2014 : :
2015 : : /*
2016 : : * LWLockHeldByMeInMode - test whether my process holds a lock in given mode
2017 : : *
2018 : : * This is meant as debug support only.
2019 : : */
2020 : : bool
1135 pg@bowt.ie 2021 : 50214370 : LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
2022 : : {
2023 : : int i;
2024 : :
3340 simon@2ndQuadrant.co 2025 [ + - ]: 56130340 : for (i = 0; i < num_held_lwlocks; i++)
2026 : : {
1135 pg@bowt.ie 2027 [ + + + - ]: 56130340 : if (held_lwlocks[i].lock == lock && held_lwlocks[i].mode == mode)
3340 simon@2ndQuadrant.co 2028 : 50214370 : return true;
2029 : : }
3340 simon@2ndQuadrant.co 2030 :UBC 0 : return false;
2031 : : }
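
/*
 * Editor's note: the typical use of these predicates is in assertions that
 * guard code requiring a particular lock mode.  ModifySharedState() is a
 * hypothetical example.
 */
static void
ModifySharedState(LWLock *lock)
{
	/* Caller must already hold the protecting lock exclusively. */
	Assert(LWLockHeldByMeInMode(lock, LW_EXCLUSIVE));

	/* ... mutate the shared data structure here ... */
}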