Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * hashpage.c
4 : : * Hash table page management code for the Postgres hash access method
5 : : *
6 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/access/hash/hashpage.c
12 : : *
13 : : * NOTES
14 : : * Postgres hash pages look like ordinary relation pages. The opaque
15 : : * data at high addresses includes information about the page including
16 : : * whether a page is an overflow page or a true bucket, the bucket
17 : : * number, and the block numbers of the preceding and following pages
18 : : * in the same bucket.
19 : : *
20 : : * The first page in a hash relation, page zero, is special -- it stores
21 : : * information describing the hash table; it is referred to as the
22 : : * "meta page." Pages one and higher store the actual data.
23 : : *
24 : : * There are also bitmap pages, which are not manipulated here;
25 : : * see hashovfl.c.
26 : : *
27 : : *-------------------------------------------------------------------------
28 : : */
29 : : #include "postgres.h"
30 : :
31 : : #include "access/hash.h"
32 : : #include "access/hash_xlog.h"
33 : : #include "access/xloginsert.h"
34 : : #include "miscadmin.h"
35 : : #include "port/pg_bitutils.h"
36 : : #include "storage/predicate.h"
37 : : #include "storage/smgr.h"
38 : : #include "utils/rel.h"
39 : :
40 : : static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
41 : : uint32 nblocks);
42 : : static void _hash_splitbucket(Relation rel, Buffer metabuf,
43 : : Bucket obucket, Bucket nbucket,
44 : : Buffer obuf,
45 : : Buffer nbuf,
46 : : HTAB *htab,
47 : : uint32 maxbucket,
48 : : uint32 highmask, uint32 lowmask);
49 : : static void log_split_page(Relation rel, Buffer buf);
50 : :
51 : :
52 : : /*
53 : : * _hash_getbuf() -- Get a buffer by block number for read or write.
54 : : *
55 : : * 'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
56 : : * 'flags' is a bitwise OR of the allowed page types.
57 : : *
58 : : * This must be used only to fetch pages that are expected to be valid
59 : : * already. _hash_checkpage() is applied using the given flags.
60 : : *
61 : : * When this routine returns, the appropriate lock is set on the
62 : : * requested buffer and its reference count has been incremented
63 : : * (ie, the buffer is "locked and pinned").
64 : : *
65 : : * P_NEW is disallowed because this routine can only be used
66 : : * to access pages that are known to be before the filesystem EOF.
67 : : * Extending the index should be done with _hash_getnewbuf.
68 : : */
69 : : Buffer
6942 tgl@sss.pgh.pa.us 70 :CBC 1339030 : _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
71 : : {
72 : : Buffer buf;
73 : :
6956 74 [ - + ]: 1339030 : if (blkno == P_NEW)
6956 tgl@sss.pgh.pa.us 75 [ # # ]:UBC 0 : elog(ERROR, "hash AM does not use P_NEW");
76 : :
8279 tgl@sss.pgh.pa.us 77 :CBC 1339030 : buf = ReadBuffer(rel, blkno);
78 : :
79 [ + + ]: 1339030 : if (access != HASH_NOLOCK)
80 : 848710 : LockBuffer(buf, access);
81 : :
82 : : /* ref count and lock type are correct */
83 : :
6942 84 : 1339030 : _hash_checkpage(rel, buf, flags);
85 : :
86 : 1339030 : return buf;
87 : : }
88 : :
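/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller pins and read-locks a page that is already known to be valid,
 * examines it, and then drops both the lock and the pin with
 * _hash_relbuf():
 *
 *     Buffer buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE);
 *     Page   page = BufferGetPage(buf);
 *     ... examine the page ...
 *     _hash_relbuf(rel, buf);
 */
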
89 : : /*
90 : : * _hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup.
91 : : *
92 : : * We read the page and try to acquire a cleanup lock. If we get it,
93 : : * we return the buffer; otherwise, we return InvalidBuffer.
94 : : */
95 : : Buffer
3443 rhaas@postgresql.org 96 : 893 : _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
97 : : {
98 : : Buffer buf;
99 : :
100 [ - + ]: 893 : if (blkno == P_NEW)
3443 rhaas@postgresql.org 101 [ # # ]:UBC 0 : elog(ERROR, "hash AM does not use P_NEW");
102 : :
3443 rhaas@postgresql.org 103 :CBC 893 : buf = ReadBuffer(rel, blkno);
104 : :
105 [ - + ]: 893 : if (!ConditionalLockBufferForCleanup(buf))
106 : : {
3443 rhaas@postgresql.org 107 :UBC 0 : ReleaseBuffer(buf);
108 : 0 : return InvalidBuffer;
109 : : }
110 : :
111 : : /* ref count and lock type are correct */
112 : :
3443 rhaas@postgresql.org 113 :CBC 893 : _hash_checkpage(rel, buf, flags);
114 : :
115 : 893 : return buf;
116 : : }
117 : :
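/*
 * Illustrative usage sketch (not part of the original file): since the
 * cleanup lock is only attempted conditionally, callers must be ready for
 * an InvalidBuffer result and simply give up, as _hash_expandtable()
 * does below:
 *
 *     Buffer buf = _hash_getbuf_with_condlock_cleanup(rel, blkno,
 *                                                     LH_BUCKET_PAGE);
 *     if (!BufferIsValid(buf))
 *         return;            ... someone else holds a pin; retry later ...
 *     ... modify the page ...
 *     _hash_relbuf(rel, buf);
 */
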
118 : : /*
119 : : * _hash_getinitbuf() -- Get and initialize a buffer by block number.
120 : : *
121 : : * This must be used only to fetch pages that are known to be before
122 : : * the index's filesystem EOF, but are to be filled from scratch.
123 : : * _hash_pageinit() is applied automatically. Otherwise it has
124 : : * effects similar to _hash_getbuf() with access = HASH_WRITE.
125 : : *
126 : : * When this routine returns, a write lock is set on the
127 : : * requested buffer and its reference count has been incremented
128 : : * (ie, the buffer is "locked and pinned").
129 : : *
130 : : * P_NEW is disallowed because this routine can only be used
131 : : * to access pages that are known to be before the filesystem EOF.
132 : : * Extending the index should be done with _hash_getnewbuf.
133 : : */
134 : : Buffer
6942 tgl@sss.pgh.pa.us 135 : 45 : _hash_getinitbuf(Relation rel, BlockNumber blkno)
136 : : {
137 : : Buffer buf;
138 : :
139 [ - + ]: 45 : if (blkno == P_NEW)
6942 tgl@sss.pgh.pa.us 140 [ # # ]:UBC 0 : elog(ERROR, "hash AM does not use P_NEW");
141 : :
4191 heikki.linnakangas@i 142 :CBC 45 : buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
143 : : NULL);
144 : :
145 : : /* ref count and lock type are correct */
146 : :
147 : : /* initialize the page */
3667 kgrittn@postgresql.o 148 : 45 : _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
149 : :
8279 tgl@sss.pgh.pa.us 150 : 45 : return buf;
151 : : }
152 : :
153 : : /*
154 : : * _hash_initbuf() -- Get and initialize a buffer by bucket number.
155 : : */
156 : : void
3346 rhaas@postgresql.org 157 : 5571 : _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
158 : : bool initpage)
159 : : {
160 : : HashPageOpaque pageopaque;
161 : : Page page;
162 : :
163 : 5571 : page = BufferGetPage(buf);
164 : :
165 : : /* initialize the page */
166 [ + + ]: 5571 : if (initpage)
167 : 291 : _hash_pageinit(page, BufferGetPageSize(buf));
168 : :
1495 michael@paquier.xyz 169 : 5571 : pageopaque = HashPageGetOpaque(page);
170 : :
171 : : /*
172 : : * Set hasho_prevblkno with current hashm_maxbucket. This value will be
173 : : * used to validate cached HashMetaPageData. See
174 : : * _hash_getbucketbuf_from_hashkey().
175 : : */
3346 rhaas@postgresql.org 176 : 5571 : pageopaque->hasho_prevblkno = max_bucket;
177 : 5571 : pageopaque->hasho_nextblkno = InvalidBlockNumber;
178 : 5571 : pageopaque->hasho_bucket = num_bucket;
179 : 5571 : pageopaque->hasho_flag = flag;
180 : 5571 : pageopaque->hasho_page_id = HASHO_PAGE_ID;
181 : 5571 : }
182 : :
183 : : /*
184 : : * _hash_getnewbuf() -- Get a new page at the end of the index.
185 : : *
186 : : * This has the same API as _hash_getinitbuf, except that we are adding
187 : : * a page to the index, and hence expect the page to be past the
188 : : * logical EOF. (However, we have to support the case where it isn't,
189 : : * since a prior try might have crashed after extending the filesystem
190 : : * EOF but before updating the metapage to reflect the added page.)
191 : : *
192 : : * It is caller's responsibility to ensure that only one process can
193 : : * It is the caller's responsibility to ensure that only one process can
194 : : * extend the index at a time. In practice, this function is called
195 : : * only while holding a write lock on the metapage, because adding a page
196 : : */
197 : : Buffer
5606 198 : 6828 : _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
199 : : {
200 : 6828 : BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
201 : : Buffer buf;
202 : :
6956 tgl@sss.pgh.pa.us 203 [ - + ]: 6828 : if (blkno == P_NEW)
6956 tgl@sss.pgh.pa.us 204 [ # # ]:UBC 0 : elog(ERROR, "hash AM does not use P_NEW");
6956 tgl@sss.pgh.pa.us 205 [ - + ]:CBC 6828 : if (blkno > nblocks)
6956 tgl@sss.pgh.pa.us 206 [ # # ]:UBC 0 : elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
207 : : RelationGetRelationName(rel));
208 : :
209 : : /* smgr insists we explicitly extend the relation */
6956 tgl@sss.pgh.pa.us 210 [ + + ]:CBC 6828 : if (blkno == nblocks)
211 : : {
986 tmunro@postgresql.or 212 : 5935 : buf = ExtendBufferedRel(BMR_REL(rel), forkNum, NULL,
213 : : EB_LOCK_FIRST | EB_SKIP_EXTENSION_LOCK);
6956 tgl@sss.pgh.pa.us 214 [ - + ]: 5935 : if (BufferGetBlockNumber(buf) != blkno)
6956 tgl@sss.pgh.pa.us 215 [ # # ]:UBC 0 : elog(ERROR, "unexpected hash relation size: %u, should be %u",
216 : : BufferGetBlockNumber(buf), blkno);
217 : : }
218 : : else
219 : : {
4191 heikki.linnakangas@i 220 :CBC 893 : buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
221 : : NULL);
222 : : }
223 : :
224 : : /* ref count and lock type are correct */
225 : :
226 : : /* initialize the page */
3667 kgrittn@postgresql.o 227 : 6828 : _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
228 : :
6956 tgl@sss.pgh.pa.us 229 : 6828 : return buf;
230 : : }
231 : :
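/*
 * Summary of the three page-fetching helpers above (illustrative note,
 * not part of the original file):
 *
 *     _hash_getbuf()     - page exists and is valid; just pin and lock it
 *     _hash_getinitbuf() - page exists but its contents are reinitialized
 *     _hash_getnewbuf()  - page is normally past the logical EOF; extends
 *                          the index and initializes the new page
 *
 * All three reject P_NEW; the index is always extended by explicit block
 * number, in practice while holding write lock on the metapage.
 */
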
232 : : /*
233 : : * _hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
234 : : *
235 : : * This is identical to _hash_getbuf() but also allows a buffer access
236 : : * strategy to be specified. We use this for VACUUM operations.
237 : : */
238 : : Buffer
6915 239 : 796 : _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
240 : : int access, int flags,
241 : : BufferAccessStrategy bstrategy)
242 : : {
243 : : Buffer buf;
244 : :
245 [ - + ]: 796 : if (blkno == P_NEW)
6915 tgl@sss.pgh.pa.us 246 [ # # ]:UBC 0 : elog(ERROR, "hash AM does not use P_NEW");
247 : :
6395 heikki.linnakangas@i 248 :CBC 796 : buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
249 : :
6915 tgl@sss.pgh.pa.us 250 [ + - ]: 796 : if (access != HASH_NOLOCK)
251 : 796 : LockBuffer(buf, access);
252 : :
253 : : /* ref count and lock type are correct */
254 : :
255 : 796 : _hash_checkpage(rel, buf, flags);
256 : :
257 : 796 : return buf;
258 : : }
259 : :
260 : : /*
261 : : * _hash_relbuf() -- release a locked buffer.
262 : : *
263 : : * Lock and pin (refcount) are both dropped.
264 : : */
265 : : void
8279 266 : 787455 : _hash_relbuf(Relation rel, Buffer buf)
267 : : {
7340 268 : 787455 : UnlockReleaseBuffer(buf);
8279 269 : 787455 : }
270 : :
271 : : /*
272 : : * _hash_dropbuf() -- release an unlocked buffer.
273 : : *
274 : : * This is used to unpin a buffer on which we hold no lock.
275 : : */
276 : : void
277 : 560912 : _hash_dropbuf(Relation rel, Buffer buf)
278 : : {
279 : 560912 : ReleaseBuffer(buf);
280 : 560912 : }
281 : :
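/*
 * Illustrative note (not part of the original file): _hash_relbuf() pairs
 * with a buffer that is both locked and pinned, while _hash_dropbuf()
 * pairs with one that is only pinned. A common pattern in this file is:
 *
 *     LockBuffer(buf, BUFFER_LOCK_UNLOCK);   ... drop lock, keep pin ...
 *     ... do other work ...
 *     _hash_dropbuf(rel, buf);               ... finally drop the pin ...
 */
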
282 : : /*
283 : : * _hash_dropscanbuf() -- release buffers used in scan.
284 : : *
285 : : * This routine unpins the buffers used during a scan on which we
286 : : * hold no lock.
287 : : */
288 : : void
3443 rhaas@postgresql.org 289 : 832 : _hash_dropscanbuf(Relation rel, HashScanOpaque so)
290 : : {
291 : : /* release pin we hold on primary bucket page */
292 [ + + ]: 832 : if (BufferIsValid(so->hashso_bucket_buf) &&
3147 293 [ + + ]: 341 : so->hashso_bucket_buf != so->currPos.buf)
3443 294 : 93 : _hash_dropbuf(rel, so->hashso_bucket_buf);
295 : 832 : so->hashso_bucket_buf = InvalidBuffer;
296 : :
297 : : /* release pin we hold on primary bucket page of bucket being split */
298 [ - + ]: 832 : if (BufferIsValid(so->hashso_split_bucket_buf) &&
3147 rhaas@postgresql.org 299 [ # # ]:UBC 0 : so->hashso_split_bucket_buf != so->currPos.buf)
3443 300 : 0 : _hash_dropbuf(rel, so->hashso_split_bucket_buf);
3443 rhaas@postgresql.org 301 :CBC 832 : so->hashso_split_bucket_buf = InvalidBuffer;
302 : :
303 : : /* release any pin we still hold */
3147 304 [ + + ]: 832 : if (BufferIsValid(so->currPos.buf))
305 : 248 : _hash_dropbuf(rel, so->currPos.buf);
306 : 832 : so->currPos.buf = InvalidBuffer;
307 : :
308 : : /* reset split scan */
3443 309 : 832 : so->hashso_buc_populated = false;
310 : 832 : so->hashso_buc_split = false;
311 : 832 : }
312 : :
313 : :
314 : : /*
315 : : * _hash_init() -- Initialize the metadata page of a hash index,
316 : : * the initial buckets, and the initial bitmap page.
317 : : *
318 : : * The initial number of buckets is dependent on num_tuples, an estimate
319 : : * of the number of tuples to be loaded into the index initially. The
320 : : * chosen number of buckets is returned.
321 : : *
322 : : * We are fairly cavalier about locking here, since we know that no one else
323 : : * could be accessing this index. In particular the rule about not holding
324 : : * multiple buffer locks is ignored.
325 : : */
326 : : uint32
3346 327 : 211 : _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
328 : : {
329 : : Buffer metabuf;
330 : : Buffer buf;
331 : : Buffer bitmapbuf;
332 : : Page pg;
333 : : HashMetaPage metap;
334 : : RegProcedure procid;
335 : : int32 data_width;
336 : : int32 item_width;
337 : : int32 ffactor;
338 : : uint32 num_buckets;
339 : : uint32 i;
340 : : bool use_wal;
341 : :
342 : : /* safety check */
5606 343 [ - + ]: 211 : if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
8324 tgl@sss.pgh.pa.us 344 [ # # ]:UBC 0 : elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
345 : : RelationGetRelationName(rel));
346 : :
347 : : /*
348 : : * WAL log creation of pages if the relation is persistent, or this is the
349 : : * init fork. Init forks for unlogged relations always need to be WAL
350 : : * logged.
351 : : */
3214 rhaas@postgresql.org 352 [ + + + + :CBC 211 : use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM;
- + - - +
+ ]
353 : :
354 : : /*
355 : : * Determine the target fill factor (in tuples per bucket) for this index.
356 : : * The idea is to make the fill factor correspond to pages about as full
357 : : * as the user-settable fillfactor parameter says. We can compute it
358 : : * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
359 : : */
6441 tgl@sss.pgh.pa.us 360 : 211 : data_width = sizeof(uint32);
7405 361 : 211 : item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
362 : : sizeof(ItemIdData); /* include the line pointer */
2353 michael@paquier.xyz 363 [ + - - + : 211 : ffactor = HashGetTargetPageUsage(rel) / item_width;
+ + ]
364 : : /* keep to a sane range */
8279 tgl@sss.pgh.pa.us 365 [ - + ]: 211 : if (ffactor < 10)
8279 tgl@sss.pgh.pa.us 366 :UBC 0 : ffactor = 10;
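/*
 * Worked example (illustrative, assuming a typical 64-bit build with
 * 8192-byte pages and the default hash fillfactor of 75): item_width =
 * MAXALIGN(8) + MAXALIGN(4) + 4 = 8 + 8 + 4 = 20 bytes, and the target
 * page usage is 8192 * 75 / 100 = 6144 bytes, so ffactor = 6144 / 20 =
 * 307 tuples per bucket page.
 */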
367 : :
3169 rhaas@postgresql.org 368 :CBC 211 : procid = index_getprocid(rel, 1, HASHSTANDARD_PROC);
369 : :
370 : : /*
371 : : * We initialize the metapage, the first N bucket pages, and the first
372 : : * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
373 : : * calls to occur. This ensures that the smgr level has the right idea of
374 : : * the physical index length.
375 : : *
376 : : * Critical section not required, because on error the creation of the
377 : : * whole relation will be rolled back.
378 : : */
3346 379 : 211 : metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
380 : 211 : _hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
381 : 211 : MarkBufferDirty(metabuf);
382 : :
383 : 211 : pg = BufferGetPage(metabuf);
384 : 211 : metap = HashPageGetMeta(pg);
385 : :
386 : : /* XLOG stuff */
3214 387 [ + + ]: 211 : if (use_wal)
388 : : {
389 : : xl_hash_init_meta_page xlrec;
390 : : XLogRecPtr recptr;
391 : :
3339 392 : 165 : xlrec.num_tuples = num_tuples;
393 : 165 : xlrec.procid = metap->hashm_procid;
394 : 165 : xlrec.ffactor = metap->hashm_ffactor;
395 : :
396 : 165 : XLogBeginInsert();
448 peter@eisentraut.org 397 : 165 : XLogRegisterData(&xlrec, SizeOfHashInitMetaPage);
3105 tgl@sss.pgh.pa.us 398 : 165 : XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
399 : :
3339 rhaas@postgresql.org 400 : 165 : recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);
401 : :
402 : 165 : PageSetLSN(BufferGetPage(metabuf), recptr);
403 : : }
404 : :
3346 405 : 211 : num_buckets = metap->hashm_maxbucket + 1;
406 : :
407 : : /*
408 : : * Release buffer lock on the metapage while we initialize buckets.
409 : : * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
410 : : * won't accomplish anything. It's a bad idea to hold buffer locks for
411 : : * long intervals in any case, since that can block the bgwriter.
412 : : */
413 : 211 : LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
414 : :
415 : : /*
416 : : * Initialize and WAL Log the first N buckets
417 : : */
418 [ + + ]: 5491 : for (i = 0; i < num_buckets; i++)
419 : : {
420 : : BlockNumber blkno;
421 : :
422 : : /* Allow interrupts, in case N is huge */
423 [ - + ]: 5280 : CHECK_FOR_INTERRUPTS();
424 : :
425 [ + + ]: 5280 : blkno = BUCKET_TO_BLKNO(metap, i);
426 : 5280 : buf = _hash_getnewbuf(rel, blkno, forkNum);
427 : 5280 : _hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
428 : 5280 : MarkBufferDirty(buf);
429 : :
3214 430 [ + + ]: 5280 : if (use_wal)
1399 431 : 5134 : log_newpage(&rel->rd_locator,
432 : : forkNum,
433 : : blkno,
434 : : BufferGetPage(buf),
435 : : true);
3346 436 : 5280 : _hash_relbuf(rel, buf);
437 : : }
438 : :
439 : : /* Now reacquire buffer lock on metapage */
440 : 211 : LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
441 : :
442 : : /*
443 : : * Initialize bitmap page
444 : : */
445 : 211 : bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
446 : 211 : _hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
447 : 211 : MarkBufferDirty(bitmapbuf);
448 : :
449 : : /* add the new bitmap page to the metapage's list of bitmaps */
450 : : /* metapage already has a write lock */
451 [ - + ]: 211 : if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
3346 rhaas@postgresql.org 452 [ # # ]:UBC 0 : ereport(ERROR,
453 : : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
454 : : errmsg("out of overflow pages in hash index \"%s\"",
455 : : RelationGetRelationName(rel))));
456 : :
3346 rhaas@postgresql.org 457 :CBC 211 : metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
458 : :
459 : 211 : metap->hashm_nmaps++;
460 : 211 : MarkBufferDirty(metabuf);
461 : :
462 : : /* XLOG stuff */
3214 463 [ + + ]: 211 : if (use_wal)
464 : : {
465 : : xl_hash_init_bitmap_page xlrec;
466 : : XLogRecPtr recptr;
467 : :
3339 468 : 165 : xlrec.bmsize = metap->hashm_bmsize;
469 : :
470 : 165 : XLogBeginInsert();
448 peter@eisentraut.org 471 : 165 : XLogRegisterData(&xlrec, SizeOfHashInitBitmapPage);
3339 rhaas@postgresql.org 472 : 165 : XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);
473 : :
474 : : /*
475 : : * This is safe only because nobody else can be modifying the index at
476 : : * this stage; it's only visible to the transaction that is creating
477 : : * it.
478 : : */
479 : 165 : XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
480 : :
481 : 165 : recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);
482 : :
483 : 165 : PageSetLSN(BufferGetPage(bitmapbuf), recptr);
484 : 165 : PageSetLSN(BufferGetPage(metabuf), recptr);
485 : : }
486 : :
487 : : /* all done */
3346 488 : 211 : _hash_relbuf(rel, bitmapbuf);
489 : 211 : _hash_relbuf(rel, metabuf);
490 : :
491 : 211 : return num_buckets;
492 : : }
493 : :
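/*
 * Worked example (illustrative, not part of the original file): if the
 * chosen number of initial buckets is 4, the calls above lay the index
 * out as
 *
 *     block 0     - metapage
 *     blocks 1-4  - primary pages of buckets 0..3
 *     block 5     - first bitmap page (num_buckets + 1)
 */
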
494 : : /*
495 : : * _hash_init_metabuffer() -- Initialize the metadata page of a hash index.
496 : : */
497 : : void
498 : 240 : _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
499 : : uint16 ffactor, bool initpage)
500 : : {
501 : : HashMetaPage metap;
502 : : HashPageOpaque pageopaque;
503 : : Page page;
504 : : double dnumbuckets;
505 : : uint32 num_buckets;
506 : : uint32 spare_index;
507 : : uint32 lshift;
508 : :
509 : : /*
510 : : * Choose the number of initial bucket pages to match the fill factor
511 : : * given the estimated number of tuples. We round the result up to the
512 : : * next total number of buckets that has to be allocated before its
513 : : * hashm_spares element can be used. However, always force at least 2
514 : : * bucket pages. The upper limit is determined by considerations explained in
515 : : * _hash_expandtable().
516 : : */
6625 tgl@sss.pgh.pa.us 517 : 240 : dnumbuckets = num_tuples / ffactor;
518 [ + + ]: 240 : if (dnumbuckets <= 2.0)
519 : 71 : num_buckets = 2;
520 [ - + ]: 169 : else if (dnumbuckets >= (double) 0x40000000)
6625 tgl@sss.pgh.pa.us 521 :UBC 0 : num_buckets = 0x40000000;
522 : : else
3319 rhaas@postgresql.org 523 :CBC 169 : num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));
524 : :
525 : 240 : spare_index = _hash_spareindex(num_buckets);
526 [ - + ]: 240 : Assert(spare_index < HASH_MAX_SPLITPOINTS);
527 : :
3346 528 : 240 : page = BufferGetPage(buf);
529 [ + + ]: 240 : if (initpage)
530 : 29 : _hash_pageinit(page, BufferGetPageSize(buf));
531 : :
1495 michael@paquier.xyz 532 : 240 : pageopaque = HashPageGetOpaque(page);
8282 tgl@sss.pgh.pa.us 533 : 240 : pageopaque->hasho_prevblkno = InvalidBlockNumber;
534 : 240 : pageopaque->hasho_nextblkno = InvalidBlockNumber;
1768 peter@eisentraut.org 535 : 240 : pageopaque->hasho_bucket = InvalidBucket;
8281 tgl@sss.pgh.pa.us 536 : 240 : pageopaque->hasho_flag = LH_META_PAGE;
6966 537 : 240 : pageopaque->hasho_page_id = HASHO_PAGE_ID;
538 : :
3346 rhaas@postgresql.org 539 : 240 : metap = HashPageGetMeta(page);
540 : :
10467 bruce@momjian.us 541 : 240 : metap->hashm_magic = HASH_MAGIC;
542 : 240 : metap->hashm_version = HASH_VERSION;
8282 tgl@sss.pgh.pa.us 543 : 240 : metap->hashm_ntuples = 0;
10467 bruce@momjian.us 544 : 240 : metap->hashm_nmaps = 0;
8279 tgl@sss.pgh.pa.us 545 : 240 : metap->hashm_ffactor = ffactor;
3346 rhaas@postgresql.org 546 : 240 : metap->hashm_bsize = HashGetMaxBitmapSize(page);
547 : :
548 : : /* find largest bitmap array size that will fit in page size */
2218 drowley@postgresql.o 549 : 240 : lshift = pg_leftmost_one_pos32(metap->hashm_bsize);
550 [ - + ]: 240 : Assert(lshift > 0);
551 : 240 : metap->hashm_bmsize = 1 << lshift;
552 : 240 : metap->hashm_bmshift = lshift + BYTE_TO_BIT;
8282 tgl@sss.pgh.pa.us 553 [ - + ]: 240 : Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
554 : :
555 : : /*
556 : : * Label the index with its primary hash support function's OID. This is
557 : : * pretty useless for normal operation (in fact, hashm_procid is not used
558 : : * anywhere), but it might be handy for forensic purposes so we keep it.
559 : : */
3346 rhaas@postgresql.org 560 : 240 : metap->hashm_procid = procid;
561 : :
562 : : /*
563 : : * We initialize the index with N buckets, 0 .. N-1, occupying physical
564 : : * blocks 1 to N. The first freespace bitmap page is in block N+1.
565 : : */
3319 566 : 240 : metap->hashm_maxbucket = num_buckets - 1;
567 : :
568 : : /*
569 : : * Set highmask to the next ((2 ^ x) - 1) value, which is sufficient
570 : : * to cover num_buckets.
571 : : */
2218 drowley@postgresql.o 572 : 240 : metap->hashm_highmask = pg_nextpower2_32(num_buckets + 1) - 1;
3319 rhaas@postgresql.org 573 : 240 : metap->hashm_lowmask = (metap->hashm_highmask >> 1);
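/*
 * Worked example (illustrative): with num_buckets = 4 we get
 * pg_nextpower2_32(5) = 8, so hashm_highmask = 7 and hashm_lowmask = 3.
 * A hash key is first masked with highmask; if the result exceeds
 * hashm_maxbucket, it is masked with lowmask instead (see
 * _hash_hashkey2bucket() in hashutil.c).
 */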
574 : :
7664 neilc@samurai.com 575 [ - + - - : 240 : MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
- - - - -
- ]
576 [ - + - - : 240 : MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
- - - - -
- ]
577 : :
578 : : /* Set up mapping for one spare page after the initial splitpoints */
3319 rhaas@postgresql.org 579 : 240 : metap->hashm_spares[spare_index] = 1;
580 : 240 : metap->hashm_ovflpoint = spare_index;
8282 tgl@sss.pgh.pa.us 581 : 240 : metap->hashm_firstfree = 0;
582 : :
583 : : /*
584 : : * Set pd_lower just past the end of the metadata. This is essential,
585 : : * because without doing so, metadata will be lost if xlog.c compresses
586 : : * the page.
587 : : */
3346 rhaas@postgresql.org 588 : 240 : ((PageHeader) page)->pd_lower =
589 : 240 : ((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
10892 scrappy@hub.org 590 : 240 : }
591 : :
592 : : /*
593 : : * _hash_pageinit() -- Initialize a new hash index page.
594 : : */
595 : : void
596 : 7385 : _hash_pageinit(Page page, Size size)
597 : : {
10467 bruce@momjian.us 598 : 7385 : PageInit(page, size, sizeof(HashPageOpaqueData));
10892 scrappy@hub.org 599 : 7385 : }
600 : :
601 : : /*
602 : : * Attempt to expand the hash table by creating one new bucket.
603 : : *
604 : : * This will silently do nothing if we don't get a cleanup lock on the
605 : : * old or new bucket.
606 : : *
607 : : * It completes any pending splits and removes tuples from the old
608 : : * bucket, if there are any left over from the previous split.
609 : : *
610 : : * The caller must hold a pin, but no lock, on the metapage buffer.
611 : : * The buffer is returned in the same state.
612 : : */
613 : : void
614 : 893 : _hash_expandtable(Relation rel, Buffer metabuf)
615 : : {
616 : : HashMetaPage metap;
617 : : Bucket old_bucket;
618 : : Bucket new_bucket;
619 : : uint32 spare_ndx;
620 : : BlockNumber start_oblkno;
621 : : BlockNumber start_nblkno;
622 : : Buffer buf_nblkno;
623 : : Buffer buf_oblkno;
624 : : Page opage;
625 : : Page npage;
626 : : HashPageOpaque oopaque;
627 : : HashPageOpaque nopaque;
628 : : uint32 maxbucket;
629 : : uint32 highmask;
630 : : uint32 lowmask;
3339 rhaas@postgresql.org 631 : 893 : bool metap_update_masks = false;
632 : 893 : bool metap_update_splitpoint = false;
633 : : XLogRecPtr recptr;
634 : :
3443 rhaas@postgresql.org 635 :UBC 0 : restart_expand:
636 : :
637 : : /*
638 : : * Write-lock the meta page. It used to be necessary to acquire a
639 : : * heavyweight lock to begin a split, but that is no longer required.
640 : : */
3420 rhaas@postgresql.org 641 :CBC 893 : LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
642 : :
7485 tgl@sss.pgh.pa.us 643 : 893 : _hash_checkpage(rel, metabuf, LH_META_PAGE);
3667 kgrittn@postgresql.o 644 : 893 : metap = HashPageGetMeta(BufferGetPage(metabuf));
645 : :
646 : : /*
647 : : * Check to see if split is still needed; someone else might have already
648 : : * done one while we waited for the lock.
649 : : *
650 : : * Make sure this stays in sync with _hash_doinsert()
651 : : */
8279 tgl@sss.pgh.pa.us 652 : 893 : if (metap->hashm_ntuples <=
653 [ - + ]: 893 : (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
8279 tgl@sss.pgh.pa.us 654 :UBC 0 : goto fail;
655 : :
656 : : /*
657 : : * Can't split anymore if maxbucket has reached its maximum possible
658 : : * value.
659 : : *
660 : : * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
661 : : * the calculation maxbucket+1 mustn't overflow). Currently we restrict
662 : : * to half that to prevent failure of pg_ceil_log2_32() and insufficient
663 : : * space in hashm_spares[]. It's moot anyway because an index with 2^32
664 : : * buckets would certainly overflow BlockNumber and hence
665 : : * _hash_alloc_buckets() would fail, but if we supported buckets smaller
666 : : * than a disk block then this would be an independent constraint.
667 : : *
668 : : * If you change this, see also the maximum initial number of buckets in
669 : : * _hash_init().
670 : : */
7107 tgl@sss.pgh.pa.us 671 [ - + ]:CBC 893 : if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
7107 tgl@sss.pgh.pa.us 672 :UBC 0 : goto fail;
673 : :
674 : : /*
675 : : * Determine which bucket is to be split, and attempt to take cleanup lock
676 : : * on the old bucket. If we can't get the lock, give up.
677 : : *
678 : : * The cleanup lock protects us not only against other backends, but
679 : : * against our own backend as well.
680 : : *
681 : : * The cleanup lock is mainly to protect the split from concurrent
682 : : * inserts. See src/backend/access/hash/README, Lock Definitions for
683 : : * further details. Due to this locking restriction, if there is any
684 : : * pending scan, the split will give up which is not good, but harmless.
685 : : */
6956 tgl@sss.pgh.pa.us 686 :CBC 893 : new_bucket = metap->hashm_maxbucket + 1;
687 : :
8282 688 : 893 : old_bucket = (new_bucket & metap->hashm_lowmask);
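/*
 * Worked example (illustrative): with hashm_maxbucket = 5 and
 * hashm_lowmask = 3, the bucket to be created is new_bucket = 6 and
 * old_bucket = 6 & 3 = 2, i.e. bucket 2 is split and its tuples are
 * redistributed between buckets 2 and 6.
 */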
689 : :
8279 690 [ + + ]: 893 : start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
691 : :
3443 rhaas@postgresql.org 692 : 893 : buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
693 [ - + ]: 893 : if (!buf_oblkno)
8279 tgl@sss.pgh.pa.us 694 :UBC 0 : goto fail;
695 : :
3443 rhaas@postgresql.org 696 :CBC 893 : opage = BufferGetPage(buf_oblkno);
1495 michael@paquier.xyz 697 : 893 : oopaque = HashPageGetOpaque(opage);
698 : :
699 : : /*
700 : : * We want to finish any pending split from this bucket: there is no
701 : : * apparent benefit in deferring it, and it would complicate the code to
702 : : * finish a split that involves multiple buckets, considering the case
703 : : * where the new split also fails. We don't need to consider the new
704 : : * bucket for completing the split here, as a re-split of the new bucket
705 : : * cannot start while there is still a pending split from the old bucket.
706 : : */
3443 rhaas@postgresql.org 707 [ - + ]: 893 : if (H_BUCKET_BEING_SPLIT(oopaque))
708 : : {
709 : : /*
710 : : * Copy bucket mapping info now; refer to the comment in code below where
711 : : * we copy this information before calling _hash_splitbucket to see
712 : : * why this is okay.
713 : : */
3443 rhaas@postgresql.org 714 :UBC 0 : maxbucket = metap->hashm_maxbucket;
715 : 0 : highmask = metap->hashm_highmask;
716 : 0 : lowmask = metap->hashm_lowmask;
717 : :
718 : : /*
719 : : * Release the lock on metapage and old_bucket, before completing the
720 : : * split.
721 : : */
3420 722 : 0 : LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
723 : 0 : LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
724 : :
3443 725 : 0 : _hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
726 : : highmask, lowmask);
727 : :
728 : : /* release the pin on the old buffer and retry the expansion. */
729 : 0 : _hash_dropbuf(rel, buf_oblkno);
730 : :
731 : 0 : goto restart_expand;
732 : : }
733 : :
734 : : /*
735 : : * Clean up the tuples remaining from the previous split. This operation
736 : : * requires a cleanup lock, and we already have one on the old bucket, so
737 : : * let's do it. We also don't want to allow further splits from the bucket
738 : : * until the garbage of the previous split is cleaned. This has two
739 : : * advantages: first, it helps avoid bloat due to the garbage; and
740 : : * second, during cleanup of the bucket we can be sure that the
741 : : * garbage tuples belong to the most recently split bucket. By contrast,
742 : : * if we allowed cleanup of the bucket after the metapage is updated to
743 : : * indicate the new split but before the actual split, the cleanup
744 : : * operation would be unable to decide whether a tuple has been moved to
745 : : * the newly created bucket, and could end up deleting such tuples.
746 : : */
3443 rhaas@postgresql.org 747 [ - + ]:CBC 893 : if (H_NEEDS_SPLIT_CLEANUP(oopaque))
748 : : {
749 : : /*
750 : : * Copy bucket mapping info now; refer to the comment in code below
751 : : * where we copy this information before calling _hash_splitbucket to
752 : : * see why this is okay.
753 : : */
3438 rhaas@postgresql.org 754 :UBC 0 : maxbucket = metap->hashm_maxbucket;
755 : 0 : highmask = metap->hashm_highmask;
756 : 0 : lowmask = metap->hashm_lowmask;
757 : :
758 : : /* Release the metapage lock. */
3420 759 : 0 : LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
760 : :
3443 761 : 0 : hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
762 : : maxbucket, highmask, lowmask, NULL, NULL, true,
763 : : NULL, NULL);
764 : :
765 : 0 : _hash_dropbuf(rel, buf_oblkno);
766 : :
767 : 0 : goto restart_expand;
768 : : }
769 : :
770 : : /*
771 : : * There shouldn't be any active scan on new bucket.
772 : : *
773 : : * Note: it is safe to compute the new bucket's blkno here, even though we
774 : : * may still need to update the BUCKET_TO_BLKNO mapping. This is because
775 : : * the current value of hashm_spares[hashm_ovflpoint] correctly shows
776 : : * where we are going to put a new splitpoint's worth of buckets.
777 : : */
6956 tgl@sss.pgh.pa.us 778 [ + - ]:CBC 893 : start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
779 : :
780 : : /*
781 : : * If the split point is increasing we need to allocate a new batch of
782 : : * bucket pages.
783 : : */
3319 rhaas@postgresql.org 784 : 893 : spare_ndx = _hash_spareindex(new_bucket + 1);
6956 tgl@sss.pgh.pa.us 785 [ + + ]: 893 : if (spare_ndx > metap->hashm_ovflpoint)
786 : : {
787 : : uint32 buckets_to_add;
788 : :
789 [ - + ]: 39 : Assert(spare_ndx == metap->hashm_ovflpoint + 1);
790 : :
791 : : /*
792 : : * We treat allocation of buckets as a separate WAL-logged action.
793 : : * Even if we fail after this operation, we won't leak bucket pages;
794 : : * rather, the next split will consume this space. In any case, even
795 : : * without failure we don't use all the space in one split operation.
796 : : */
3319 rhaas@postgresql.org 797 : 39 : buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
798 [ - + ]: 39 : if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
799 : : {
800 : : /* can't split due to BlockNumber overflow */
3443 rhaas@postgresql.org 801 :UBC 0 : _hash_relbuf(rel, buf_oblkno);
6956 tgl@sss.pgh.pa.us 802 : 0 : goto fail;
803 : : }
804 : : }
805 : :
806 : : /*
807 : : * Physically allocate the new bucket's primary page. We want to do this
808 : : * before changing the metapage's mapping info, in case we can't get the
809 : : * disk space.
810 : : *
811 : : * XXX It doesn't make sense to call _hash_getnewbuf first, zeroing the
812 : : * buffer, and then only afterwards check whether we have a cleanup lock.
813 : : * However, since no scan can be accessing the buffer yet, any concurrent
814 : : * accesses will just be from processes like the bgwriter or checkpointer
815 : : * which don't care about its contents, so it doesn't really matter.
816 : : */
4054 tgl@sss.pgh.pa.us 817 :CBC 893 : buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
3443 rhaas@postgresql.org 818 [ - + ]: 893 : if (!IsBufferCleanupOK(buf_nblkno))
819 : : {
3443 rhaas@postgresql.org 820 :UBC 0 : _hash_relbuf(rel, buf_oblkno);
821 : 0 : _hash_relbuf(rel, buf_nblkno);
822 : 0 : goto fail;
823 : : }
824 : :
825 : : /*
826 : : * Since we are scribbling on the pages in the shared buffers, establish a
827 : : * critical section. Any failure in this next code leaves us with a big
828 : : * problem: the metapage is effectively corrupt but could get written back
829 : : * to disk.
830 : : */
7635 tgl@sss.pgh.pa.us 831 :CBC 893 : START_CRIT_SECTION();
832 : :
833 : : /*
834 : : * Okay to proceed with split. Update the metapage bucket mapping info.
835 : : */
8279 836 : 893 : metap->hashm_maxbucket = new_bucket;
837 : :
8282 838 [ + + ]: 893 : if (new_bucket > metap->hashm_highmask)
839 : : {
840 : : /* Starting a new doubling */
841 : 15 : metap->hashm_lowmask = metap->hashm_highmask;
842 : 15 : metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
3339 rhaas@postgresql.org 843 : 15 : metap_update_masks = true;
844 : : }
845 : :
846 : : /*
847 : : * If the split point is increasing we need to adjust the hashm_spares[]
848 : : * array and hashm_ovflpoint so that future overflow pages will be created
849 : : * beyond this new batch of bucket pages.
850 : : */
9060 tgl@sss.pgh.pa.us 851 [ + + ]: 893 : if (spare_ndx > metap->hashm_ovflpoint)
852 : : {
853 : 39 : metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
854 : 39 : metap->hashm_ovflpoint = spare_ndx;
3339 rhaas@postgresql.org 855 : 39 : metap_update_splitpoint = true;
856 : : }
857 : :
3352 858 : 893 : MarkBufferDirty(metabuf);
859 : :
860 : : /*
861 : : * Copy bucket mapping info now; this saves re-accessing the meta page
862 : : * inside _hash_splitbucket's inner loop. Note that once we drop the
863 : : * split lock, other splits could begin, so these values might be out of
864 : : * date before _hash_splitbucket finishes. That's okay, since all it
865 : : * needs is to tell which of these two buckets to map hashkeys into.
866 : : */
8279 tgl@sss.pgh.pa.us 867 : 893 : maxbucket = metap->hashm_maxbucket;
868 : 893 : highmask = metap->hashm_highmask;
869 : 893 : lowmask = metap->hashm_lowmask;
870 : :
3352 rhaas@postgresql.org 871 : 893 : opage = BufferGetPage(buf_oblkno);
1495 michael@paquier.xyz 872 : 893 : oopaque = HashPageGetOpaque(opage);
873 : :
874 : : /*
875 : : * Mark the old bucket to indicate that split is in progress. (At
876 : : * operation end, we will clear the split-in-progress flag.) Also, for a
877 : : * primary bucket page, hasho_prevblkno stores the number of buckets that
878 : : * existed as of the last split, so we must update that value here.
879 : : */
3352 rhaas@postgresql.org 880 : 893 : oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
881 : 893 : oopaque->hasho_prevblkno = maxbucket;
882 : :
883 : 893 : MarkBufferDirty(buf_oblkno);
884 : :
885 : 893 : npage = BufferGetPage(buf_nblkno);
886 : :
887 : : /*
888 : : * initialize the new bucket's primary page and mark it to indicate that
889 : : * split is in progress.
890 : : */
1495 michael@paquier.xyz 891 : 893 : nopaque = HashPageGetOpaque(npage);
3352 rhaas@postgresql.org 892 : 893 : nopaque->hasho_prevblkno = maxbucket;
893 : 893 : nopaque->hasho_nextblkno = InvalidBlockNumber;
894 : 893 : nopaque->hasho_bucket = new_bucket;
895 : 893 : nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED;
896 : 893 : nopaque->hasho_page_id = HASHO_PAGE_ID;
897 : :
898 : 893 : MarkBufferDirty(buf_nblkno);
899 : :
900 : : /* XLOG stuff */
3339 901 [ + - - + : 893 : if (RelationNeedsWAL(rel))
- - - - ]
3339 rhaas@postgresql.org 902 :GIC 893 : {
903 : : xl_hash_split_allocate_page xlrec;
904 : :
3339 rhaas@postgresql.org 905 :CBC 893 : xlrec.new_bucket = maxbucket;
906 : 893 : xlrec.old_bucket_flag = oopaque->hasho_flag;
907 : 893 : xlrec.new_bucket_flag = nopaque->hasho_flag;
908 : 893 : xlrec.flags = 0;
909 : :
910 : 893 : XLogBeginInsert();
911 : :
912 : 893 : XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
913 : 893 : XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
914 : 893 : XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);
915 : :
916 [ + + ]: 893 : if (metap_update_masks)
917 : : {
918 : 15 : xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
448 peter@eisentraut.org 919 : 15 : XLogRegisterBufData(2, &metap->hashm_lowmask, sizeof(uint32));
920 : 15 : XLogRegisterBufData(2, &metap->hashm_highmask, sizeof(uint32));
921 : : }
922 : :
3339 rhaas@postgresql.org 923 [ + + ]: 893 : if (metap_update_splitpoint)
924 : : {
925 : 39 : xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
448 peter@eisentraut.org 926 : 39 : XLogRegisterBufData(2, &metap->hashm_ovflpoint,
927 : : sizeof(uint32));
3339 rhaas@postgresql.org 928 : 39 : XLogRegisterBufData(2,
448 peter@eisentraut.org 929 : 39 : &metap->hashm_spares[metap->hashm_ovflpoint],
930 : : sizeof(uint32));
931 : : }
932 : :
933 : 893 : XLogRegisterData(&xlrec, SizeOfHashSplitAllocPage);
934 : :
3339 rhaas@postgresql.org 935 : 893 : recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);
936 : : }
937 : : else
44 pg@bowt.ie 938 :UNC 0 : recptr = XLogGetFakeLSN(rel);
939 : :
44 pg@bowt.ie 940 :GNC 893 : PageSetLSN(BufferGetPage(buf_oblkno), recptr);
941 : 893 : PageSetLSN(BufferGetPage(buf_nblkno), recptr);
942 : 893 : PageSetLSN(BufferGetPage(metabuf), recptr);
943 : :
3352 rhaas@postgresql.org 944 [ - + ]:CBC 893 : END_CRIT_SECTION();
945 : :
946 : : /* drop lock, but keep pin */
3420 947 : 893 : LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
948 : :
949 : : /* Relocate records to the new bucket */
4050 tgl@sss.pgh.pa.us 950 : 893 : _hash_splitbucket(rel, metabuf,
951 : : old_bucket, new_bucket,
952 : : buf_oblkno, buf_nblkno, NULL,
953 : : maxbucket, highmask, lowmask);
954 : :
955 : : /* all done, now release the pins on primary buckets. */
3196 rhaas@postgresql.org 956 : 893 : _hash_dropbuf(rel, buf_oblkno);
957 : 893 : _hash_dropbuf(rel, buf_nblkno);
958 : :
8279 tgl@sss.pgh.pa.us 959 : 893 : return;
960 : :
961 : : /* Here if decide not to split or fail to acquire old bucket lock */
8279 tgl@sss.pgh.pa.us 962 :UBC 0 : fail:
963 : :
964 : : /* We didn't write the metapage, so just drop lock */
3420 rhaas@postgresql.org 965 : 0 : LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
966 : : }
967 : :
968 : :
969 : : /*
970 : : * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
971 : : *
972 : : * This does not need to initialize the new bucket pages; we'll do that as
973 : : * each one is used by _hash_expandtable(). But we have to extend the logical
974 : : * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
975 : : * sync with ours, so that we don't get complaints from smgr.
976 : : *
977 : : * We do this by writing a page of zeroes at the end of the splitpoint range.
978 : : * We expect that the filesystem will ensure that the intervening pages read
979 : : * as zeroes too. On many filesystems this "hole" will not be allocated
980 : : * immediately, which means that the index file may end up more fragmented
981 : : * than if we forced it all to be allocated now; but since we don't scan
982 : : * hash indexes sequentially anyway, that probably doesn't matter.
983 : : *
984 : : * XXX It's annoying that this code is executed with the metapage lock held.
985 : : * We need to interlock against _hash_addovflpage() adding a new overflow page
986 : : * concurrently, but it'd likely be better to use LockRelationForExtension
987 : : * for the purpose. OTOH, adding a splitpoint is a very infrequent operation,
988 : : * so it may not be worth worrying about.
989 : : *
990 : : * Returns true if successful, or false if allocation failed due to
991 : : * BlockNumber overflow.
992 : : */
993 : : static bool
6956 tgl@sss.pgh.pa.us 994 :CBC 39 : _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
995 : : {
996 : : BlockNumber lastblock;
997 : : PGIOAlignedBlock zerobuf;
998 : : Page page;
999 : : HashPageOpaque ovflopaque;
1000 : :
7107 1001 : 39 : lastblock = firstblock + nblocks - 1;
1002 : :
1003 : : /*
1004 : : * Check for overflow in block number calculation; if so, we cannot extend
1005 : : * the index anymore.
1006 : : */
1007 [ + - - + ]: 39 : if (lastblock < firstblock || lastblock == InvalidBlockNumber)
6956 tgl@sss.pgh.pa.us 1008 :UBC 0 : return false;
1009 : :
2803 tgl@sss.pgh.pa.us 1010 :CBC 39 : page = (Page) zerobuf.data;
1011 : :
1012 : : /*
1013 : : * Initialize the page. Just zeroing the page won't work; see
1014 : : * _hash_freeovflpage for similar usage. We take care to make the special
1015 : : * space valid for the benefit of tools such as pageinspect.
1016 : : */
3339 rhaas@postgresql.org 1017 : 39 : _hash_pageinit(page, BLCKSZ);
1018 : :
1495 michael@paquier.xyz 1019 : 39 : ovflopaque = HashPageGetOpaque(page);
1020 : :
3317 rhaas@postgresql.org 1021 : 39 : ovflopaque->hasho_prevblkno = InvalidBlockNumber;
1022 : 39 : ovflopaque->hasho_nextblkno = InvalidBlockNumber;
1768 peter@eisentraut.org 1023 : 39 : ovflopaque->hasho_bucket = InvalidBucket;
3317 rhaas@postgresql.org 1024 : 39 : ovflopaque->hasho_flag = LH_UNUSED_PAGE;
1025 : 39 : ovflopaque->hasho_page_id = HASHO_PAGE_ID;
1026 : :
3339 1027 [ + - - + : 39 : if (RelationNeedsWAL(rel))
- - - - ]
1399 1028 : 39 : log_newpage(&rel->rd_locator,
1029 : : MAIN_FORKNUM,
1030 : : lastblock,
1031 : : zerobuf.data,
1032 : : true);
1033 : :
39 andres@anarazel.de 1034 :GNC 39 : PageSetChecksum(page, lastblock);
1758 tgl@sss.pgh.pa.us 1035 :CBC 39 : smgrextend(RelationGetSmgr(rel), MAIN_FORKNUM, lastblock, zerobuf.data,
1036 : : false);
1037 : :
6956 1038 : 39 : return true;
1039 : : }
1040 : :
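/*
 * Illustrative example (not part of the original file): a call with
 * firstblock = 10 and nblocks = 6 writes one initialized page (marked
 * LH_UNUSED_PAGE) at lastblock = 15; blocks 10..14 are left as a
 * filesystem "hole" that reads back as zeroes until the new buckets are
 * actually used.
 */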
1041 : :
1042 : : /*
1043 : : * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
1044 : : *
1045 : : * This routine partitions the tuples between the old and new buckets and
1046 : : * is also used to finish incomplete split operations. To finish a
1047 : : * previously interrupted split operation, the caller needs to fill htab.
1048 : : * If htab is set, we skip moving tuples that already exist in htab;
1049 : : * otherwise, a NULL htab indicates that all the tuples belonging to the
1050 : : * new bucket are to be moved.
1051 : : *
1052 : : * We are splitting a bucket that consists of a base bucket page and zero
1053 : : * or more overflow (bucket chain) pages. We must relocate tuples that
1054 : : * belong in the new bucket.
1055 : : *
1056 : : * The caller must hold cleanup locks on both buckets to ensure that
1057 : : * no one else is trying to access them (see README).
1058 : : *
1059 : : * The caller must hold a pin, but no lock, on the metapage buffer.
1060 : : * The buffer is returned in the same state. (The metapage is only
1061 : : * touched if it becomes necessary to add or remove overflow pages.)
1062 : : *
1063 : : * Split needs to retain pin on primary bucket pages of both old and new
1064 : : * buckets till end of operation. This is to prevent vacuum from starting
1065 : : * while a split is in progress.
1066 : : *
1067 : : * In addition, the caller must have created the new bucket's base page,
1068 : : * which is passed in buffer nbuf, pinned and write-locked. The lock will be
1069 : : * released here and pin must be released by the caller. (The API is set up
1070 : : * this way because we must do _hash_getnewbuf() before releasing the metapage
1071 : : * write lock. So instead of passing the new bucket's start block number, we
1072 : : * pass an actual buffer.)
1073 : : */
1074 : : static void
8282 1075 : 893 : _hash_splitbucket(Relation rel,
1076 : : Buffer metabuf,
1077 : : Bucket obucket,
1078 : : Bucket nbucket,
1079 : : Buffer obuf,
1080 : : Buffer nbuf,
1081 : : HTAB *htab,
1082 : : uint32 maxbucket,
1083 : : uint32 highmask,
1084 : : uint32 lowmask)
1085 : : {
1086 : : Buffer bucket_obuf;
1087 : : Buffer bucket_nbuf;
1088 : : Page opage;
1089 : : Page npage;
1090 : : HashPageOpaque oopaque;
1091 : : HashPageOpaque nopaque;
1092 : : OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
1093 : : IndexTuple itups[MaxIndexTuplesPerPage];
3339 rhaas@postgresql.org 1094 : 893 : Size all_tups_size = 0;
1095 : : int i;
1096 : 893 : uint16 nitups = 0;
1097 : : XLogRecPtr recptr;
1098 : :
3443 1099 : 893 : bucket_obuf = obuf;
1100 : 893 : opage = BufferGetPage(obuf);
1495 michael@paquier.xyz 1101 : 893 : oopaque = HashPageGetOpaque(opage);
1102 : :
3443 rhaas@postgresql.org 1103 : 893 : bucket_nbuf = nbuf;
1104 : 893 : npage = BufferGetPage(nbuf);
1495 michael@paquier.xyz 1105 : 893 : nopaque = HashPageGetOpaque(npage);
1106 : :
1107 : : /* Copy the predicate locks from old bucket to new bucket. */
2950 teodor@sigaev.ru 1108 : 893 : PredicateLockPageSplit(rel,
1109 : : BufferGetBlockNumber(bucket_obuf),
1110 : : BufferGetBlockNumber(bucket_nbuf));
1111 : :
1112 : : /*
1113 : : * Partition the tuples in the old bucket between the old bucket and the
1114 : : * new bucket, advancing along the old bucket's overflow bucket chain and
1115 : : * adding overflow pages to the new bucket as needed. Outer loop iterates
1116 : : * once per page in old bucket.
1117 : : */
1118 : : for (;;)
10467 bruce@momjian.us 1119 : 227 : {
1120 : : BlockNumber oblkno;
1121 : : OffsetNumber ooffnum;
1122 : : OffsetNumber omaxoffnum;
1123 : :
1124 : : /* Scan each tuple in old page */
6029 tgl@sss.pgh.pa.us 1125 : 1120 : omaxoffnum = PageGetMaxOffsetNumber(opage);
1126 : 1120 : for (ooffnum = FirstOffsetNumber;
1127 [ + + ]: 206380 : ooffnum <= omaxoffnum;
1128 : 205260 : ooffnum = OffsetNumberNext(ooffnum))
1129 : : {
1130 : : IndexTuple itup;
1131 : : Size itemsz;
1132 : : Bucket bucket;
3443 rhaas@postgresql.org 1133 : 205260 : bool found = false;
1134 : :
1135 : : /* skip dead tuples */
3465 1136 [ - + ]: 205260 : if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
3465 rhaas@postgresql.org 1137 :UBC 0 : continue;
1138 : :
1139 : : /*
1140 : : * Before inserting a tuple, probe the hash table containing TIDs
1141 : : * of tuples belonging to new bucket, if we find a match, then
1142 : : * skip that tuple, else fetch the item's hash key (conveniently
1143 : : * stored in the item) and determine which bucket it now belongs
1144 : : * in.
1145 : : */
6029 tgl@sss.pgh.pa.us 1146 :CBC 205260 : itup = (IndexTuple) PageGetItem(opage,
1147 : 205260 : PageGetItemId(opage, ooffnum));
1148 : :
3443 rhaas@postgresql.org 1149 [ - + ]: 205260 : if (htab)
3443 rhaas@postgresql.org 1150 :UBC 0 : (void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
1151 : :
3443 rhaas@postgresql.org 1152 [ - + ]:CBC 205260 : if (found)
3443 rhaas@postgresql.org 1153 :UBC 0 : continue;
1154 : :
6029 tgl@sss.pgh.pa.us 1155 :CBC 205260 : bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
1156 : : maxbucket, highmask, lowmask);
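/*
 * Illustrative example: suppose maxbucket = 6, highmask = 7 and
 * lowmask = 3, and bucket 2 is being split into bucket 6. A tuple
 * whose hash key is 14 maps to 14 & 7 = 6, so it is relocated to
 * nbucket; a hash key of 10 maps to 10 & 7 = 2, so that tuple stays
 * in obucket.
 */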
1157 : :
1158 [ + + ]: 205260 : if (bucket == nbucket)
1159 : : {
1160 : : IndexTuple new_itup;
1161 : :
1162 : : /*
1163 : : * make a copy of index tuple as we have to scribble on it.
1164 : : */
3443 rhaas@postgresql.org 1165 : 84187 : new_itup = CopyIndexTuple(itup);
1166 : :
1167 : : /*
1168 : : * mark the index tuple as moved by split, such tuples are
1169 : : * skipped by scan if there is split in progress for a bucket.
1170 : : */
1171 : 84187 : new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
1172 : :
1173 : : /*
1174 : : * insert the tuple into the new bucket. if it doesn't fit on
1175 : : * the current page in the new bucket, we must allocate a new
1176 : : * overflow page and place the tuple on that page instead.
1177 : : */
2988 tgl@sss.pgh.pa.us 1178 : 84187 : itemsz = IndexTupleSize(new_itup);
6029 1179 : 84187 : itemsz = MAXALIGN(itemsz);
1180 : :
3339 rhaas@postgresql.org 1181 [ + + ]: 84187 : if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
1182 : : {
1183 : : /*
1184 : : * Change the shared buffer state in critical section,
1185 : : * otherwise any error could make it unrecoverable.
1186 : : */
1187 : 52 : START_CRIT_SECTION();
1188 : :
1189 : 52 : _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
3420 1190 : 52 : MarkBufferDirty(nbuf);
1191 : : /* log the split operation before releasing the lock */
3339 1192 : 52 : log_split_page(rel, nbuf);
1193 : :
1194 [ - + ]: 52 : END_CRIT_SECTION();
1195 : :
1196 : : /* drop lock, but keep pin */
3420 1197 : 52 : LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1198 : :
1199 : : /* be tidy */
3339 1200 [ + + ]: 21216 : for (i = 0; i < nitups; i++)
1201 : 21164 : pfree(itups[i]);
1202 : 52 : nitups = 0;
1203 : 52 : all_tups_size = 0;
1204 : :
1205 : : /* chain to a new overflow page */
1700 michael@paquier.xyz 1206 : 52 : nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf));
3667 kgrittn@postgresql.o 1207 : 52 : npage = BufferGetPage(nbuf);
1495 michael@paquier.xyz 1208 : 52 : nopaque = HashPageGetOpaque(npage);
1209 : : }
1210 : :
3339 rhaas@postgresql.org 1211 : 84187 : itups[nitups++] = new_itup;
1212 : 84187 : all_tups_size += itemsz;
1213 : : }
1214 : : else
1215 : : {
1216 : : /*
1217 : : * the tuple stays on this page, so nothing to do.
1218 : : */
6029 tgl@sss.pgh.pa.us 1219 [ - + ]: 121073 : Assert(bucket == obucket);
1220 : : }
1221 : : }
1222 : :
1223 : 1120 : oblkno = oopaque->hasho_nextblkno;
1224 : :
1225 : : /* retain the pin on the old primary bucket */
3443 rhaas@postgresql.org 1226 [ + + ]: 1120 : if (obuf == bucket_obuf)
3420 1227 : 893 : LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1228 : : else
6029 tgl@sss.pgh.pa.us 1229 : 227 : _hash_relbuf(rel, obuf);
1230 : :
1231 : : /* Exit loop if no more overflow pages in old bucket */
1232 [ + + ]: 1120 : if (!BlockNumberIsValid(oblkno))
1233 : : {
1234 : : /*
1235 : : * Change the shared buffer state in critical section, otherwise
1236 : : * any error could make it unrecoverable.
1237 : : */
3339 rhaas@postgresql.org 1238 : 893 : START_CRIT_SECTION();
1239 : :
1240 : 893 : _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
3352 1241 : 893 : MarkBufferDirty(nbuf);
1242 : : /* log the split operation before releasing the lock */
3339 1243 : 893 : log_split_page(rel, nbuf);
1244 : :
1245 [ - + ]: 893 : END_CRIT_SECTION();
1246 : :
3352 1247 [ + + ]: 893 : if (nbuf == bucket_nbuf)
1248 : 889 : LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1249 : : else
1250 : 4 : _hash_relbuf(rel, nbuf);
1251 : :
1252 : : /* be tidy */
3339 1253 [ + + ]: 63916 : for (i = 0; i < nitups; i++)
1254 : 63023 : pfree(itups[i]);
6029 tgl@sss.pgh.pa.us 1255 : 893 : break;
1256 : : }
1257 : :
1258 : : /* Else, advance to next old page */
3443 rhaas@postgresql.org 1259 : 227 : obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
3667 kgrittn@postgresql.o 1260 : 227 : opage = BufferGetPage(obuf);
1495 michael@paquier.xyz 1261 : 227 : oopaque = HashPageGetOpaque(opage);
1262 : : }
1263 : :
1264 : : /*
1265 : : * We're at the end of the old bucket chain, so we're done partitioning
1266 : : * the tuples. Mark the old and new buckets to indicate split is
1267 : : * finished.
1268 : : *
1269 : : * To avoid deadlocks due to locking order of buckets, first lock the old
1270 : : * bucket and then the new bucket.
1271 : : */
3420 rhaas@postgresql.org 1272 : 893 : LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
3443 1273 : 893 : opage = BufferGetPage(bucket_obuf);
1495 michael@paquier.xyz 1274 : 893 : oopaque = HashPageGetOpaque(opage);
1275 : :
3420 rhaas@postgresql.org 1276 : 893 : LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
3443 1277 : 893 : npage = BufferGetPage(bucket_nbuf);
1495 michael@paquier.xyz 1278 : 893 : nopaque = HashPageGetOpaque(npage);
1279 : :
3339 rhaas@postgresql.org 1280 : 893 : START_CRIT_SECTION();
1281 : :
3443 1282 : 893 : oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
1283 : 893 : nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
1284 : :
1285 : : /*
1286 : : * After the split is finished, mark the old bucket to indicate that it
1287 : : * contains deletable tuples. We will clear split-cleanup flag after
1288 : : * deleting such tuples either at the end of split or at the next split
1289 : : * from old bucket or at the time of vacuum.
1290 : : */
1291 : 893 : oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
1292 : :
1293 : : /*
1294 : : * now write the buffers; we don't release the locks here, as the caller
1295 : : * is responsible for releasing them.
1296 : : */
1297 : 893 : MarkBufferDirty(bucket_obuf);
1298 : 893 : MarkBufferDirty(bucket_nbuf);
1299 : :
3339 1300 [ + - - + : 893 : if (RelationNeedsWAL(rel))
- - - - ]
3339 rhaas@postgresql.org 1301 :GIC 893 : {
1302 : : xl_hash_split_complete xlrec;
1303 : :
3339 rhaas@postgresql.org 1304 :CBC 893 : xlrec.old_bucket_flag = oopaque->hasho_flag;
1305 : 893 : xlrec.new_bucket_flag = nopaque->hasho_flag;
1306 : :
1307 : 893 : XLogBeginInsert();
1308 : :
448 peter@eisentraut.org 1309 : 893 : XLogRegisterData(&xlrec, SizeOfHashSplitComplete);
1310 : :
3339 rhaas@postgresql.org 1311 : 893 : XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
1312 : 893 : XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);
1313 : :
1314 : 893 : recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
1315 : : }
1316 : : else
44 pg@bowt.ie 1317 :UNC 0 : recptr = XLogGetFakeLSN(rel);
1318 : :
44 pg@bowt.ie 1319 :GNC 893 : PageSetLSN(BufferGetPage(bucket_obuf), recptr);
1320 : 893 : PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
1321 : :
3339 rhaas@postgresql.org 1322 [ - + ]:CBC 893 : END_CRIT_SECTION();
1323 : :
1324 : : /*
1325 : : * If possible, clean up the old bucket. We might not be able to do this
1326 : : * if someone else has a pin on it, but if not then we can go ahead. This
1327 : : * isn't absolutely necessary, but it reduces bloat; if we don't do it
1328 : : * now, VACUUM will do it eventually, but maybe not until new overflow
1329 : : * pages have been allocated. Note that there's no need to clean up the
1330 : : * new bucket.
1331 : : */
3196 1332 [ + - ]: 893 : if (IsBufferCleanupOK(bucket_obuf))
1333 : : {
1334 : 893 : LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1335 : 893 : hashbucketcleanup(rel, obucket, bucket_obuf,
1336 : : BufferGetBlockNumber(bucket_obuf), NULL,
1337 : : maxbucket, highmask, lowmask, NULL, NULL, true,
1338 : : NULL, NULL);
1339 : : }
1340 : : else
1341 : : {
3196 rhaas@postgresql.org 1342 :UBC 0 : LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1343 : 0 : LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK);
1344 : : }
3443 rhaas@postgresql.org 1345 :CBC 893 : }
1346 : :
1347 : : /*
1348 : : * _hash_finish_split() -- Finish the previously interrupted split operation
1349 : : *
1350 : : * To complete the split operation, we build a hash table of the TIDs in
1351 : : * the new bucket, which the split operation then uses to skip tuples
1352 : : * that were already moved before the split was interrupted.
1353 : : *
1354 : : * The caller must hold a pin, but no lock, on the metapage and old bucket's
1355 : : * primary page buffer. The buffers are returned in the same state. (The
1356 : : * metapage is only touched if it becomes necessary to add or remove overflow
1357 : : * pages.)
1358 : : */
1359 : : void
3443 rhaas@postgresql.org 1360 :UBC 0 : _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
1361 : : uint32 maxbucket, uint32 highmask, uint32 lowmask)
1362 : : {
1363 : : HASHCTL hash_ctl;
1364 : : HTAB *tidhtab;
1365 : 0 : Buffer bucket_nbuf = InvalidBuffer;
1366 : : Buffer nbuf;
1367 : : Page npage;
1368 : : BlockNumber nblkno;
1369 : : BlockNumber bucket_nblkno;
1370 : : HashPageOpaque npageopaque;
1371 : : Bucket nbucket;
1372 : : bool found;
1373 : :
1374 : : /* Initialize the hash table used to track TIDs */
1375 : 0 : hash_ctl.keysize = sizeof(ItemPointerData);
1376 : 0 : hash_ctl.entrysize = sizeof(ItemPointerData);
1377 : 0 : hash_ctl.hcxt = CurrentMemoryContext;
1378 : :
1379 : : tidhtab =
1380 : 0 : hash_create("bucket ctids",
1381 : : 256, /* arbitrary initial size */
1382 : : &hash_ctl,
1383 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1384 : :
1385 : 0 : bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
1386 : :
1387 : : /*
1388 : : * Scan the new bucket and build a hash table of its TIDs
1389 : : */
1390 : : for (;;)
1391 : 0 : {
1392 : : OffsetNumber noffnum;
1393 : : OffsetNumber nmaxoffnum;
1394 : :
1395 : 0 : nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
1396 : : LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
1397 : :
1398 : : /* Remember the primary bucket buffer so we can get a cleanup lock on it. */
1399 [ # # ]: 0 : if (nblkno == bucket_nblkno)
1400 : 0 : bucket_nbuf = nbuf;
1401 : :
1402 : 0 : npage = BufferGetPage(nbuf);
1495 michael@paquier.xyz 1403 : 0 : npageopaque = HashPageGetOpaque(npage);
1404 : :
1405 : : /* Scan each tuple in the new page */
3443 rhaas@postgresql.org 1406 : 0 : nmaxoffnum = PageGetMaxOffsetNumber(npage);
1407 : 0 : for (noffnum = FirstOffsetNumber;
1408 [ # # ]: 0 : noffnum <= nmaxoffnum;
1409 : 0 : noffnum = OffsetNumberNext(noffnum))
1410 : : {
1411 : : IndexTuple itup;
1412 : :
1413 : : /* Fetch the item's TID and insert it into the hash table. */
1414 : 0 : itup = (IndexTuple) PageGetItem(npage,
1415 : 0 : PageGetItemId(npage, noffnum));
1416 : :
1417 : 0 : (void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
1418 : :
1419 [ # # ]: 0 : Assert(!found);
1420 : : }
1421 : :
1422 : 0 : nblkno = npageopaque->hasho_nextblkno;
1423 : :
1424 : : /*
1425 : : * Release our lock without having modified the buffer, and make sure
1426 : : * to retain the pin on the primary bucket page.
1427 : : */
1428 [ # # ]: 0 : if (nbuf == bucket_nbuf)
3420 1429 : 0 : LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1430 : : else
3443 1431 : 0 : _hash_relbuf(rel, nbuf);
1432 : :
1433 : : /* Exit loop if no more overflow pages in new bucket */
1434 [ # # ]: 0 : if (!BlockNumberIsValid(nblkno))
1435 : 0 : break;
1436 : : }
1437 : :
1438 : : /*
1439 : : * Conditionally get the cleanup locks on the old and new buckets to
1440 : : * perform the split. If we can't get them, silently give up; the
1441 : : * next insertion into the old bucket will retry completing the
1442 : : * split.
1443 : : */
1444 [ # # ]: 0 : if (!ConditionalLockBufferForCleanup(obuf))
1445 : : {
1446 : 0 : hash_destroy(tidhtab);
1447 : 0 : return;
1448 : : }
1449 [ # # ]: 0 : if (!ConditionalLockBufferForCleanup(bucket_nbuf))
1450 : : {
3420 1451 : 0 : LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
3443 1452 : 0 : hash_destroy(tidhtab);
1453 : 0 : return;
1454 : : }
1455 : :
1456 : 0 : npage = BufferGetPage(bucket_nbuf);
1495 michael@paquier.xyz 1457 : 0 : npageopaque = HashPageGetOpaque(npage);
3443 rhaas@postgresql.org 1458 : 0 : nbucket = npageopaque->hasho_bucket;
1459 : :
3352 1460 : 0 : _hash_splitbucket(rel, metabuf, obucket,
1461 : : nbucket, obuf, bucket_nbuf, tidhtab,
1462 : : maxbucket, highmask, lowmask);
1463 : :
3196 1464 : 0 : _hash_dropbuf(rel, bucket_nbuf);
3443 1465 : 0 : hash_destroy(tidhtab);
1466 : : }
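The TID table built above is an ordinary dynahash keyed on ItemPointerData. Here is a minimal self-contained sketch of the same create/insert/probe cycle; the function name tid_table_demo is illustrative only.

#include "postgres.h"

#include "storage/itemptr.h"
#include "utils/hsearch.h"

static bool
tid_table_demo(ItemPointer tid_to_insert, ItemPointer tid_to_probe)
{
	HASHCTL		ctl;
	HTAB	   *htab;
	bool		found;

	/* Key and entry are both just the TID; HASH_BLOBS means binary keys. */
	ctl.keysize = sizeof(ItemPointerData);
	ctl.entrysize = sizeof(ItemPointerData);
	ctl.hcxt = CurrentMemoryContext;

	htab = hash_create("tid demo", 256, &ctl,
					   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	/* HASH_ENTER inserts; 'found' reports whether the key already existed */
	(void) hash_search(htab, tid_to_insert, HASH_ENTER, &found);
	Assert(!found);

	/* HASH_FIND probes without inserting */
	(void) hash_search(htab, tid_to_probe, HASH_FIND, &found);

	hash_destroy(htab);

	return found;
}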
1467 : :
1468 : : /*
1469 : : * log_split_page() -- Log the split operation
1470 : : *
1471 : : * We log the split operation whenever a page in the new bucket gets
1472 : : * full, so we log the entire page rather than individual tuples.
1473 : : *
1474 : : * 'buf' must be locked by the caller, which is also responsible for
1475 : : * unlocking it.
1476 : : */
1477 : : static void
3339 rhaas@postgresql.org 1478 :CBC 945 : log_split_page(Relation rel, Buffer buf)
1479 : : {
1480 [ + - - + : 945 : if (RelationNeedsWAL(rel))
- - - - ]
1481 : : {
1482 : : XLogRecPtr recptr;
1483 : :
1484 : 945 : XLogBeginInsert();
1485 : :
1486 : 945 : XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);
1487 : :
1488 : 945 : recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);
1489 : :
1490 : 945 : PageSetLSN(BufferGetPage(buf), recptr);
1491 : : }
1492 : 945 : }
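On the redo side, a record carrying only a forced full-page image is replayed by restoring that image outright, with no page-level logic to re-run. A sketch of such a redo routine, modeled on hash_xlog_split_page() in hash_xlog.c:

#include "postgres.h"

#include "access/xlogutils.h"
#include "storage/bufmgr.h"

static void
example_redo_full_page(XLogReaderState *record)
{
	Buffer		buf;

	/*
	 * REGBUF_FORCE_IMAGE guarantees the image is present, so replay must
	 * restore the page; anything else indicates a corrupt record.
	 */
	if (XLogReadBufferForRedo(record, 0, &buf) != BLK_RESTORED)
		elog(ERROR, "split record did not contain a full-page image");

	UnlockReleaseBuffer(buf);
}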
1493 : :
1494 : : /*
1495 : : * _hash_getcachedmetap() -- Returns cached metapage data.
1496 : : *
1497 : : * If *metabuf is not InvalidBuffer, the caller must hold a pin, but no
1498 : : * lock, on the metapage. If it is InvalidBuffer, we'll set it before
1499 : : * returning whenever we have to refresh the cache, and return with a pin
1500 : : * but no lock on it; the caller is responsible for releasing that pin.
1501 : : *
1502 : : * We refresh the cache if it's not initialized yet or force_refresh is true.
1503 : : */
1504 : : HashMetaPage
3374 1505 : 491092 : _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
1506 : : {
1507 : : Page page;
1508 : :
1509 [ - + ]: 491092 : Assert(metabuf);
1510 [ + + + + ]: 491092 : if (force_refresh || rel->rd_amcache == NULL)
1511 : : {
3275 bruce@momjian.us 1512 : 742 : char *cache = NULL;
1513 : :
1514 : : /*
1515 : : * It's important that we don't set rd_amcache to an invalid value.
1516 : : * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
1517 : : * install a pointer to the newly-allocated storage in the actual
1518 : : * relcache entry until both have succeeded.
1519 : : */
3374 rhaas@postgresql.org 1520 [ + + ]: 742 : if (rel->rd_amcache == NULL)
1521 : 328 : cache = MemoryContextAlloc(rel->rd_indexcxt,
1522 : : sizeof(HashMetaPageData));
1523 : :
1524 : : /* Read the metapage. */
1525 [ - + ]: 742 : if (BufferIsValid(*metabuf))
3374 rhaas@postgresql.org 1526 :UBC 0 : LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
1527 : : else
3374 rhaas@postgresql.org 1528 :CBC 742 : *metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
1529 : : LH_META_PAGE);
1530 : 742 : page = BufferGetPage(*metabuf);
1531 : :
1532 : : /* Populate the cache. */
1533 [ + + ]: 742 : if (rel->rd_amcache == NULL)
1534 : 328 : rel->rd_amcache = cache;
1535 : 742 : memcpy(rel->rd_amcache, HashPageGetMeta(page),
1536 : : sizeof(HashMetaPageData));
1537 : :
1538 : : /* Release metapage lock, but keep the pin. */
1539 : 742 : LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
1540 : : }
1541 : :
1542 : 491092 : return (HashMetaPage) rel->rd_amcache;
1543 : : }
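The ordering inside the cache-refresh branch above is deliberate: both the allocation and the metapage read can fail with elog(ERROR), so rd_amcache is assigned only once usable storage is guaranteed to exist. A condensed sketch of the idiom, with the hypothetical MyCacheData and cache_demo standing in for HashMetaPageData and the real function:

#include "postgres.h"

#include "utils/rel.h"

typedef struct MyCacheData
{
	uint32		version;
} MyCacheData;

static void
cache_demo(Relation rel, const MyCacheData *src)
{
	char	   *cache = NULL;

	/* This may elog(ERROR) on out-of-memory; rd_amcache is untouched. */
	if (rel->rd_amcache == NULL)
		cache = MemoryContextAlloc(rel->rd_indexcxt, sizeof(MyCacheData));

	/* ... any fallible reads (e.g. fetching the metapage) go here ... */

	/* Only now install the pointer, then fill in the contents. */
	if (rel->rd_amcache == NULL)
		rel->rd_amcache = cache;
	memcpy(rel->rd_amcache, src, sizeof(MyCacheData));
}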
1544 : :
1545 : : /*
1546 : : * _hash_getbucketbuf_from_hashkey() -- Get the bucket's buffer for the given
1547 : : * hashkey.
1548 : : *
1549 : : * Bucket pages do not move or get removed once they are allocated. This
1550 : : * gives us an opportunity to use the previously saved metapage contents
1551 : : * to reach the target bucket buffer, instead of reading from the metapage
1552 : : * every time. This saves one buffer access per lookup, a worthwhile
1553 : : * saving in bufmgr traffic and contention.
1554 : : *
1555 : : * The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
1556 : : * bucket buffer has to be locked for reading or writing.
1557 : : *
1558 : : * The out parameter cachedmetap is set to the metapage contents used for
1559 : : * the hashkey-to-bucket mapping. Some callers need this information to
1560 : : * reach the old bucket after a bucket split; see _hash_doinsert().
1561 : : */
1562 : : Buffer
1563 : 490644 : _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access,
1564 : : HashMetaPage *cachedmetap)
1565 : : {
1566 : : HashMetaPage metap;
1567 : : Buffer buf;
1568 : 490644 : Buffer metabuf = InvalidBuffer;
1569 : : Page page;
1570 : : Bucket bucket;
1571 : : BlockNumber blkno;
1572 : : HashPageOpaque opaque;
1573 : :
1574 : : /* We read from the target bucket buffer, so locking is required. */
1575 [ + + - + ]: 490644 : Assert(access == HASH_READ || access == HASH_WRITE);
1576 : :
1577 : 490644 : metap = _hash_getcachedmetap(rel, &metabuf, false);
1578 [ + - ]: 490644 : Assert(metap != NULL);
1579 : :
1580 : : /*
1581 : : * Loop until we get a lock on the correct target bucket.
1582 : : */
1583 : : for (;;)
1584 : : {
1585 : : /*
1586 : : * Compute the target bucket number, and convert to block number.
1587 : : */
1588 : 491058 : bucket = _hash_hashkey2bucket(hashkey,
1589 : : metap->hashm_maxbucket,
1590 : : metap->hashm_highmask,
1591 : : metap->hashm_lowmask);
1592 : :
1593 [ + + ]: 491058 : blkno = BUCKET_TO_BLKNO(metap, bucket);
1594 : :
1595 : : /* Fetch the primary bucket page for the bucket */
1596 : 491058 : buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
1597 : 491058 : page = BufferGetPage(buf);
1495 michael@paquier.xyz 1598 : 491058 : opaque = HashPageGetOpaque(page);
3374 rhaas@postgresql.org 1599 [ - + ]: 491058 : Assert(opaque->hasho_bucket == bucket);
3283 1600 [ - + ]: 491058 : Assert(opaque->hasho_prevblkno != InvalidBlockNumber);
1601 : :
1602 : : /*
1603 : : * If this bucket hasn't been split, we're done.
1604 : : */
1605 [ + + ]: 491058 : if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
3374 1606 : 490644 : break;
1607 : :
1608 : : /* Drop lock on this buffer, update cached metapage, and retry. */
1609 : 414 : _hash_relbuf(rel, buf);
1610 : 414 : metap = _hash_getcachedmetap(rel, &metabuf, true);
1611 [ - + ]: 414 : Assert(metap != NULL);
1612 : : }
1613 : :
1614 [ + + ]: 490644 : if (BufferIsValid(metabuf))
1615 : 725 : _hash_dropbuf(rel, metabuf);
1616 : :
1617 [ + + ]: 490644 : if (cachedmetap)
1618 : 490303 : *cachedmetap = metap;
1619 : :
1620 : 490644 : return buf;
1621 : : }
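The bucket computation in the retry loop masks the hash code with the larger mask first and falls back to the smaller mask when the result points past the highest bucket allocated so far; in essence it behaves like the sketch below (following _hash_hashkey2bucket() in hashutil.c):

#include "postgres.h"

#include "access/hash.h"

static Bucket
hashkey_to_bucket(uint32 hashkey, uint32 maxbucket,
				  uint32 highmask, uint32 lowmask)
{
	Bucket		bucket = hashkey & highmask;

	/*
	 * Buckets beyond maxbucket haven't been created by splitting yet, so
	 * their tuples still live in the lower-numbered "parent" bucket.
	 */
	if (bucket > maxbucket)
		bucket = bucket & lowmask;

	return bucket;
}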