Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nbtpage.c
4 : : * BTree-specific page management code for the Postgres btree access
5 : : * method.
6 : : *
7 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
8 : : * Portions Copyright (c) 1994, Regents of the University of California
9 : : *
10 : : *
11 : : * IDENTIFICATION
12 : : * src/backend/access/nbtree/nbtpage.c
13 : : *
14 : : * NOTES
15 : : * Postgres btree pages look like ordinary relation pages. The opaque
16 : : * data at high addresses includes pointers to left and right siblings
17 : : * and flag data describing page state. The first page in a btree, page
18 : : * zero, is special -- it stores meta-information describing the tree.
19 : : * Pages one and higher store the actual tree data.
20 : : *
21 : : *-------------------------------------------------------------------------
22 : : */
23 : : #include "postgres.h"
24 : :
25 : : #include "access/nbtree.h"
26 : : #include "access/nbtxlog.h"
27 : : #include "access/tableam.h"
28 : : #include "access/transam.h"
29 : : #include "access/xlog.h"
30 : : #include "access/xloginsert.h"
31 : : #include "common/int.h"
32 : : #include "miscadmin.h"
33 : : #include "storage/indexfsm.h"
34 : : #include "storage/predicate.h"
35 : : #include "storage/procarray.h"
36 : : #include "utils/injection_point.h"
37 : : #include "utils/memdebug.h"
38 : : #include "utils/memutils.h"
39 : : #include "utils/snapmgr.h"
40 : :
41 : : static BTMetaPageData *_bt_getmeta(Relation rel, Buffer metabuf);
42 : : static void _bt_delitems_delete(Relation rel, Buffer buf,
43 : : TransactionId snapshotConflictHorizon,
44 : : bool isCatalogRel,
45 : : OffsetNumber *deletable, int ndeletable,
46 : : BTVacuumPosting *updatable, int nupdatable);
47 : : static char *_bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
48 : : OffsetNumber *updatedoffsets,
49 : : Size *updatedbuflen, bool needswal);
50 : : static bool _bt_mark_page_halfdead(Relation rel, Relation heaprel,
51 : : Buffer leafbuf, BTStack stack);
52 : : static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf,
53 : : BlockNumber scanblkno,
54 : : bool *rightsib_empty,
55 : : BTVacState *vstate);
56 : : static bool _bt_lock_subtree_parent(Relation rel, Relation heaprel,
57 : : BlockNumber child, BTStack stack,
58 : : Buffer *subtreeparent, OffsetNumber *poffset,
59 : : BlockNumber *topparent,
60 : : BlockNumber *topparentrightsib);
61 : : static void _bt_pendingfsm_add(BTVacState *vstate, BlockNumber target,
62 : : FullTransactionId safexid);
63 : :
64 : : /*
65 : : * _bt_initmetapage() -- Fill a page buffer with a correct metapage image
66 : : */
67 : : void
2260 pg@bowt.ie 68 :CBC 32491 : _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
69 : : bool allequalimage)
70 : : {
71 : : BTMetaPageData *metad;
72 : : BTPageOpaque metaopaque;
73 : :
8007 tgl@sss.pgh.pa.us 74 : 32491 : _bt_pageinit(page, BLCKSZ);
75 : :
76 : 32491 : metad = BTPageGetMeta(page);
77 : 32491 : metad->btm_magic = BTREE_MAGIC;
78 : 32491 : metad->btm_version = BTREE_VERSION;
79 : 32491 : metad->btm_root = rootbknum;
80 : 32491 : metad->btm_level = level;
81 : 32491 : metad->btm_fastroot = rootbknum;
82 : 32491 : metad->btm_fastlevel = level;
1896 pg@bowt.ie 83 : 32491 : metad->btm_last_cleanup_num_delpages = 0;
2953 teodor@sigaev.ru 84 : 32491 : metad->btm_last_cleanup_num_heap_tuples = -1.0;
2260 pg@bowt.ie 85 : 32491 : metad->btm_allequalimage = allequalimage;
86 : :
1495 michael@paquier.xyz 87 : 32491 : metaopaque = BTPageGetOpaque(page);
8007 tgl@sss.pgh.pa.us 88 : 32491 : metaopaque->btpo_flags = BTP_META;
89 : :
90 : : /*
91 : : * Set pd_lower just past the end of the metadata. This is essential,
92 : : * because without doing so, metadata will be lost if xlog.c compresses
93 : : * the page.
94 : : */
7642 95 : 32491 : ((PageHeader) page)->pd_lower =
96 : 32491 : ((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
8007 97 : 32491 : }
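/*
 * Editor's sketch (not part of nbtpage.c): the pd_lower adjustment above
 * matters because full-page images written by xlog.c may omit the
 * pd_lower..pd_upper "hole"; metadata lying beyond pd_lower would be
 * treated as part of that hole and dropped.  A caller creating an empty
 * index might use this routine roughly as follows (variable names are
 * hypothetical):
 *
 *     Page metapage = (Page) palloc(BLCKSZ);
 *
 *     _bt_initmetapage(metapage, P_NONE, 0, allequalimage);
 *     ... WAL-log the image and write it out as block BTREE_METAPAGE ...
 */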
98 : :
99 : : /*
100 : : * _bt_upgrademetapage() -- Upgrade a meta-page from an old format to version
101 : : * 3, the last version that can be updated without broadly affecting
102 : : * on-disk compatibility. (A REINDEX is required to upgrade to v4.)
103 : : *
104 : : * This routine does purely in-memory image upgrade. Caller is
105 : : * responsible for locking, WAL-logging etc.
106 : : */
107 : : void
2953 teodor@sigaev.ru 108 :UBC 0 : _bt_upgrademetapage(Page page)
109 : : {
110 : : BTMetaPageData *metad;
111 : : BTPageOpaque metaopaque PG_USED_FOR_ASSERTS_ONLY;
112 : :
113 : 0 : metad = BTPageGetMeta(page);
1495 michael@paquier.xyz 114 : 0 : metaopaque = BTPageGetOpaque(page);
115 : :
 116                 :                :         /* It really must be a meta page of an upgradable version */
2953 teodor@sigaev.ru 117 [ # # ]: 0 : Assert(metaopaque->btpo_flags & BTP_META);
2603 pg@bowt.ie 118 [ # # ]: 0 : Assert(metad->btm_version < BTREE_NOVAC_VERSION);
2953 teodor@sigaev.ru 119 [ # # ]: 0 : Assert(metad->btm_version >= BTREE_MIN_VERSION);
120 : :
121 : : /* Set version number and fill extra fields added into version 3 */
2603 pg@bowt.ie 122 : 0 : metad->btm_version = BTREE_NOVAC_VERSION;
1896 123 : 0 : metad->btm_last_cleanup_num_delpages = 0;
2953 teodor@sigaev.ru 124 : 0 : metad->btm_last_cleanup_num_heap_tuples = -1.0;
125 : : /* Only a REINDEX can set this field */
2260 pg@bowt.ie 126 [ # # ]: 0 : Assert(!metad->btm_allequalimage);
127 : 0 : metad->btm_allequalimage = false;
128 : :
129 : : /* Adjust pd_lower (see _bt_initmetapage() for details) */
2953 teodor@sigaev.ru 130 : 0 : ((PageHeader) page)->pd_lower =
131 : 0 : ((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
132 : 0 : }
133 : :
134 : : /*
135 : : * Get metadata from share-locked buffer containing metapage, while performing
136 : : * standard sanity checks.
137 : : *
138 : : * Callers that cache data returned here in local cache should note that an
139 : : * on-the-fly upgrade using _bt_upgrademetapage() can change the version field
140 : : * and BTREE_NOVAC_VERSION specific fields without invalidating local cache.
141 : : */
142 : : static BTMetaPageData *
2603 pg@bowt.ie 143 :CBC 1276996 : _bt_getmeta(Relation rel, Buffer metabuf)
144 : : {
145 : : Page metapg;
146 : : BTPageOpaque metaopaque;
147 : : BTMetaPageData *metad;
148 : :
149 : 1276996 : metapg = BufferGetPage(metabuf);
1495 michael@paquier.xyz 150 : 1276996 : metaopaque = BTPageGetOpaque(metapg);
2603 pg@bowt.ie 151 : 1276996 : metad = BTPageGetMeta(metapg);
152 : :
153 : : /* sanity-check the metapage */
154 [ + - ]: 1276996 : if (!P_ISMETA(metaopaque) ||
155 [ - + ]: 1276996 : metad->btm_magic != BTREE_MAGIC)
2603 pg@bowt.ie 156 [ # # ]:UBC 0 : ereport(ERROR,
157 : : (errcode(ERRCODE_INDEX_CORRUPTED),
158 : : errmsg("index \"%s\" is not a btree",
159 : : RelationGetRelationName(rel))));
160 : :
2603 pg@bowt.ie 161 [ + - ]:CBC 1276996 : if (metad->btm_version < BTREE_MIN_VERSION ||
162 [ - + ]: 1276996 : metad->btm_version > BTREE_VERSION)
2603 pg@bowt.ie 163 [ # # ]:UBC 0 : ereport(ERROR,
164 : : (errcode(ERRCODE_INDEX_CORRUPTED),
165 : : errmsg("version mismatch in index \"%s\": file version %d, "
166 : : "current version %d, minimal supported version %d",
167 : : RelationGetRelationName(rel),
168 : : metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));
169 : :
2603 pg@bowt.ie 170 :CBC 1276996 : return metad;
171 : : }
172 : :
173 : : /*
174 : : * _bt_vacuum_needs_cleanup() -- Checks if index needs cleanup
175 : : *
176 : : * Called by btvacuumcleanup when btbulkdelete was never called because no
177 : : * index tuples needed to be deleted.
178 : : */
179 : : bool
1060 180 : 21376 : _bt_vacuum_needs_cleanup(Relation rel)
181 : : {
182 : : Buffer metabuf;
183 : : Page metapg;
184 : : BTMetaPageData *metad;
185 : : uint32 btm_version;
186 : : BlockNumber prev_num_delpages;
187 : :
188 : : /*
189 : : * Copy details from metapage to local variables quickly.
190 : : *
191 : : * Note that we deliberately avoid using cached version of metapage here.
192 : : */
193 : 21376 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
1880 194 : 21376 : metapg = BufferGetPage(metabuf);
195 : 21376 : metad = BTPageGetMeta(metapg);
196 : 21376 : btm_version = metad->btm_version;
197 : :
198 [ - + ]: 21376 : if (btm_version < BTREE_NOVAC_VERSION)
199 : : {
200 : : /*
201 : : * Metapage needs to be dynamically upgraded to store fields that are
202 : : * only present when btm_version >= BTREE_NOVAC_VERSION
203 : : */
1880 pg@bowt.ie 204 :UBC 0 : _bt_relbuf(rel, metabuf);
205 : 0 : return true;
206 : : }
207 : :
1880 pg@bowt.ie 208 :CBC 21376 : prev_num_delpages = metad->btm_last_cleanup_num_delpages;
209 : 21376 : _bt_relbuf(rel, metabuf);
210 : :
211 : : /*
212 : : * Trigger cleanup in rare cases where prev_num_delpages exceeds 5% of the
213 : : * total size of the index. We can reasonably expect (though are not
214 : : * guaranteed) to be able to recycle this many pages if we decide to do a
215 : : * btvacuumscan call during the ongoing btvacuumcleanup. For further
216 : : * details see the nbtree/README section on placing deleted pages in the
217 : : * FSM.
218 : : */
219 [ + + ]: 21376 : if (prev_num_delpages > 0 &&
220 [ + - ]: 9 : prev_num_delpages > RelationGetNumberOfBlocks(rel) / 20)
221 : 9 : return true;
222 : :
223 : 21367 : return false;
224 : : }
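/*
 * Editor's note (worked example, not part of nbtpage.c): with the 5% rule
 * above, an index of 20,000 blocks returns true only once the previously
 * recorded count of deleted-but-unrecycled pages exceeds 20,000 / 20 =
 * 1,000.  A handful of deleted pages in a large index therefore does not,
 * by itself, force btvacuumcleanup into a full btvacuumscan call.
 */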
225 : :
226 : : /*
227 : : * _bt_set_cleanup_info() -- Update metapage for btvacuumcleanup.
228 : : *
229 : : * Called at the end of btvacuumcleanup, when num_delpages value has been
230 : : * finalized.
231 : : */
232 : : void
1060 233 : 1687 : _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages)
234 : : {
235 : : Buffer metabuf;
236 : : Page metapg;
237 : : BTMetaPageData *metad;
238 : : XLogRecPtr recptr;
239 : :
240 : : /*
241 : : * On-disk compatibility note: The btm_last_cleanup_num_delpages metapage
242 : : * field started out as a TransactionId field called btm_oldest_btpo_xact.
243 : : * Both "versions" are just uint32 fields. It was convenient to repurpose
244 : : * the field when we began to use 64-bit XIDs in deleted pages.
245 : : *
246 : : * It's possible that a pg_upgrade'd database will contain an XID value in
247 : : * what is now recognized as the metapage's btm_last_cleanup_num_delpages
248 : : * field. _bt_vacuum_needs_cleanup() may even believe that this value
249 : : * indicates that there are lots of pages that it needs to recycle, when
250 : : * in reality there are only one or two. The worst that can happen is
251 : : * that there will be a call to btvacuumscan a little earlier, which will
252 : : * set btm_last_cleanup_num_delpages to a sane value when we're called.
253 : : *
254 : : * Note also that the metapage's btm_last_cleanup_num_heap_tuples field is
255 : : * no longer used as of PostgreSQL 14. We set it to -1.0 on rewrite, just
256 : : * to be consistent.
257 : : */
258 : 1687 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
2953 teodor@sigaev.ru 259 : 1687 : metapg = BufferGetPage(metabuf);
260 : 1687 : metad = BTPageGetMeta(metapg);
261 : :
 262                 :                :         /* Don't miss the chance to upgrade the index's metapage when it's still at BTREE_MIN_VERSION */
1882 pg@bowt.ie 263 [ + - ]: 1687 : if (metad->btm_version >= BTREE_NOVAC_VERSION &&
264 [ + + ]: 1687 : metad->btm_last_cleanup_num_delpages == num_delpages)
265 : : {
266 : : /* Usually means index continues to have num_delpages of 0 */
2953 teodor@sigaev.ru 267 : 1558 : _bt_relbuf(rel, metabuf);
268 : 1558 : return;
269 : : }
270 : :
271 : : /* trade in our read lock for a write lock */
2114 pg@bowt.ie 272 : 129 : _bt_unlockbuf(rel, metabuf);
273 : 129 : _bt_lockbuf(rel, metabuf, BT_WRITE);
274 : :
2953 teodor@sigaev.ru 275 : 129 : START_CRIT_SECTION();
276 : :
277 : : /* upgrade meta-page if needed */
2603 pg@bowt.ie 278 [ - + ]: 129 : if (metad->btm_version < BTREE_NOVAC_VERSION)
2953 teodor@sigaev.ru 279 :UBC 0 : _bt_upgrademetapage(metapg);
280 : :
281 : : /* update cleanup-related information */
1896 pg@bowt.ie 282 :CBC 129 : metad->btm_last_cleanup_num_delpages = num_delpages;
1882 283 : 129 : metad->btm_last_cleanup_num_heap_tuples = -1.0;
2953 teodor@sigaev.ru 284 : 129 : MarkBufferDirty(metabuf);
285 : :
286 : : /* write wal record if needed */
287 [ + - + + : 129 : if (RelationNeedsWAL(rel))
+ - + - ]
2953 teodor@sigaev.ru 288 :GIC 129 : {
289 : : xl_btree_metadata md;
290 : :
2953 teodor@sigaev.ru 291 :CBC 129 : XLogBeginInsert();
292 : 129 : XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
293 : :
2603 pg@bowt.ie 294 [ - + ]: 129 : Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
295 : 129 : md.version = metad->btm_version;
2953 teodor@sigaev.ru 296 : 129 : md.root = metad->btm_root;
297 : 129 : md.level = metad->btm_level;
298 : 129 : md.fastroot = metad->btm_fastroot;
299 : 129 : md.fastlevel = metad->btm_fastlevel;
1896 pg@bowt.ie 300 : 129 : md.last_cleanup_num_delpages = num_delpages;
2260 301 : 129 : md.allequalimage = metad->btm_allequalimage;
302 : :
448 peter@eisentraut.org 303 : 129 : XLogRegisterBufData(0, &md, sizeof(xl_btree_metadata));
304 : :
2953 teodor@sigaev.ru 305 : 129 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_META_CLEANUP);
306 : : }
307 : : else
53 pg@bowt.ie 308 :UNC 0 : recptr = XLogGetFakeLSN(rel);
309 : :
53 pg@bowt.ie 310 :GNC 129 : PageSetLSN(metapg, recptr);
311 : :
2953 teodor@sigaev.ru 312 [ - + ]:CBC 129 : END_CRIT_SECTION();
313 : :
314 : 129 : _bt_relbuf(rel, metabuf);
315 : : }
316 : :
317 : : /*
318 : : * _bt_getroot() -- Get the root page of the btree.
319 : : *
320 : : * Since the root page can move around the btree file, we have to read
321 : : * its location from the metadata page, and then read the root page
322 : : * itself. If no root page exists yet, we have to create one.
323 : : *
324 : : * The access type parameter (BT_READ or BT_WRITE) controls whether
325 : : * a new root page will be created or not. If access = BT_READ,
326 : : * and no root page exists, we just return InvalidBuffer. For
327 : : * BT_WRITE, we try to create the root page if it doesn't exist.
328 : : * NOTE that the returned root page will have only a read lock set
329 : : * on it even if access = BT_WRITE!
330 : : *
331 : : * If access = BT_WRITE, heaprel must be set; otherwise caller can just
332 : : * pass NULL. See _bt_allocbuf for an explanation.
333 : : *
334 : : * The returned page is not necessarily the true root --- it could be
335 : : * a "fast root" (a page that is alone in its level due to deletions).
336 : : * Also, if the root page is split while we are "in flight" to it,
337 : : * what we will return is the old root, which is now just the leftmost
338 : : * page on a probably-not-very-wide level. For most purposes this is
339 : : * as good as or better than the true root, so we do not bother to
340 : : * insist on finding the true root. We do, however, guarantee to
341 : : * return a live (not deleted or half-dead) page.
342 : : *
343 : : * On successful return, the root page is pinned and read-locked.
344 : : * The metadata page is not locked or pinned on exit.
345 : : */
346 : : Buffer
1130 andres@anarazel.de 347 : 15791724 : _bt_getroot(Relation rel, Relation heaprel, int access)
348 : : {
349 : : Buffer metabuf;
350 : : Buffer rootbuf;
351 : : Page rootpage;
352 : : BTPageOpaque rootopaque;
353 : : BlockNumber rootblkno;
354 : : uint32 rootlevel;
355 : : BTMetaPageData *metad;
356 : : XLogRecPtr recptr;
357 : :
1060 pg@bowt.ie 358 [ + + - + ]: 15791724 : Assert(access == BT_READ || heaprel != NULL);
359 : :
360 : : /*
361 : : * Try to use previously-cached metapage data to find the root. This
362 : : * normally saves one buffer access per index search, which is a very
363 : : * helpful savings in bufmgr traffic and hence contention.
364 : : */
7315 tgl@sss.pgh.pa.us 365 [ + + ]: 15791724 : if (rel->rd_amcache != NULL)
366 : : {
367 : 15447757 : metad = (BTMetaPageData *) rel->rd_amcache;
368 : : /* We shouldn't have cached it if any of these fail */
369 [ - + ]: 15447757 : Assert(metad->btm_magic == BTREE_MAGIC);
2953 teodor@sigaev.ru 370 [ - + ]: 15447757 : Assert(metad->btm_version >= BTREE_MIN_VERSION);
371 [ - + ]: 15447757 : Assert(metad->btm_version <= BTREE_VERSION);
2260 pg@bowt.ie 372 [ + + - + ]: 15447757 : Assert(!metad->btm_allequalimage ||
373 : : metad->btm_version > BTREE_NOVAC_VERSION);
7315 tgl@sss.pgh.pa.us 374 [ - + ]: 15447757 : Assert(metad->btm_root != P_NONE);
375 : :
376 : 15447757 : rootblkno = metad->btm_fastroot;
377 [ - + ]: 15447757 : Assert(rootblkno != P_NONE);
378 : 15447757 : rootlevel = metad->btm_fastlevel;
379 : :
1060 pg@bowt.ie 380 : 15447757 : rootbuf = _bt_getbuf(rel, rootblkno, BT_READ);
3667 kgrittn@postgresql.o 381 : 15447757 : rootpage = BufferGetPage(rootbuf);
1495 michael@paquier.xyz 382 : 15447757 : rootopaque = BTPageGetOpaque(rootpage);
383 : :
384 : : /*
385 : : * Since the cache might be stale, we check the page more carefully
386 : : * here than normal. We *must* check that it's not deleted. If it's
387 : : * not alone on its level, then we reject too --- this may be overly
388 : : * paranoid but better safe than sorry. Note we don't check P_ISROOT,
389 : : * because that's not set in a "fast root".
390 : : */
7315 tgl@sss.pgh.pa.us 391 [ + - ]: 15447757 : if (!P_IGNORE(rootopaque) &&
1896 pg@bowt.ie 392 [ + - ]: 15447757 : rootopaque->btpo_level == rootlevel &&
7315 tgl@sss.pgh.pa.us 393 [ + - ]: 15447757 : P_LEFTMOST(rootopaque) &&
394 [ + + ]: 15447757 : P_RIGHTMOST(rootopaque))
395 : : {
396 : : /* OK, accept cached page as the root */
397 : 15297392 : return rootbuf;
398 : : }
399 : 150365 : _bt_relbuf(rel, rootbuf);
400 : : /* Cache is stale, throw it away */
401 [ + - ]: 150365 : if (rel->rd_amcache)
402 : 150365 : pfree(rel->rd_amcache);
403 : 150365 : rel->rd_amcache = NULL;
404 : : }
405 : :
1060 pg@bowt.ie 406 : 494332 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
2483 407 : 494332 : metad = _bt_getmeta(rel, metabuf);
408 : :
409 : : /* if no root page initialized yet, do it */
10467 bruce@momjian.us 410 [ + + ]: 494332 : if (metad->btm_root == P_NONE)
411 : : {
412 : : Page metapg;
413 : :
414 : : /* If access = BT_READ, caller doesn't want us to create root yet */
9419 tgl@sss.pgh.pa.us 415 [ + + ]: 343705 : if (access == BT_READ)
416 : : {
9060 417 : 336307 : _bt_relbuf(rel, metabuf);
9419 418 : 336307 : return InvalidBuffer;
419 : : }
420 : :
421 : : /* trade in our read lock for a write lock */
2114 pg@bowt.ie 422 : 7398 : _bt_unlockbuf(rel, metabuf);
423 : 7398 : _bt_lockbuf(rel, metabuf, BT_WRITE);
424 : :
425 : : /*
426 : : * Race condition: if someone else initialized the metadata between
427 : : * the time we released the read lock and acquired the write lock, we
428 : : * must avoid doing it again.
429 : : */
8473 tgl@sss.pgh.pa.us 430 [ - + ]: 7398 : if (metad->btm_root != P_NONE)
431 : : {
432 : : /*
433 : : * Metadata initialized by someone else. In order to guarantee no
434 : : * deadlocks, we have to release the metadata page and start all
435 : : * over again. (Is that really true? But it's hardly worth trying
436 : : * to optimize this case.)
437 : : */
8473 tgl@sss.pgh.pa.us 438 :UBC 0 : _bt_relbuf(rel, metabuf);
1130 andres@anarazel.de 439 : 0 : return _bt_getroot(rel, heaprel, access);
440 : : }
441 : :
442 : : /*
443 : : * Get, initialize, write, and leave a lock of the appropriate type on
444 : : * the new root page. Since this is the first page in the tree, it's
445 : : * a leaf as well as the root.
446 : : */
1060 pg@bowt.ie 447 :CBC 7398 : rootbuf = _bt_allocbuf(rel, heaprel);
8473 tgl@sss.pgh.pa.us 448 : 7398 : rootblkno = BufferGetBlockNumber(rootbuf);
3667 kgrittn@postgresql.o 449 : 7398 : rootpage = BufferGetPage(rootbuf);
1495 michael@paquier.xyz 450 : 7398 : rootopaque = BTPageGetOpaque(rootpage);
8473 tgl@sss.pgh.pa.us 451 : 7398 : rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
452 : 7398 : rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
1896 pg@bowt.ie 453 : 7398 : rootopaque->btpo_level = 0;
7302 tgl@sss.pgh.pa.us 454 : 7398 : rootopaque->btpo_cycleid = 0;
455 : : /* Get raw page pointer for metapage */
2483 pg@bowt.ie 456 : 7398 : metapg = BufferGetPage(metabuf);
457 : :
458 : : /* NO ELOG(ERROR) till meta is updated */
8473 tgl@sss.pgh.pa.us 459 : 7398 : START_CRIT_SECTION();
460 : :
461 : : /* upgrade metapage if needed */
2603 pg@bowt.ie 462 [ - + ]: 7398 : if (metad->btm_version < BTREE_NOVAC_VERSION)
2897 teodor@sigaev.ru 463 :UBC 0 : _bt_upgrademetapage(metapg);
464 : :
8473 tgl@sss.pgh.pa.us 465 :CBC 7398 : metad->btm_root = rootblkno;
466 : 7398 : metad->btm_level = 0;
467 : 7398 : metad->btm_fastroot = rootblkno;
468 : 7398 : metad->btm_fastlevel = 0;
1896 pg@bowt.ie 469 : 7398 : metad->btm_last_cleanup_num_delpages = 0;
2953 teodor@sigaev.ru 470 : 7398 : metad->btm_last_cleanup_num_heap_tuples = -1.0;
471 : :
7340 tgl@sss.pgh.pa.us 472 : 7398 : MarkBufferDirty(rootbuf);
473 : 7398 : MarkBufferDirty(metabuf);
474 : :
475 : : /* XLOG stuff */
5622 rhaas@postgresql.org 476 [ + + + + : 7398 : if (RelationNeedsWAL(rel))
+ + + + ]
8473 tgl@sss.pgh.pa.us 477 :GIC 7095 : {
478 : : xl_btree_newroot xlrec;
479 : : xl_btree_metadata md;
480 : :
4184 heikki.linnakangas@i 481 :CBC 7095 : XLogBeginInsert();
482 : 7095 : XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
3105 tgl@sss.pgh.pa.us 483 : 7095 : XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
484 : :
2603 pg@bowt.ie 485 [ - + ]: 7095 : Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
486 : 7095 : md.version = metad->btm_version;
4184 heikki.linnakangas@i 487 : 7095 : md.root = rootblkno;
488 : 7095 : md.level = 0;
489 : 7095 : md.fastroot = rootblkno;
490 : 7095 : md.fastlevel = 0;
1896 pg@bowt.ie 491 : 7095 : md.last_cleanup_num_delpages = 0;
2260 492 : 7095 : md.allequalimage = metad->btm_allequalimage;
493 : :
448 peter@eisentraut.org 494 : 7095 : XLogRegisterBufData(2, &md, sizeof(xl_btree_metadata));
495 : :
8473 tgl@sss.pgh.pa.us 496 : 7095 : xlrec.rootblk = rootblkno;
497 : 7095 : xlrec.level = 0;
498 : :
448 peter@eisentraut.org 499 : 7095 : XLogRegisterData(&xlrec, SizeOfBtreeNewroot);
500 : :
4184 heikki.linnakangas@i 501 : 7095 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);
502 : : }
503 : : else
53 pg@bowt.ie 504 :GNC 303 : recptr = XLogGetFakeLSN(rel);
505 : :
506 : 7398 : PageSetLSN(rootpage, recptr);
507 : 7398 : PageSetLSN(metapg, recptr);
508 : :
8473 tgl@sss.pgh.pa.us 509 [ - + ]:CBC 7398 : END_CRIT_SECTION();
510 : :
511 : : /*
512 : : * swap root write lock for read lock. There is no danger of anyone
513 : : * else accessing the new root page while it's unlocked, since no one
514 : : * else knows where it is yet.
515 : : */
2114 pg@bowt.ie 516 : 7398 : _bt_unlockbuf(rel, rootbuf);
517 : 7398 : _bt_lockbuf(rel, rootbuf, BT_READ);
518 : :
519 : : /* okay, metadata is correct, release lock on it without caching */
7340 tgl@sss.pgh.pa.us 520 : 7398 : _bt_relbuf(rel, metabuf);
521 : : }
522 : : else
523 : : {
8474 524 : 150627 : rootblkno = metad->btm_fastroot;
8473 525 [ - + ]: 150627 : Assert(rootblkno != P_NONE);
526 : 150627 : rootlevel = metad->btm_fastlevel;
527 : :
528 : : /*
529 : : * Cache the metapage data for next time
530 : : */
2483 pg@bowt.ie 531 : 150627 : rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
532 : : sizeof(BTMetaPageData));
533 : 150627 : memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
534 : :
535 : : /*
536 : : * We are done with the metapage; arrange to release it via first
537 : : * _bt_relandgetbuf call
538 : : */
8049 tgl@sss.pgh.pa.us 539 : 150627 : rootbuf = metabuf;
540 : :
541 : : for (;;)
542 : : {
543 : 150627 : rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
3667 kgrittn@postgresql.o 544 : 150627 : rootpage = BufferGetPage(rootbuf);
1495 michael@paquier.xyz 545 : 150627 : rootopaque = BTPageGetOpaque(rootpage);
546 : :
8473 tgl@sss.pgh.pa.us 547 [ + - ]: 150627 : if (!P_IGNORE(rootopaque))
548 : 150627 : break;
549 : :
550 : : /* it's dead, Jim. step right one page */
8473 tgl@sss.pgh.pa.us 551 [ # # ]:UBC 0 : if (P_RIGHTMOST(rootopaque))
6700 552 [ # # ]: 0 : elog(ERROR, "no live root page found in index \"%s\"",
553 : : RelationGetRelationName(rel));
8473 554 : 0 : rootblkno = rootopaque->btpo_next;
555 : : }
556 : :
1896 pg@bowt.ie 557 [ - + ]:CBC 150627 : if (rootopaque->btpo_level != rootlevel)
6700 tgl@sss.pgh.pa.us 558 [ # # ]:UBC 0 : elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
559 : : rootblkno, RelationGetRelationName(rel),
560 : : rootopaque->btpo_level, rootlevel);
561 : : }
562 : :
563 : : /*
564 : : * By here, we have a pin and read lock on the root page, and no lock set
565 : : * on the metadata page. Return the root page's buffer.
566 : : */
8474 tgl@sss.pgh.pa.us 567 :CBC 158025 : return rootbuf;
568 : : }
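/*
 * Editor's sketch (not part of nbtpage.c): a read-only descent typically
 * begins along these lines, treating InvalidBuffer as "index has no root
 * yet" (variable names are hypothetical):
 *
 *     rootbuf = _bt_getroot(rel, NULL, BT_READ);
 *     if (!BufferIsValid(rootbuf))
 *         return ...;                     <-- empty index, nothing to do
 *     rootpage = BufferGetPage(rootbuf);
 *     ... descend from the read-locked (fast) root page ...
 */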
569 : :
570 : : /*
571 : : * _bt_gettrueroot() -- Get the true root page of the btree.
572 : : *
573 : : * This is the same as the BT_READ case of _bt_getroot(), except
574 : : * we follow the true-root link not the fast-root link.
575 : : *
576 : : * By the time we acquire lock on the root page, it might have been split and
577 : : * not be the true root anymore. This is okay for the present uses of this
578 : : * routine; we only really need to be able to move up at least one tree level
579 : : * from whatever non-root page we were at. If we ever do need to lock the
580 : : * one true root page, we could loop here, re-reading the metapage on each
581 : : * failure. (Note that it wouldn't do to hold the lock on the metapage while
582 : : * moving to the root --- that'd deadlock against any concurrent root split.)
583 : : */
584 : : Buffer
1060 pg@bowt.ie 585 : 16 : _bt_gettrueroot(Relation rel)
586 : : {
587 : : Buffer metabuf;
588 : : Page metapg;
589 : : BTPageOpaque metaopaque;
590 : : Buffer rootbuf;
591 : : Page rootpage;
592 : : BTPageOpaque rootopaque;
593 : : BlockNumber rootblkno;
594 : : uint32 rootlevel;
595 : : BTMetaPageData *metad;
596 : :
597 : : /*
598 : : * We don't try to use cached metapage data here, since (a) this path is
599 : : * not performance-critical, and (b) if we are here it suggests our cache
600 : : * is out-of-date anyway. In light of point (b), it's probably safest to
601 : : * actively flush any cached metapage info.
602 : : */
7315 tgl@sss.pgh.pa.us 603 [ + - ]: 16 : if (rel->rd_amcache)
604 : 16 : pfree(rel->rd_amcache);
605 : 16 : rel->rd_amcache = NULL;
606 : :
1060 pg@bowt.ie 607 : 16 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
3667 kgrittn@postgresql.o 608 : 16 : metapg = BufferGetPage(metabuf);
1495 michael@paquier.xyz 609 : 16 : metaopaque = BTPageGetOpaque(metapg);
8474 tgl@sss.pgh.pa.us 610 : 16 : metad = BTPageGetMeta(metapg);
611 : :
3151 612 [ + - ]: 16 : if (!P_ISMETA(metaopaque) ||
8474 613 [ - + ]: 16 : metad->btm_magic != BTREE_MAGIC)
8324 tgl@sss.pgh.pa.us 614 [ # # ]:UBC 0 : ereport(ERROR,
615 : : (errcode(ERRCODE_INDEX_CORRUPTED),
616 : : errmsg("index \"%s\" is not a btree",
617 : : RelationGetRelationName(rel))));
618 : :
2953 teodor@sigaev.ru 619 [ + - ]:CBC 16 : if (metad->btm_version < BTREE_MIN_VERSION ||
620 [ - + ]: 16 : metad->btm_version > BTREE_VERSION)
8324 tgl@sss.pgh.pa.us 621 [ # # ]:UBC 0 : ereport(ERROR,
622 : : (errcode(ERRCODE_INDEX_CORRUPTED),
623 : : errmsg("version mismatch in index \"%s\": file version %d, "
624 : : "current version %d, minimal supported version %d",
625 : : RelationGetRelationName(rel),
626 : : metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));
627 : :
628 : : /* if no root page initialized yet, fail */
8474 tgl@sss.pgh.pa.us 629 [ - + ]:CBC 16 : if (metad->btm_root == P_NONE)
630 : : {
8474 tgl@sss.pgh.pa.us 631 :UBC 0 : _bt_relbuf(rel, metabuf);
632 : 0 : return InvalidBuffer;
633 : : }
634 : :
8474 tgl@sss.pgh.pa.us 635 :CBC 16 : rootblkno = metad->btm_root;
8473 636 : 16 : rootlevel = metad->btm_level;
637 : :
638 : : /*
639 : : * We are done with the metapage; arrange to release it via first
640 : : * _bt_relandgetbuf call
641 : : */
8049 642 : 16 : rootbuf = metabuf;
643 : :
644 : : for (;;)
645 : : {
646 : 16 : rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
3667 kgrittn@postgresql.o 647 : 16 : rootpage = BufferGetPage(rootbuf);
1495 michael@paquier.xyz 648 : 16 : rootopaque = BTPageGetOpaque(rootpage);
649 : :
8473 tgl@sss.pgh.pa.us 650 [ + - ]: 16 : if (!P_IGNORE(rootopaque))
651 : 16 : break;
652 : :
653 : : /* it's dead, Jim. step right one page */
8473 tgl@sss.pgh.pa.us 654 [ # # ]:UBC 0 : if (P_RIGHTMOST(rootopaque))
6700 655 [ # # ]: 0 : elog(ERROR, "no live root page found in index \"%s\"",
656 : : RelationGetRelationName(rel));
8473 657 : 0 : rootblkno = rootopaque->btpo_next;
658 : : }
659 : :
1896 pg@bowt.ie 660 [ - + ]:CBC 16 : if (rootopaque->btpo_level != rootlevel)
6700 tgl@sss.pgh.pa.us 661 [ # # ]:UBC 0 : elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
662 : : rootblkno, RelationGetRelationName(rel),
663 : : rootopaque->btpo_level, rootlevel);
664 : :
10108 bruce@momjian.us 665 :CBC 16 : return rootbuf;
666 : : }
667 : :
668 : : /*
669 : : * _bt_getrootheight() -- Get the height of the btree search tree.
670 : : *
671 : : * We return the level (counting from zero) of the current fast root.
672 : : * This represents the number of tree levels we'd have to descend through
673 : : * to start any btree index search.
674 : : *
675 : : * This is used by the planner for cost-estimation purposes. Since it's
676 : : * only an estimate, slightly-stale data is fine, hence we don't worry
677 : : * about updating previously cached data.
678 : : */
679 : : int
1060 pg@bowt.ie 680 : 3783116 : _bt_getrootheight(Relation rel)
681 : : {
682 : : BTMetaPageData *metad;
683 : :
4862 tgl@sss.pgh.pa.us 684 [ + + ]: 3783116 : if (rel->rd_amcache == NULL)
685 : : {
686 : : Buffer metabuf;
687 : :
1060 pg@bowt.ie 688 : 66230 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
2603 689 : 66230 : metad = _bt_getmeta(rel, metabuf);
690 : :
691 : : /*
692 : : * If there's no root page yet, _bt_getroot() doesn't expect a cache
693 : : * to be made, so just stop here and report the index height is zero.
694 : : * (XXX perhaps _bt_getroot() should be changed to allow this case.)
695 : : */
4862 tgl@sss.pgh.pa.us 696 [ + + ]: 66230 : if (metad->btm_root == P_NONE)
697 : : {
698 : 38734 : _bt_relbuf(rel, metabuf);
699 : 38734 : return 0;
700 : : }
701 : :
702 : : /*
703 : : * Cache the metapage data for next time
704 : : */
2483 pg@bowt.ie 705 : 27496 : rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
706 : : sizeof(BTMetaPageData));
707 : 27496 : memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
4862 tgl@sss.pgh.pa.us 708 : 27496 : _bt_relbuf(rel, metabuf);
709 : : }
710 : :
711 : : /* Get cached page */
712 : 3744382 : metad = (BTMetaPageData *) rel->rd_amcache;
713 : : /* We shouldn't have cached it if any of these fail */
2483 pg@bowt.ie 714 [ - + ]: 3744382 : Assert(metad->btm_magic == BTREE_MAGIC);
715 [ - + ]: 3744382 : Assert(metad->btm_version >= BTREE_MIN_VERSION);
716 [ - + ]: 3744382 : Assert(metad->btm_version <= BTREE_VERSION);
2260 717 [ + + - + ]: 3744382 : Assert(!metad->btm_allequalimage ||
718 : : metad->btm_version > BTREE_NOVAC_VERSION);
2483 719 [ - + ]: 3744382 : Assert(metad->btm_fastroot != P_NONE);
720 : :
4862 tgl@sss.pgh.pa.us 721 : 3744382 : return metad->btm_fastlevel;
722 : : }
723 : :
724 : : /*
725 : : * _bt_metaversion() -- Get version/status info from metapage.
726 : : *
727 : : * Sets caller's *heapkeyspace and *allequalimage arguments using data
728 : : * from the B-Tree metapage (could be locally-cached version). This
729 : : * information needs to be stashed in insertion scankey, so we provide a
730 : : * single function that fetches both at once.
731 : : *
732 : : * This is used to determine the rules that must be used to descend a
733 : : * btree. Version 4 indexes treat heap TID as a tiebreaker attribute.
734 : : * pg_upgrade'd version 3 indexes need extra steps to preserve reasonable
735 : : * performance when inserting a new BTScanInsert-wise duplicate tuple
736 : : * among many leaf pages already full of such duplicates.
737 : : *
738 : : * Also sets allequalimage field, which indicates whether or not it is
739 : : * safe to apply deduplication. We rely on the assumption that
740 : : * btm_allequalimage will be zero'ed on heapkeyspace indexes that were
741 : : * pg_upgrade'd from Postgres 12.
742 : : */
743 : : void
1060 pg@bowt.ie 744 : 18439139 : _bt_metaversion(Relation rel, bool *heapkeyspace, bool *allequalimage)
745 : : {
746 : : BTMetaPageData *metad;
747 : :
2603 748 [ + + ]: 18439139 : if (rel->rd_amcache == NULL)
749 : : {
750 : : Buffer metabuf;
751 : :
1060 752 : 716434 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
2603 753 : 716434 : metad = _bt_getmeta(rel, metabuf);
754 : :
755 : : /*
756 : : * If there's no root page yet, _bt_getroot() doesn't expect a cache
757 : : * to be made, so just stop here. (XXX perhaps _bt_getroot() should
758 : : * be changed to allow this case.)
759 : : */
760 [ + + ]: 716434 : if (metad->btm_root == P_NONE)
761 : : {
2260 762 : 339132 : *heapkeyspace = metad->btm_version > BTREE_NOVAC_VERSION;
763 : 339132 : *allequalimage = metad->btm_allequalimage;
764 : :
2603 765 : 339132 : _bt_relbuf(rel, metabuf);
2260 766 : 339132 : return;
767 : : }
768 : :
769 : : /*
770 : : * Cache the metapage data for next time
771 : : *
772 : : * An on-the-fly version upgrade performed by _bt_upgrademetapage()
773 : : * can change the nbtree version for an index without invalidating any
774 : : * local cache. This is okay because it can only happen when moving
775 : : * from version 2 to version 3, both of which are !heapkeyspace
776 : : * versions.
777 : : */
2483 778 : 377302 : rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
779 : : sizeof(BTMetaPageData));
780 : 377302 : memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
2603 781 : 377302 : _bt_relbuf(rel, metabuf);
782 : : }
783 : :
784 : : /* Get cached page */
785 : 18100007 : metad = (BTMetaPageData *) rel->rd_amcache;
786 : : /* We shouldn't have cached it if any of these fail */
2483 787 [ - + ]: 18100007 : Assert(metad->btm_magic == BTREE_MAGIC);
788 [ - + ]: 18100007 : Assert(metad->btm_version >= BTREE_MIN_VERSION);
789 [ - + ]: 18100007 : Assert(metad->btm_version <= BTREE_VERSION);
2260 790 [ + + - + ]: 18100007 : Assert(!metad->btm_allequalimage ||
791 : : metad->btm_version > BTREE_NOVAC_VERSION);
2483 792 [ - + ]: 18100007 : Assert(metad->btm_fastroot != P_NONE);
793 : :
2260 794 : 18100007 : *heapkeyspace = metad->btm_version > BTREE_NOVAC_VERSION;
795 : 18100007 : *allequalimage = metad->btm_allequalimage;
796 : : }
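/*
 * Editor's sketch (not part of nbtpage.c): callers that set up an
 * insertion scankey typically stash both flags at once, roughly like so
 * (field names mirror BTScanInsert; treat this as a sketch):
 *
 *     _bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
 */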
797 : :
798 : : /*
799 : : * _bt_checkpage() -- Verify that a freshly-read page looks sane.
800 : : */
801 : : void
7485 tgl@sss.pgh.pa.us 802 : 30603085 : _bt_checkpage(Relation rel, Buffer buf)
803 : : {
3667 kgrittn@postgresql.o 804 : 30603085 : Page page = BufferGetPage(buf);
805 : :
806 : : /*
807 : : * ReadBuffer verifies that every newly-read page passes
808 : : * PageHeaderIsValid, which means it either contains a reasonably sane
809 : : * page header or is all-zero. We have to defend against the all-zero
810 : : * case, however.
811 : : */
7485 tgl@sss.pgh.pa.us 812 [ - + ]: 30603085 : if (PageIsNew(page))
7485 tgl@sss.pgh.pa.us 813 [ # # ]:UBC 0 : ereport(ERROR,
814 : : (errcode(ERRCODE_INDEX_CORRUPTED),
815 : : errmsg("index \"%s\" contains unexpected zero page at block %u",
816 : : RelationGetRelationName(rel),
817 : : BufferGetBlockNumber(buf)),
818 : : errhint("Please REINDEX it.")));
819 : :
820 : : /*
821 : : * Additionally check that the special area looks sane.
822 : : */
6505 tgl@sss.pgh.pa.us 823 [ - + ]:CBC 30603085 : if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BTPageOpaqueData)))
7485 tgl@sss.pgh.pa.us 824 [ # # ]:UBC 0 : ereport(ERROR,
825 : : (errcode(ERRCODE_INDEX_CORRUPTED),
826 : : errmsg("index \"%s\" contains corrupted page at block %u",
827 : : RelationGetRelationName(rel),
828 : : BufferGetBlockNumber(buf)),
829 : : errhint("Please REINDEX it.")));
7485 tgl@sss.pgh.pa.us 830 :CBC 30603085 : }
831 : :
832 : : /*
833 : : * _bt_getbuf() -- Get an existing block in a buffer, for read or write.
834 : : *
835 : : * The general rule in nbtree is that it's never okay to access a
836 : : * page without holding both a buffer pin and a buffer lock on
837 : : * the page's buffer.
838 : : *
839 : : * When this routine returns, the appropriate lock is set on the
840 : : * requested buffer and its reference count has been incremented
841 : : * (ie, the buffer is "locked and pinned"). Also, we apply
842 : : * _bt_checkpage to sanity-check the page, and perform Valgrind
843 : : * client requests that help Valgrind detect unsafe page accesses.
844 : : *
845 : : * Note: raw LockBuffer() calls are disallowed in nbtree; all
846 : : * buffer lock requests need to go through wrapper functions such
847 : : * as _bt_lockbuf().
848 : : */
849 : : Buffer
1060 pg@bowt.ie 850 : 16891791 : _bt_getbuf(Relation rel, BlockNumber blkno, int access)
851 : : {
852 : : Buffer buf;
853 : :
854 [ - + ]: 16891791 : Assert(BlockNumberIsValid(blkno));
855 : :
856 : : /* Read an existing block of the relation */
857 : 16891791 : buf = ReadBuffer(rel, blkno);
858 : 16891791 : _bt_lockbuf(rel, buf, access);
859 : 16891791 : _bt_checkpage(rel, buf);
860 : :
861 : 16891791 : return buf;
862 : : }
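/*
 * Editor's sketch (not part of nbtpage.c): the lock-and-pin rule above
 * leads to the usual access pattern below; the page must not be touched
 * once _bt_relbuf() has released the lock and pin (variable names are
 * hypothetical):
 *
 *     buf = _bt_getbuf(rel, blkno, BT_READ);
 *     page = BufferGetPage(buf);
 *     opaque = BTPageGetOpaque(page);
 *     ... examine items while the lock and pin are held ...
 *     _bt_relbuf(rel, buf);
 */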
863 : :
864 : : /*
865 : : * _bt_allocbuf() -- Allocate a new block/page.
866 : : *
867 : : * Returns a write-locked buffer containing an unallocated nbtree page.
868 : : *
869 : : * Callers are required to pass a valid heaprel. We need heaprel so that we
870 : : * can handle generating a snapshotConflictHorizon that makes reusing a page
871 : : * from the FSM safe for queries that may be running on standbys.
872 : : */
873 : : Buffer
874 : 24551 : _bt_allocbuf(Relation rel, Relation heaprel)
875 : : {
876 : : Buffer buf;
877 : : BlockNumber blkno;
878 : : Page page;
879 : :
880 [ + - ]: 24551 : Assert(heaprel != NULL);
881 : :
882 : : /*
883 : : * First see if the FSM knows of any free pages.
884 : : *
885 : : * We can't trust the FSM's report unreservedly; we have to check that the
886 : : * page is still free. (For example, an already-free page could have been
887 : : * re-used between the time the last VACUUM scanned it and the time the
888 : : * VACUUM made its FSM updates.)
889 : : *
890 : : * In fact, it's worse than that: we can't even assume that it's safe to
891 : : * take a lock on the reported page. If somebody else has a lock on it,
892 : : * or even worse our own caller does, we could deadlock. (The own-caller
893 : : * scenario is actually not improbable. Consider an index on a serial or
894 : : * timestamp column. Nearly all splits will be at the rightmost page, so
895 : : * it's entirely likely that _bt_split will call us while holding a lock
896 : : * on the page most recently acquired from FSM. A VACUUM running
897 : : * concurrently with the previous split could well have placed that page
898 : : * back in FSM.)
899 : : *
900 : : * To get around that, we ask for only a conditional lock on the reported
901 : : * page. If we fail, then someone else is using the page, and we may
902 : : * reasonably assume it's not free. (If we happen to be wrong, the worst
903 : : * consequence is the page will be lost to use till the next VACUUM, which
904 : : * is no big problem.)
905 : : */
906 : : for (;;)
907 : : {
908 : 24551 : blkno = GetFreeIndexPage(rel);
909 [ + + ]: 24551 : if (blkno == InvalidBlockNumber)
910 : 24256 : break;
911 : 295 : buf = ReadBuffer(rel, blkno);
912 [ + - ]: 295 : if (_bt_conditionallockbuf(rel, buf))
913 : : {
914 : 295 : page = BufferGetPage(buf);
915 : :
916 : : /*
917 : : * It's possible to find an all-zeroes page in an index. For
918 : : * example, a backend might successfully extend the relation one
919 : : * page and then crash before it is able to make a WAL entry for
920 : : * adding the page. If we find a zeroed page then reclaim it
921 : : * immediately.
922 : : */
923 [ - + ]: 295 : if (PageIsNew(page))
924 : : {
925 : : /* Okay to use page. Initialize and return it. */
1060 pg@bowt.ie 926 :UBC 0 : _bt_pageinit(page, BufferGetPageSize(buf));
927 : 0 : return buf;
928 : : }
929 : :
1060 pg@bowt.ie 930 [ + - ]:CBC 295 : if (BTPageIsRecyclable(page, heaprel))
931 : : {
932 : : /*
933 : : * If we are generating WAL for Hot Standby then create a WAL
934 : : * record that will allow us to conflict with queries running
935 : : * on standby, in case they have snapshots older than safexid
936 : : * value
937 : : */
938 [ + - - + : 295 : if (RelationNeedsWAL(rel) && XLogStandbyInfoActive())
- - - - +
- ]
939 : : {
940 : : xl_btree_reuse_page xlrec_reuse;
941 : :
942 : : /*
943 : : * Note that we don't register the buffer with the record,
944 : : * because this operation doesn't modify the page (that
945 : : * already happened, back when VACUUM deleted the page).
946 : : * This record only exists to provide a conflict point for
947 : : * Hot Standby. See record REDO routine comments.
948 : : */
949 : 295 : xlrec_reuse.locator = rel->rd_locator;
950 : 295 : xlrec_reuse.block = blkno;
951 : 295 : xlrec_reuse.snapshotConflictHorizon = BTPageGetDeleteXid(page);
952 : 295 : xlrec_reuse.isCatalogRel =
953 [ + - - + : 295 : RelationIsAccessibleInLogicalDecoding(heaprel);
- - - - -
- - - - -
- - - - -
- - - ]
954 : :
955 : 295 : XLogBeginInsert();
448 peter@eisentraut.org 956 : 295 : XLogRegisterData(&xlrec_reuse, SizeOfBtreeReusePage);
957 : :
1060 pg@bowt.ie 958 : 295 : XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE);
959 : : }
960 : :
961 : : /* Okay to use page. Re-initialize and return it. */
962 : 295 : _bt_pageinit(page, BufferGetPageSize(buf));
963 : 295 : return buf;
964 : : }
1060 pg@bowt.ie 965 [ # # ]:UBC 0 : elog(DEBUG2, "FSM returned nonrecyclable page");
966 : 0 : _bt_relbuf(rel, buf);
967 : : }
968 : : else
969 : : {
970 [ # # ]: 0 : elog(DEBUG2, "FSM returned nonlockable page");
971 : : /* couldn't get lock, so just drop pin */
972 : 0 : ReleaseBuffer(buf);
973 : : }
974 : : }
975 : :
976 : : /*
977 : : * Extend the relation by one page. Need to use RBM_ZERO_AND_LOCK or we
978 : : * risk a race condition against btvacuumscan --- see comments therein.
979 : : * This forces us to repeat the valgrind request that _bt_lockbuf()
980 : : * otherwise would make, as we can't use _bt_lockbuf() without introducing
981 : : * a race.
982 : : */
986 tmunro@postgresql.or 983 :CBC 24256 : buf = ExtendBufferedRel(BMR_REL(rel), MAIN_FORKNUM, NULL, EB_LOCK_FIRST);
1060 pg@bowt.ie 984 : 24256 : if (!RelationUsesLocalBuffers(rel))
985 : : VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
986 : :
987 : : /* Initialize the new page before returning it */
988 : 24256 : page = BufferGetPage(buf);
989 [ - + ]: 24256 : Assert(PageIsNew(page));
990 : 24256 : _bt_pageinit(page, BufferGetPageSize(buf));
991 : :
10108 bruce@momjian.us 992 : 24256 : return buf;
993 : : }
994 : :
995 : : /*
996 : : * _bt_relandgetbuf() -- release a locked buffer and get another one.
997 : : *
998 : : * This is equivalent to _bt_relbuf followed by _bt_getbuf. Also, if obuf is
999 : : * InvalidBuffer then it reduces to just _bt_getbuf; allowing this case
1000 : : * simplifies some callers.
1001 : : *
1002 : : * The original motivation for using this was to avoid two entries to the
1003 : : * bufmgr when one would do. However, now it's mainly just a notational
1004 : : * convenience. The only case where it saves work over _bt_relbuf/_bt_getbuf
1005 : : * is when the target page is the same one already in the buffer.
1006 : : */
1007 : : Buffer
8049 tgl@sss.pgh.pa.us 1008 : 13627353 : _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
1009 : : {
1010 : : Buffer buf;
1011 : :
1060 pg@bowt.ie 1012 [ - + ]: 13627353 : Assert(BlockNumberIsValid(blkno));
8049 tgl@sss.pgh.pa.us 1013 [ + + ]: 13627353 : if (BufferIsValid(obuf))
1014 : : {
39 andres@anarazel.de 1015 [ - + ]:GNC 13616817 : if (BufferGetBlockNumber(obuf) == blkno)
1016 : : {
1017 : : /* trade in old lock mode for new lock */
39 andres@anarazel.de 1018 :UNC 0 : _bt_unlockbuf(rel, obuf);
1019 : 0 : buf = obuf;
1020 : : }
1021 : : else
1022 : : {
1023 : : /* release lock and pin at once, that's a bit more efficient */
39 andres@anarazel.de 1024 :GNC 13616817 : _bt_relbuf(rel, obuf);
1025 : 13616817 : buf = ReadBuffer(rel, blkno);
1026 : : }
1027 : : }
1028 : : else
1029 : 10536 : buf = ReadBuffer(rel, blkno);
1030 : :
1031 : 13627353 : _bt_lockbuf(rel, buf, access);
7485 tgl@sss.pgh.pa.us 1032 :CBC 13627353 : _bt_checkpage(rel, buf);
1033 : :
8049 1034 : 13627353 : return buf;
1035 : : }
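/*
 * Editor's sketch (not part of nbtpage.c): the common use is stepping
 * right along a level, releasing the current page before locking its
 * sibling, roughly:
 *
 *     buf = _bt_relandgetbuf(rel, buf, opaque->btpo_next, BT_READ);
 */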
1036 : :
1037 : : /*
1038 : : * _bt_relbuf() -- release a locked buffer.
1039 : : *
1040 : : * Lock and pin (refcount) are both dropped. This is a bit more efficient than
1041 : : * doing the two operations separately.
1042 : : */
1043 : : void
9060 1044 : 27688061 : _bt_relbuf(Relation rel, Buffer buf)
1045 : : {
1046 : : /*
1047 : : * Buffer is pinned and locked, which means that it is expected to be
1048 : : * defined and addressable. Check that proactively.
1049 : : */
1050 : : VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);
39 andres@anarazel.de 1051 :GNC 27688061 : if (!RelationUsesLocalBuffers(rel))
1052 : : VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), BLCKSZ);
1053 : :
1054 : 27688061 : UnlockReleaseBuffer(buf);
2114 pg@bowt.ie 1055 :CBC 27688061 : }
1056 : :
1057 : : /*
1058 : : * _bt_lockbuf() -- lock a pinned buffer.
1059 : : *
1060 : : * Lock is acquired without acquiring another pin. This is like a raw
1061 : : * LockBuffer() call, but performs extra steps needed by Valgrind.
1062 : : *
1063 : : * Note: Caller may need to call _bt_checkpage() with buf when pin on buf
1064 : : * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf().
1065 : : */
1066 : : void
1067 : 31153289 : _bt_lockbuf(Relation rel, Buffer buf, int access)
1068 : : {
1069 : : /* LockBuffer() asserts that pin is held by this backend */
1070 : 31153289 : LockBuffer(buf, access);
1071 : :
1072 : : /*
1073 : : * It doesn't matter that _bt_unlockbuf() won't get called in the event of
1074 : : * an nbtree error (e.g. a unique violation error). That won't cause
1075 : : * Valgrind false positives.
1076 : : *
1077 : : * The nbtree client requests are superimposed on top of the bufmgr.c
1078 : : * buffer pin client requests. In the event of an nbtree error the buffer
1079 : : * will certainly get marked as defined when the backend once again
1080 : : * acquires its first pin on the buffer. (Of course, if the backend never
1081 : : * touches the buffer again then it doesn't matter that it remains
1082 : : * non-accessible to Valgrind.)
1083 : : *
1084 : : * Note: When an IndexTuple C pointer gets computed using an ItemId read
1085 : : * from a page while a lock was held, the C pointer becomes unsafe to
1086 : : * dereference forever as soon as the lock is released. Valgrind can only
1087 : : * detect cases where the pointer gets dereferenced with no _current_
1088 : : * lock/pin held, though.
1089 : : */
1090 : 31153289 : if (!RelationUsesLocalBuffers(rel))
1091 : : VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
1092 : 31153289 : }
1093 : :
1094 : : /*
1095 : : * _bt_unlockbuf() -- unlock a pinned buffer.
1096 : : */
1097 : : void
1098 : 3524286 : _bt_unlockbuf(Relation rel, Buffer buf)
1099 : : {
1100 : : /*
1101 : : * Buffer is pinned and locked, which means that it is expected to be
1102 : : * defined and addressable. Check that proactively.
1103 : : */
1104 : : VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);
1105 : :
1106 : : /* LockBuffer() asserts that pin is held by this backend */
1107 : 3524286 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1108 : :
1109 : 3524286 : if (!RelationUsesLocalBuffers(rel))
1110 : : VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), BLCKSZ);
1111 : 3524286 : }
1112 : :
1113 : : /*
1114 : : * _bt_conditionallockbuf() -- conditionally BT_WRITE lock pinned
1115 : : * buffer.
1116 : : *
1117 : : * Note: Caller may need to call _bt_checkpage() with buf when pin on buf
1118 : : * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf().
1119 : : */
1120 : : bool
1121 : 35546 : _bt_conditionallockbuf(Relation rel, Buffer buf)
1122 : : {
1123 : : /* ConditionalLockBuffer() asserts that pin is held by this backend */
1124 [ + + ]: 35546 : if (!ConditionalLockBuffer(buf))
1125 : 733 : return false;
1126 : :
1127 : 34813 : if (!RelationUsesLocalBuffers(rel))
1128 : : VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
1129 : :
1130 : 34813 : return true;
1131 : : }
1132 : :
1133 : : /*
1134 : : * _bt_upgradelockbufcleanup() -- upgrade lock to a full cleanup lock.
1135 : : */
1136 : : void
1137 : 20059 : _bt_upgradelockbufcleanup(Relation rel, Buffer buf)
1138 : : {
1139 : : /*
1140 : : * Buffer is pinned and locked, which means that it is expected to be
1141 : : * defined and addressable. Check that proactively.
1142 : : */
1143 : : VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);
1144 : :
1145 : : /* LockBuffer() asserts that pin is held by this backend */
1146 : 20059 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1147 : 20059 : LockBufferForCleanup(buf);
10892 scrappy@hub.org 1148 : 20059 : }
1149 : :
1150 : : /*
1151 : : * _bt_pageinit() -- Initialize a new page.
1152 : : *
1153 : : * On return, the page header is initialized; data space is empty;
1154 : : * special space is zeroed out.
1155 : : */
1156 : : void
1157 : 110543 : _bt_pageinit(Page page, Size size)
1158 : : {
10467 bruce@momjian.us 1159 : 110543 : PageInit(page, size, sizeof(BTPageOpaqueData));
10892 scrappy@hub.org 1160 : 110543 : }
1161 : :
1162 : : /*
1163 : : * Delete item(s) from a btree leaf page during VACUUM.
1164 : : *
1165 : : * This routine assumes that the caller already has a full cleanup lock on
1166 : : * the buffer. Also, the given deletable and updatable arrays *must* be
1167 : : * sorted in ascending order.
1168 : : *
1169 : : * Routine deals with deleting TIDs when some (but not all) of the heap TIDs
1170 : : * in an existing posting list item are to be removed. This works by
1171 : : * updating/overwriting an existing item with caller's new version of the item
1172 : : * (a version that lacks the TIDs that are to be deleted).
1173 : : *
1174 : : * We record VACUUMs and b-tree deletes differently in WAL. Deletes must
1175 : : * generate their own snapshotConflictHorizon directly from the tableam,
1176 : : * whereas VACUUMs rely on the initial VACUUM table scan performing
1177 : : * WAL-logging that takes care of the issue for the table's indexes
1178 : : * indirectly. Also, we remove the VACUUM cycle ID from pages, which b-tree
1179 : : * deletes don't do.
1180 : : */
1181 : : void
5882 simon@2ndQuadrant.co 1182 : 11042 : _bt_delitems_vacuum(Relation rel, Buffer buf,
1183 : : OffsetNumber *deletable, int ndeletable,
1184 : : BTVacuumPosting *updatable, int nupdatable)
1185 : : {
3667 kgrittn@postgresql.o 1186 : 11042 : Page page = BufferGetPage(buf);
1187 : : BTPageOpaque opaque;
1938 pg@bowt.ie 1188 [ + + + + : 11042 : bool needswal = RelationNeedsWAL(rel);
+ - + - ]
2260 1189 : 11042 : char *updatedbuf = NULL;
1190 : 11042 : Size updatedbuflen = 0;
1191 : : OffsetNumber updatedoffsets[MaxIndexTuplesPerPage];
1192 : : XLogRecPtr recptr;
1193 : :
1194 : : /* Shouldn't be called unless there's something to do */
1195 [ + + - + ]: 11042 : Assert(ndeletable > 0 || nupdatable > 0);
1196 : :
1197 : : /* Generate new version of posting lists without deleted TIDs */
1938 1198 [ + + ]: 11042 : if (nupdatable > 0)
1199 : 1511 : updatedbuf = _bt_delitems_update(updatable, nupdatable,
1200 : : updatedoffsets, &updatedbuflen,
1201 : : needswal);
1202 : :
1203 : : /* No ereport(ERROR) until changes are logged */
9244 tgl@sss.pgh.pa.us 1204 : 11042 : START_CRIT_SECTION();
1205 : :
1206 : : /*
1207 : : * Handle posting tuple updates.
1208 : : *
1209 : : * Deliberately do this before handling simple deletes. If we did it the
1210 : : * other way around (i.e. WAL record order -- simple deletes before
1211 : : * updates) then we'd have to make compensating changes to the 'updatable'
1212 : : * array of offset numbers.
1213 : : *
1214 : : * PageIndexTupleOverwrite() won't unset each item's LP_DEAD bit when it
1215 : : * happens to already be set. It's important that we not interfere with
1216 : : * any future simple index tuple deletion operations.
1217 : : */
2260 pg@bowt.ie 1218 [ + + ]: 73045 : for (int i = 0; i < nupdatable; i++)
1219 : : {
1220 : 62003 : OffsetNumber updatedoffset = updatedoffsets[i];
1221 : : IndexTuple itup;
1222 : : Size itemsz;
1223 : :
1224 : 62003 : itup = updatable[i]->itup;
1225 : 62003 : itemsz = MAXALIGN(IndexTupleSize(itup));
190 peter@eisentraut.org 1226 [ - + ]:GNC 62003 : if (!PageIndexTupleOverwrite(page, updatedoffset, itup, itemsz))
2260 pg@bowt.ie 1227 [ # # ]:UBC 0 : elog(PANIC, "failed to update partially dead item in block %u of index \"%s\"",
1228 : : BufferGetBlockNumber(buf), RelationGetRelationName(rel));
1229 : : }
1230 : :
1231 : : /* Now handle simple deletes of entire tuples */
2260 pg@bowt.ie 1232 [ + + ]:CBC 11042 : if (ndeletable > 0)
1233 : 10712 : PageIndexMultiDelete(page, deletable, ndeletable);
1234 : :
1235 : : /*
1236 : : * We can clear the vacuum cycle ID since this page has certainly been
1237 : : * processed by the current vacuum scan.
1238 : : */
1495 michael@paquier.xyz 1239 : 11042 : opaque = BTPageGetOpaque(page);
7302 tgl@sss.pgh.pa.us 1240 : 11042 : opaque->btpo_cycleid = 0;
1241 : :
1242 : : /*
1243 : : * Clear the BTP_HAS_GARBAGE page flag.
1244 : : *
1245 : : * This flag indicates the presence of LP_DEAD items on the page (though
1246 : : * not reliably). Note that we only rely on it with pg_upgrade'd
1247 : : * !heapkeyspace indexes. That's why clearing it here won't usually
1248 : : * interfere with simple index tuple deletion.
1249 : : */
7224 1250 : 11042 : opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
1251 : :
5278 simon@2ndQuadrant.co 1252 : 11042 : MarkBufferDirty(buf);
1253 : :
1254 : : /* XLOG stuff */
1938 pg@bowt.ie 1255 [ + + ]: 11042 : if (needswal)
1256 : : {
1257 : : xl_btree_vacuum xlrec_vacuum;
1258 : :
2329 1259 : 11041 : xlrec_vacuum.ndeleted = ndeletable;
2260 1260 : 11041 : xlrec_vacuum.nupdated = nupdatable;
1261 : :
4184 heikki.linnakangas@i 1262 : 11041 : XLogBeginInsert();
1263 : 11041 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
448 peter@eisentraut.org 1264 : 11041 : XLogRegisterData(&xlrec_vacuum, SizeOfBtreeVacuum);
1265 : :
2260 pg@bowt.ie 1266 [ + + ]: 11041 : if (ndeletable > 0)
448 peter@eisentraut.org 1267 : 10711 : XLogRegisterBufData(0, deletable,
1268 : : ndeletable * sizeof(OffsetNumber));
1269 : :
2260 pg@bowt.ie 1270 [ + + ]: 11041 : if (nupdatable > 0)
1271 : : {
448 peter@eisentraut.org 1272 : 1511 : XLogRegisterBufData(0, updatedoffsets,
1273 : : nupdatable * sizeof(OffsetNumber));
2260 pg@bowt.ie 1274 : 1511 : XLogRegisterBufData(0, updatedbuf, updatedbuflen);
1275 : : }
1276 : :
4184 heikki.linnakangas@i 1277 : 11041 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM);
1278 : : }
1279 : : else
53 pg@bowt.ie 1280 :GNC 1 : recptr = XLogGetFakeLSN(rel);
1281 : :
1282 : 11042 : PageSetLSN(page, recptr);
1283 : :
5882 simon@2ndQuadrant.co 1284 [ - + ]:CBC 11042 : END_CRIT_SECTION();
1285 : :
1286 : : /* can't leak memory here */
2260 pg@bowt.ie 1287 [ + + ]: 11042 : if (updatedbuf != NULL)
1288 : 1511 : pfree(updatedbuf);
1289 : : /* free tuples allocated within _bt_delitems_update() */
1290 [ + + ]: 73045 : for (int i = 0; i < nupdatable; i++)
1291 : 62003 : pfree(updatable[i]->itup);
5882 simon@2ndQuadrant.co 1292 : 11042 : }
1293 : :
1294 : : /*
1295 : : * Delete item(s) from a btree leaf page during single-page cleanup.
1296 : : *
1297 : : * This routine assumes that the caller has pinned and write locked the
1298 : : * buffer. Also, the given deletable and updatable arrays *must* be sorted in
1299 : : * ascending order.
1300 : : *
1301 : : * Routine deals with deleting TIDs when some (but not all) of the heap TIDs
1302 : : * in an existing posting list item are to be removed. This works by
1303 : : * updating/overwriting an existing item with caller's new version of the item
1304 : : * (a version that lacks the TIDs that are to be deleted).
1305 : : *
1306 : : * This is nearly the same as _bt_delitems_vacuum as far as what it does to
1307 : : * the page, but it needs its own snapshotConflictHorizon and isCatalogRel
1308 : : * (from the tableam). This is used by the REDO routine to generate recovery
1309 : : * conflicts. The other difference is that only _bt_delitems_vacuum will
1310 : : * clear page's VACUUM cycle ID.
1311 : : */
1312 : : static void
1060 pg@bowt.ie 1313 : 5508 : _bt_delitems_delete(Relation rel, Buffer buf,
1314 : : TransactionId snapshotConflictHorizon, bool isCatalogRel,
1315 : : OffsetNumber *deletable, int ndeletable,
1316 : : BTVacuumPosting *updatable, int nupdatable)
1317 : : {
3667 kgrittn@postgresql.o 1318 : 5508 : Page page = BufferGetPage(buf);
1319 : : BTPageOpaque opaque;
1938 pg@bowt.ie 1320 [ + - + + : 5508 : bool needswal = RelationNeedsWAL(rel);
+ - + - ]
1321 : 5508 : char *updatedbuf = NULL;
1322 : 5508 : Size updatedbuflen = 0;
1323 : : OffsetNumber updatedoffsets[MaxIndexTuplesPerPage];
1324 : : XLogRecPtr recptr;
1325 : :
1326 : : /* Shouldn't be called unless there's something to do */
1327 [ + + - + ]: 5508 : Assert(ndeletable > 0 || nupdatable > 0);
1328 : :
1329 : : /* Generate new versions of posting lists without deleted TIDs */
1330 [ + + ]: 5508 : if (nupdatable > 0)
1331 : 504 : updatedbuf = _bt_delitems_update(updatable, nupdatable,
1332 : : updatedoffsets, &updatedbuflen,
1333 : : needswal);
1334 : :
1335 : : /* No ereport(ERROR) until changes are logged */
5882 simon@2ndQuadrant.co 1336 : 5508 : START_CRIT_SECTION();
1337 : :
1338 : : /* Handle updates and deletes just like _bt_delitems_vacuum */
1938 pg@bowt.ie 1339 [ + + ]: 11495 : for (int i = 0; i < nupdatable; i++)
1340 : : {
1341 : 5987 : OffsetNumber updatedoffset = updatedoffsets[i];
1342 : : IndexTuple itup;
1343 : : Size itemsz;
1344 : :
1345 : 5987 : itup = updatable[i]->itup;
1346 : 5987 : itemsz = MAXALIGN(IndexTupleSize(itup));
190 peter@eisentraut.org 1347 [ - + ]:GNC 5987 : if (!PageIndexTupleOverwrite(page, updatedoffset, itup, itemsz))
1938 pg@bowt.ie 1348 [ # # ]:UBC 0 : elog(PANIC, "failed to update partially dead item in block %u of index \"%s\"",
1349 : : BufferGetBlockNumber(buf), RelationGetRelationName(rel));
1350 : : }
1351 : :
1938 pg@bowt.ie 1352 [ + + ]:CBC 5508 : if (ndeletable > 0)
1353 : 5454 : PageIndexMultiDelete(page, deletable, ndeletable);
1354 : :
1355 : : /*
1356 : : * Unlike _bt_delitems_vacuum, we *must not* clear the vacuum cycle ID at
1357 : : * this point. The VACUUM command alone controls vacuum cycle IDs.
1358 : : */
1495 michael@paquier.xyz 1359 : 5508 : opaque = BTPageGetOpaque(page);
1360 : :
1361 : : /*
1362 : : * Clear the BTP_HAS_GARBAGE page flag.
1363 : : *
1364 : : * This flag indicates the presence of LP_DEAD items on the page (though
1365 : : * not reliably). Note that we only rely on it with pg_upgrade'd
1366 : : * !heapkeyspace indexes.
1367 : : */
5882 simon@2ndQuadrant.co 1368 : 5508 : opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
1369 : :
1370 : 5508 : MarkBufferDirty(buf);
1371 : :
1372 : : /* XLOG stuff */
1938 pg@bowt.ie 1373 [ + - ]: 5508 : if (needswal)
1374 : : {
1375 : : xl_btree_delete xlrec_delete;
1376 : :
1265 1377 : 5508 : xlrec_delete.snapshotConflictHorizon = snapshotConflictHorizon;
2314 1378 : 5508 : xlrec_delete.ndeleted = ndeletable;
1938 1379 : 5508 : xlrec_delete.nupdated = nupdatable;
1060 1380 : 5508 : xlrec_delete.isCatalogRel = isCatalogRel;
1381 : :
4184 heikki.linnakangas@i 1382 : 5508 : XLogBeginInsert();
1383 : 5508 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
448 peter@eisentraut.org 1384 : 5508 : XLogRegisterData(&xlrec_delete, SizeOfBtreeDelete);
1385 : :
1938 pg@bowt.ie 1386 [ + + ]: 5508 : if (ndeletable > 0)
448 peter@eisentraut.org 1387 : 5454 : XLogRegisterBufData(0, deletable,
1388 : : ndeletable * sizeof(OffsetNumber));
1389 : :
1938 pg@bowt.ie 1390 [ + + ]: 5508 : if (nupdatable > 0)
1391 : : {
448 peter@eisentraut.org 1392 : 504 : XLogRegisterBufData(0, updatedoffsets,
1393 : : nupdatable * sizeof(OffsetNumber));
1938 pg@bowt.ie 1394 : 504 : XLogRegisterBufData(0, updatedbuf, updatedbuflen);
1395 : : }
1396 : :
4184 heikki.linnakangas@i 1397 : 5508 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE);
1398 : : }
1399 : : else
53 pg@bowt.ie 1400 :UNC 0 : recptr = XLogGetFakeLSN(rel);
1401 : :
53 pg@bowt.ie 1402 :GNC 5508 : PageSetLSN(page, recptr);
1403 : :
9060 tgl@sss.pgh.pa.us 1404 [ - + ]:CBC 5508 : END_CRIT_SECTION();
1405 : :
1406 : : /* can't leak memory here */
1938 pg@bowt.ie 1407 [ + + ]: 5508 : if (updatedbuf != NULL)
1408 : 504 : pfree(updatedbuf);
1409 : : /* free tuples allocated within _bt_delitems_update() */
1410 [ + + ]: 11495 : for (int i = 0; i < nupdatable; i++)
1411 : 5987 : pfree(updatable[i]->itup);
10892 scrappy@hub.org 1412 : 5508 : }
1413 : :
1414 : : /*
1415 : : * Set up state needed to delete TIDs from posting list tuples via "updating"
1416 : : * the tuple. Performs steps common to both _bt_delitems_vacuum and
1417 : : * _bt_delitems_delete. These steps must take place before each function's
1418 : : * critical section begins.
1419 : : *
1420 : : * updatable and nupdatable are inputs, though note that we will use
1421 : : * _bt_update_posting() to replace the original itup with a pointer to a final
1422 : : * version in palloc()'d memory. Caller should free the tuples when it's done.
1423 : : *
1424 : : * The first nupdatable entries from updatedoffsets are set to the page offset
1425 : : * number for posting list tuples that caller updates. This is mostly useful
1426 : : * because caller may need to WAL-log the page offsets (though we always do
1427 : : * this for caller out of convenience).
1428 : : *
1429 : : * Returns buffer consisting of an array of xl_btree_update structs that
1430 : : * describe the steps we perform here for caller (though only when needswal is
1431 : : * true). Also sets *updatedbuflen to the final size of the buffer. This
1432 : : * buffer is used by caller when WAL logging is required.
1433 : : */
1434 : : static char *
1938 pg@bowt.ie 1435 : 2015 : _bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
1436 : : OffsetNumber *updatedoffsets, Size *updatedbuflen,
1437 : : bool needswal)
1438 : : {
1439 : 2015 : char *updatedbuf = NULL;
1440 : 2015 : Size buflen = 0;
1441 : :
1442 : : /* Shouldn't be called unless there's something to do */
1443 [ - + ]: 2015 : Assert(nupdatable > 0);
1444 : :
1445 [ + + ]: 70005 : for (int i = 0; i < nupdatable; i++)
1446 : : {
1447 : 67990 : BTVacuumPosting vacposting = updatable[i];
1448 : : Size itemsz;
1449 : :
1450 : : /* Replace work area IndexTuple with updated version */
1451 : 67990 : _bt_update_posting(vacposting);
1452 : :
1453 : : /* Keep track of size of xl_btree_update for updatedbuf in passing */
1454 : 67990 : itemsz = SizeOfBtreeUpdate + vacposting->ndeletedtids * sizeof(uint16);
1455 : 67990 : buflen += itemsz;
1456 : :
1457 : : /* Build updatedoffsets buffer in passing */
1458 : 67990 : updatedoffsets[i] = vacposting->updatedoffset;
1459 : : }
1460 : :
1461 : : /* XLOG stuff */
1462 [ + - ]: 2015 : if (needswal)
1463 : : {
1464 : 2015 : Size offset = 0;
1465 : :
1466 : : /* Allocate, set final size for caller */
1467 : 2015 : updatedbuf = palloc(buflen);
1468 : 2015 : *updatedbuflen = buflen;
1469 [ + + ]: 70005 : for (int i = 0; i < nupdatable; i++)
1470 : : {
1471 : 67990 : BTVacuumPosting vacposting = updatable[i];
1472 : : Size itemsz;
1473 : : xl_btree_update update;
1474 : :
1475 : 67990 : update.ndeletedtids = vacposting->ndeletedtids;
1476 : 67990 : memcpy(updatedbuf + offset, &update.ndeletedtids,
1477 : : SizeOfBtreeUpdate);
1478 : 67990 : offset += SizeOfBtreeUpdate;
1479 : :
1480 : 67990 : itemsz = update.ndeletedtids * sizeof(uint16);
1481 : 67990 : memcpy(updatedbuf + offset, vacposting->deletetids, itemsz);
1482 : 67990 : offset += itemsz;
1483 : : }
1484 : : }
1485 : :
1486 : 2015 : return updatedbuf;
1487 : : }
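/*
 * Illustrative layout of the updatedbuf buffer returned above (derived from
 * the loop that fills it; nothing beyond what the code shows): the buffer is
 * a packed sequence of variable-length entries, one per updatable[i], each
 * consisting of an xl_btree_update header followed by that entry's
 * deletetids array:
 *
 *     [xl_btree_update: ndeletedtids][uint16 deletetids[ndeletedtids]]
 *     [xl_btree_update: ndeletedtids][uint16 deletetids[ndeletedtids]]
 *     ...
 *
 * The deletetids values are indexes into the posting list tuple whose page
 * offset number is recorded in the matching updatedoffsets[i] slot, which
 * callers register with the WAL record separately.
 */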
1488 : :
1489 : : /*
1490 : : * Comparator used by _bt_delitems_delete_check() to restore deltids array
1491 : : * back to its original leaf-page-wise sort order
1492 : : */
1493 : : static int
1494 : 3145794 : _bt_delitems_cmp(const void *a, const void *b)
1495 : : {
113 peter@eisentraut.org 1496 :GNC 3145794 : const TM_IndexDelete *indexdelete1 = a;
1497 : 3145794 : const TM_IndexDelete *indexdelete2 = b;
1498 : :
809 nathan@postgresql.or 1499 [ - + ]:CBC 3145794 : Assert(indexdelete1->id != indexdelete2->id);
1500 : :
1501 : 3145794 : return pg_cmp_s16(indexdelete1->id, indexdelete2->id);
1502 : : }
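/*
 * Example of the round trip that _bt_delitems_cmp() enables (an illustrative
 * scenario, not a trace of any particular caller): suppose
 * _bt_delitems_delete_check()'s caller built deltids in leaf-page-wise order
 * and assigned ids 0, 1, 2, ....  After table_index_delete_tuples() reorders
 * the array for its own purposes (e.g. by heap block), a qsort() on the id
 * field alone restores the original leaf-page-wise order that the main loop
 * in _bt_delitems_delete_check() depends on.
 */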
1503 : :
1504 : : /*
1505 : : * Try to delete item(s) from a btree leaf page during single-page cleanup.
1506 : : *
1507 : : * nbtree interface to table_index_delete_tuples(). Deletes a subset of index
1508 : : * tuples from caller's deltids array: those whose TIDs are found safe to
1509 : : * delete by the tableam (or already marked LP_DEAD in index, and so already
1510 : : * known to be deletable by our simple index deletion caller). We physically
1511 : : * delete index tuples from buf leaf page last of all (for index tuples where
1512 : : * that is known to be safe following our table_index_delete_tuples() call).
1513 : : *
1514 : : * Simple index deletion caller only includes TIDs from index tuples marked
1515 : : * LP_DEAD, as well as extra TIDs it found on the same leaf page that can be
1516 : : * included without increasing the total number of distinct table blocks for
1517 : : * the deletion operation as a whole. This approach often allows us to delete
1518 : : * some extra index tuples that were practically free for tableam to check in
1519 : : * passing (when they actually turn out to be safe to delete). It probably
1520 : : * only makes sense for the tableam to go ahead with these extra checks when
1521 : : * it is block-oriented (otherwise the checks probably won't be practically
1522 : : * free, which we rely on). The tableam interface requires the tableam side
1523 : : * to handle the problem, though, so this is okay (we as an index AM are free
1524 : : * to make the simplifying assumption that all tableams must be block-based).
1525 : : *
1526 : : * Bottom-up index deletion caller provides all the TIDs from the leaf page,
1527 : : * without expecting that tableam will check most of them. The tableam has
1528 : : * considerable discretion around which entries/blocks it checks. Our role in
1529 : : * costing the bottom-up deletion operation is strictly advisory.
1530 : : *
1531 : : * Note: Caller must have added deltids entries (i.e. entries that go in
1532 : : * delstate's main array) in leaf-page-wise order: page offset number order,
1533 : : * TID order among entries taken from the same posting list tuple (tiebreak on
1534 : : * TID). This order is convenient to work with here.
1535 : : *
1536 : : * Note: We also rely on the id field of each deltids element "capturing" this
1537 : : * original leaf-page-wise order. That is, we expect to be able to get back
1538 : : * to the original leaf-page-wise order just by sorting deltids on the id
1539 : : * field (tableam will sort deltids for its own reasons, so we'll need to put
1540 : : * it back in leaf-page-wise order afterwards).
1541 : : */
1542 : : void
1938 pg@bowt.ie 1543 : 7873 : _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel,
1544 : : TM_IndexDeleteOp *delstate)
1545 : : {
1546 : 7873 : Page page = BufferGetPage(buf);
1547 : : TransactionId snapshotConflictHorizon;
1548 : : bool isCatalogRel;
1549 : 7873 : OffsetNumber postingidxoffnum = InvalidOffsetNumber;
1550 : 7873 : int ndeletable = 0,
1551 : 7873 : nupdatable = 0;
1552 : : OffsetNumber deletable[MaxIndexTuplesPerPage];
1553 : : BTVacuumPosting updatable[MaxIndexTuplesPerPage];
1554 : :
1555 : : /* Use tableam interface to determine which tuples to delete first */
1265 1556 : 7873 : snapshotConflictHorizon = table_index_delete_tuples(heapRel, delstate);
1060 1557 [ + + + + : 7873 : isCatalogRel = RelationIsAccessibleInLogicalDecoding(heapRel);
+ - - + -
- - - + +
- + - - -
- - - ]
1558 : :
1559 : : /* Should not WAL-log snapshotConflictHorizon unless it's required */
1265 1560 [ + + ]: 7873 : if (!XLogStandbyInfoActive())
1561 : 1292 : snapshotConflictHorizon = InvalidTransactionId;
1562 : :
1563 : : /*
1564 : : * Construct a leaf-page-wise description of what _bt_delitems_delete()
1565 : : * needs to do to physically delete index tuples from the page.
1566 : : *
1567 : : * Must sort deltids array to restore leaf-page-wise order (original order
1568 : : * before call to tableam). This is the order that the loop expects.
1569 : : *
1570 : : * Note that deltids array might be a lot smaller now. It might even have
1571 : : * no entries at all (with bottom-up deletion caller), in which case there
1572 : : * is nothing left to do.
1573 : : */
1938 1574 : 7873 : qsort(delstate->deltids, delstate->ndeltids, sizeof(TM_IndexDelete),
1575 : : _bt_delitems_cmp);
1576 [ + + ]: 7873 : if (delstate->ndeltids == 0)
1577 : : {
1578 [ - + ]: 2365 : Assert(delstate->bottomup);
1579 : 2365 : return;
1580 : : }
1581 : :
1582 : : /* We definitely have to delete at least one index tuple (or one TID) */
1583 [ + + ]: 464922 : for (int i = 0; i < delstate->ndeltids; i++)
1584 : : {
1585 : 459414 : TM_IndexStatus *dstatus = delstate->status + delstate->deltids[i].id;
1586 : 459414 : OffsetNumber idxoffnum = dstatus->idxoffnum;
1587 : 459414 : ItemId itemid = PageGetItemId(page, idxoffnum);
1588 : 459414 : IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
1589 : : int nestedi,
1590 : : nitem;
1591 : : BTVacuumPosting vacposting;
1592 : :
1593 [ + - + - : 459414 : Assert(OffsetNumberIsValid(idxoffnum));
- + ]
1594 : :
1595 [ + + ]: 459414 : if (idxoffnum == postingidxoffnum)
1596 : : {
1597 : : /*
1598 : : * This deltid entry is a TID from a posting list tuple that has
1599 : : * already been completely processed
1600 : : */
1601 [ - + ]: 18281 : Assert(BTreeTupleIsPosting(itup));
1602 [ - + ]: 18281 : Assert(ItemPointerCompare(BTreeTupleGetHeapTID(itup),
1603 : : &delstate->deltids[i].tid) < 0);
1604 [ - + ]: 18281 : Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(itup),
1605 : : &delstate->deltids[i].tid) >= 0);
1606 : 18281 : continue;
1607 : : }
1608 : :
1609 [ + + ]: 441133 : if (!BTreeTupleIsPosting(itup))
1610 : : {
1611 : : /* Plain non-pivot tuple */
1612 [ - + ]: 425709 : Assert(ItemPointerEquals(&itup->t_tid, &delstate->deltids[i].tid));
1613 [ + + ]: 425709 : if (dstatus->knowndeletable)
1614 : 342733 : deletable[ndeletable++] = idxoffnum;
1615 : 425709 : continue;
1616 : : }
1617 : :
1618 : : /*
1619 : : * itup is a posting list tuple whose lowest deltids entry (which may
1620 : : * or may not be for the first TID from itup) is considered here now.
1621 : : * We should process all of the deltids entries for the posting list
1622 : : * together now, though (not just the lowest). Remember to skip over
1623 : : * later itup-related entries during later iterations of outermost
1624 : : * loop.
1625 : : */
1626 : 15424 : postingidxoffnum = idxoffnum; /* Remember work in outermost loop */
1627 : 15424 : nestedi = i; /* Initialize for first itup deltids entry */
1628 : 15424 : vacposting = NULL; /* Describes final action for itup */
1629 : 15424 : nitem = BTreeTupleGetNPosting(itup);
1630 [ + + ]: 71526 : for (int p = 0; p < nitem; p++)
1631 : : {
1632 : 56102 : ItemPointer ptid = BTreeTupleGetPostingN(itup, p);
1633 : 56102 : int ptidcmp = -1;
1634 : :
1635 : : /*
1636 : : * This nested loop reuses work across ptid TIDs taken from itup.
1637 : : * We take advantage of the fact that both itup's TIDs and deltids
1638 : : * entries (within a single itup/posting list grouping) must both
1639 : : * be in ascending TID order.
1640 : : */
1641 [ + + ]: 80710 : for (; nestedi < delstate->ndeltids; nestedi++)
1642 : : {
1643 : 76923 : TM_IndexDelete *tcdeltid = &delstate->deltids[nestedi];
1644 : 76923 : TM_IndexStatus *tdstatus = (delstate->status + tcdeltid->id);
1645 : :
1646 : : /* Stop once we get past all itup related deltids entries */
1647 [ - + ]: 76923 : Assert(tdstatus->idxoffnum >= idxoffnum);
1648 [ + + ]: 76923 : if (tdstatus->idxoffnum != idxoffnum)
1649 : 15782 : break;
1650 : :
1651 : : /* Skip past non-deletable itup related entries up front */
1652 [ + + ]: 61141 : if (!tdstatus->knowndeletable)
1653 : 6071 : continue;
1654 : :
1655 : : /* Entry is first partial ptid match (or an exact match)? */
1656 : 55070 : ptidcmp = ItemPointerCompare(&tcdeltid->tid, ptid);
1657 [ + + ]: 55070 : if (ptidcmp >= 0)
1658 : : {
1659 : : /* Greater than or equal (partial or exact) match... */
1660 : 36533 : break;
1661 : : }
1662 : : }
1663 : :
1664 : : /* ...exact ptid match to a deletable deltids entry? */
1665 [ + + ]: 56102 : if (ptidcmp != 0)
1666 : 28468 : continue;
1667 : :
1668 : : /* Exact match for deletable deltids entry -- ptid gets deleted */
1669 [ + + ]: 27634 : if (vacposting == NULL)
1670 : : {
1671 : 13531 : vacposting = palloc(offsetof(BTVacuumPostingData, deletetids) +
1672 : : nitem * sizeof(uint16));
1673 : 13531 : vacposting->itup = itup;
1674 : 13531 : vacposting->updatedoffset = idxoffnum;
1675 : 13531 : vacposting->ndeletedtids = 0;
1676 : : }
1677 : 27634 : vacposting->deletetids[vacposting->ndeletedtids++] = p;
1678 : : }
1679 : :
1680 : : /* Final decision on itup, a posting list tuple */
1681 : :
1682 [ + + ]: 15424 : if (vacposting == NULL)
1683 : : {
1684 : : /* No TIDs to delete from itup -- do nothing */
1685 : : }
1686 [ + + ]: 13531 : else if (vacposting->ndeletedtids == nitem)
1687 : : {
1688 : : /* Straight delete of itup (to delete all TIDs) */
1689 : 7544 : deletable[ndeletable++] = idxoffnum;
1690 : : /* Turns out we won't need granular information */
1691 : 7544 : pfree(vacposting);
1692 : : }
1693 : : else
1694 : : {
1695 : : /* Delete some (but not all) TIDs from itup */
1696 [ + - - + ]: 5987 : Assert(vacposting->ndeletedtids > 0 &&
1697 : : vacposting->ndeletedtids < nitem);
1698 : 5987 : updatable[nupdatable++] = vacposting;
1699 : : }
1700 : : }
1701 : :
1702 : : /* Physically delete tuples (or TIDs) using deletable (or updatable) */
1060 1703 : 5508 : _bt_delitems_delete(rel, buf, snapshotConflictHorizon, isCatalogRel,
1704 : : deletable, ndeletable, updatable, nupdatable);
1705 : :
1706 : : /* be tidy */
1938 1707 [ + + ]: 11495 : for (int i = 0; i < nupdatable; i++)
1708 : 5987 : pfree(updatable[i]);
1709 : : }
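/*
 * Worked example of the posting list handling above (illustrative only):
 * suppose itup is a posting list tuple with TIDs (t1, t2, t3), and deltids
 * contains an entry for t2 that is knowndeletable plus an entry for t3 that
 * is not.  The nested loop then builds a vacposting with ndeletedtids = 1
 * and deletetids[0] = 1 (t2's position within the posting list).  Since
 * 1 < nitem, the tuple goes into updatable[] and is rewritten by
 * _bt_delitems_delete() rather than deleted outright.
 */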
1710 : :
1711 : : /*
1712 : : * Check that leftsib page (the btpo_prev of target page) is not marked with
1713 : : * INCOMPLETE_SPLIT flag. Used during page deletion.
1714 : : *
1715 : : * Returning true indicates that page flag is set in leftsib (which is
1716 : : * definitely still the left sibling of target). When that happens, the
1717 : : * target doesn't have a downlink in parent, and the page deletion algorithm
1718 : : * isn't prepared to handle that. Deletion of the target page (or the whole
1719 : : * subtree that contains the target page) cannot take place.
1720 : : *
1721 : : * Caller should not have a lock on the target page itself, since pages on the
1722 : : * same level must always be locked left to right to avoid deadlocks.
1723 : : */
1724 : : static bool
1060 1725 : 3787 : _bt_leftsib_splitflag(Relation rel, BlockNumber leftsib, BlockNumber target)
1726 : : {
1727 : : Buffer buf;
1728 : : Page page;
1729 : : BTPageOpaque opaque;
1730 : : bool result;
1731 : :
1732 : : /* Easy case: No left sibling */
2189 1733 [ + + ]: 3787 : if (leftsib == P_NONE)
1734 : 2843 : return false;
1735 : :
1060 1736 : 944 : buf = _bt_getbuf(rel, leftsib, BT_READ);
2189 1737 : 944 : page = BufferGetPage(buf);
1495 michael@paquier.xyz 1738 : 944 : opaque = BTPageGetOpaque(page);
1739 : :
1740 : : /*
1741 : : * If the left sibling was concurrently split, so that its next-pointer
1742 : : * doesn't point to the current page anymore, the split that created
1743 : : * target must be completed. Caller can reasonably expect that there will
1744 : : * be a downlink to the target page that it can relocate using its stack.
1745 : : * (We don't allow splitting an incompletely split page again until the
1746 : : * previous split has been completed.)
1747 : : */
2189 pg@bowt.ie 1748 [ + - - + ]: 944 : result = (opaque->btpo_next == target && P_INCOMPLETE_SPLIT(opaque));
1749 : 944 : _bt_relbuf(rel, buf);
1750 : :
1751 : 944 : return result;
1752 : : }
1753 : :
1754 : : /*
1755 : : * Check that leafrightsib page (the btpo_next of target leaf page) is not
1756 : : * marked with ISHALFDEAD flag. Used during page deletion.
1757 : : *
1758 : : * Returning true indicates that page flag is set in leafrightsib, so page
1759 : : * deletion cannot go ahead. Our caller is not prepared to deal with the case
1760 : : * where the parent page does not have a pivot tuple whose downlink points to
1761 : : * leafrightsib (due to an earlier interrupted VACUUM operation). It doesn't
1762 : : * seem worth going to the trouble of teaching our caller to deal with it.
1763 : : * The situation will be resolved after VACUUM finishes the deletion of the
1764 : : * half-dead page (when a future VACUUM operation reaches the target page
1765 : : * again).
1766 : : *
1767 : : * _bt_leftsib_splitflag() is called for both leaf pages and internal pages.
1768 : : * _bt_rightsib_halfdeadflag() is only called for leaf pages, though. This is
1769 : : * okay because of the restriction on deleting pages that are the rightmost
1770 : : * page of their parent (i.e. that such deletions can only take place when the
1771 : : * entire subtree must be deleted). The leaf level check made here will apply
1772 : : * to a right "cousin" leaf page rather than a simple right sibling leaf page
1773 : : * in cases where caller actually goes on to attempt deleting pages that are
1774 : : * above the leaf page. The right cousin leaf page is representative of the
1775 : : * left edge of the subtree to the right of the to-be-deleted subtree as a
1776 : : * whole, which is exactly the condition that our caller cares about.
1777 : : * (Besides, internal pages are never marked half-dead, so it isn't even
1778 : : * possible to _directly_ assess if an internal page is part of some other
1779 : : * to-be-deleted subtree.)
1780 : : */
1781 : : static bool
1060 1782 : 3660 : _bt_rightsib_halfdeadflag(Relation rel, BlockNumber leafrightsib)
1783 : : {
1784 : : Buffer buf;
1785 : : Page page;
1786 : : BTPageOpaque opaque;
1787 : : bool result;
1788 : :
2189 1789 [ - + ]: 3660 : Assert(leafrightsib != P_NONE);
1790 : :
1060 1791 : 3660 : buf = _bt_getbuf(rel, leafrightsib, BT_READ);
3667 kgrittn@postgresql.o 1792 : 3660 : page = BufferGetPage(buf);
1495 michael@paquier.xyz 1793 : 3660 : opaque = BTPageGetOpaque(page);
1794 : :
2189 pg@bowt.ie 1795 [ + - - + ]: 3660 : Assert(P_ISLEAF(opaque) && !P_ISDELETED(opaque));
4363 heikki.linnakangas@i 1796 : 3660 : result = P_ISHALFDEAD(opaque);
1797 : 3660 : _bt_relbuf(rel, buf);
1798 : :
1799 : 3660 : return result;
1800 : : }
1801 : :
1802 : : /*
1803 : : * _bt_pagedel() -- Delete a leaf page from the b-tree, if legal to do so.
1804 : : *
1805 : : * This action unlinks the leaf page from the b-tree structure, removing all
1806 : : * pointers leading to it --- but not touching its own left and right links.
1807 : : * The page cannot be physically reclaimed right away, since other processes
1808 : : * may currently be trying to follow links leading to the page; they have to
1809 : : * be allowed to use its right-link to recover. See nbtree/README.
1810 : : *
1811 : : * On entry, the target buffer must be pinned and locked (either read or write
1812 : : * lock is OK). The page must be an empty leaf page, which may be half-dead
1813 : : * already (a half-dead page should only be passed to us when an earlier
1814 : : * VACUUM operation was interrupted, though). Note in particular that caller
1815 : : * should never pass a buffer containing an existing deleted page here. The
1816 : : * lock and pin on caller's buffer will be dropped before we return.
1817 : : *
1818 : : * Maintains bulk delete stats for caller, which are taken from vstate. We
1819 : : * need to cooperate closely with caller here so that whole VACUUM operation
1820 : : * reliably avoids any double counting of subsidiary-to-leafbuf pages that we
1821 : : * delete in passing. If such pages happen to be from a block number that is
1822 : : * ahead of the current scanblkno position, then caller is expected to count
1823 : : * them directly later on. It's simpler for us to understand caller's
1824 : : * requirements than it would be for caller to understand when or how a
1825 : : * deleted page became deleted after the fact.
1826 : : *
1827 : : * NOTE: this leaks memory. Rather than trying to clean up everything
1828 : : * carefully, it's better to run it in a temp context that can be reset
1829 : : * frequently.
1830 : : */
1831 : : void
1895 pg@bowt.ie 1832 : 3785 : _bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate)
1833 : : {
1834 : : BlockNumber rightsib;
1835 : : bool rightsib_empty;
1836 : : Page page;
1837 : : BTPageOpaque opaque;
1838 : :
1839 : : /*
1840 : : * Save original leafbuf block number from caller. Only deleted blocks
1841 : : * that are <= scanblkno are added to bulk delete stat's pages_deleted
1842 : : * count.
1843 : : */
2195 1844 : 3785 : BlockNumber scanblkno = BufferGetBlockNumber(leafbuf);
1845 : :
1846 : : /*
1847 : : * "stack" is a search stack leading (approximately) to the target page.
1848 : : * It is initially NULL, but when iterating, we keep it to avoid
1849 : : * duplicated search effort.
1850 : : *
1851 : : * Also, when "stack" is not NULL, we have already checked that the
1852 : : * current page is not the right half of an incomplete split, i.e. the
1853 : : * left sibling does not have its INCOMPLETE_SPLIT flag set, including
1854 : : * when the current target page is to the right of caller's initial page
1855 : : * (the scanblkno page).
1856 : : */
4435 heikki.linnakangas@i 1857 : 3785 : BTStack stack = NULL;
1858 : :
1859 : : for (;;)
1860 : : {
2195 pg@bowt.ie 1861 : 7447 : page = BufferGetPage(leafbuf);
1495 michael@paquier.xyz 1862 : 7447 : opaque = BTPageGetOpaque(page);
1863 : :
1864 : : /*
1865 : : * Internal pages are never deleted directly, only as part of deleting
1866 : : * the whole subtree all the way down to leaf level.
1867 : : *
1868 : : * Also check for deleted pages here. Caller never passes us a fully
1869 : : * deleted page. Only VACUUM can delete pages, so there can't have
1870 : : * been a concurrent deletion. Assume that we reached any deleted
1871 : : * page encountered here by following a sibling link, and that the
1872 : : * index is corrupt.
1873 : : */
2195 pg@bowt.ie 1874 [ - + ]: 7447 : Assert(!P_ISDELETED(opaque));
1875 [ + - - + ]: 7447 : if (!P_ISLEAF(opaque) || P_ISDELETED(opaque))
1876 : : {
1877 : : /*
1878 : : * Pre-9.4 page deletion only marked internal pages as half-dead,
1879 : : * but now we only use that flag on leaf pages. The old algorithm
1880 : : * was never supposed to leave half-dead pages in the tree, it was
1881 : : * just a transient state, but it was nevertheless possible in
1882 : : * error scenarios. We don't know how to deal with them here. They
1883 : : * are harmless as far as searches are concerned, but inserts
1884 : : * into the deleted keyspace could add out-of-order downlinks in
1885 : : * the upper levels. Log a notice, hopefully the admin will notice
1886 : : * and reindex.
1887 : : */
4435 heikki.linnakangas@i 1888 [ # # ]:UBC 0 : if (P_ISHALFDEAD(opaque))
1889 [ # # ]: 0 : ereport(LOG,
1890 : : (errcode(ERRCODE_INDEX_CORRUPTED),
1891 : : errmsg("index \"%s\" contains a half-dead internal page",
1892 : : RelationGetRelationName(rel)),
1893 : : errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
1894 : :
2195 pg@bowt.ie 1895 [ # # ]: 0 : if (P_ISDELETED(opaque))
1896 [ # # ]: 0 : ereport(LOG,
1897 : : (errcode(ERRCODE_INDEX_CORRUPTED),
1898 : : errmsg_internal("found deleted block %u while following right link from block %u in index \"%s\"",
1899 : : BufferGetBlockNumber(leafbuf),
1900 : : scanblkno,
1901 : : RelationGetRelationName(rel))));
1902 : :
1903 : 0 : _bt_relbuf(rel, leafbuf);
1895 pg@bowt.ie 1904 :CBC 139 : return;
1905 : : }
1906 : :
1907 : : /*
1908 : : * We can never delete rightmost pages nor root pages. While at it,
1909 : : * check that page is empty, since it's possible that the leafbuf page
1910 : : * was empty a moment ago, but has since had some inserts.
1911 : : *
1912 : : * To keep the algorithm simple, we also never delete an incompletely
1913 : : * split page (they should be rare enough that this doesn't make any
1914 : : * meaningful difference to disk usage):
1915 : : *
1916 : : * The INCOMPLETE_SPLIT flag on the page tells us if the page is the
1917 : : * left half of an incomplete split, but ensuring that it's not the
1918 : : * right half is more complicated. For that, we have to check that
1919 : : * the left sibling doesn't have its INCOMPLETE_SPLIT flag set using
1920 : : * _bt_leftsib_splitflag(). On the first iteration, we temporarily
1921 : : * release the lock on scanblkno/leafbuf, check the left sibling, and
1922 : : * construct a search stack to scanblkno. On subsequent iterations,
1923 : : * we know we stepped right from a page that passed these tests, so
1924 : : * it's OK.
1925 : : */
2195 1926 [ + + + - ]: 7447 : if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) ||
4431 heikki.linnakangas@i 1927 [ - + + - ]: 7315 : P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
1928 [ - + ]: 7315 : P_INCOMPLETE_SPLIT(opaque))
1929 : : {
1930 : : /* Should never fail to delete a half-dead page */
4435 1931 [ - + ]: 132 : Assert(!P_ISHALFDEAD(opaque));
1932 : :
2195 pg@bowt.ie 1933 : 132 : _bt_relbuf(rel, leafbuf);
1895 1934 : 132 : return;
1935 : : }
1936 : :
1937 : : /*
1938 : : * First, remove downlink pointing to the page (or a parent of the
1939 : : * page, if we are going to delete a taller subtree), and mark the
1940 : : * leafbuf page half-dead
1941 : : */
4435 heikki.linnakangas@i 1942 [ + + ]: 7315 : if (!P_ISHALFDEAD(opaque))
1943 : : {
1944 : : /*
1945 : : * We need an approximate pointer to the page's parent page. We
1946 : : * use a variant of the standard search mechanism to search for
1947 : : * the page's high key; this will give us a link to either the
1948 : : * current parent or someplace to its left (if there are multiple
1949 : : * equal high keys, which is possible with !heapkeyspace indexes).
1950 : : *
1951 : : * Also check if this is the right-half of an incomplete split
1952 : : * (see comment above).
1953 : : */
1954 [ + + ]: 7314 : if (!stack)
1955 : 3654 : {
1956 : : BTScanInsert itup_key;
1957 : : ItemId itemid;
1958 : : IndexTuple targetkey;
1959 : : BlockNumber leftsib,
1960 : : leafblkno;
1961 : : Buffer sleafbuf;
1962 : :
1963 : 3654 : itemid = PageGetItemId(page, P_HIKEY);
1964 : 3654 : targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
1965 : :
4431 1966 : 3654 : leftsib = opaque->btpo_prev;
2185 pg@bowt.ie 1967 : 3654 : leafblkno = BufferGetBlockNumber(leafbuf);
1968 : :
1969 : : /*
1970 : : * To avoid deadlocks, we'd better drop the leaf page lock
1971 : : * before going further.
1972 : : */
2114 1973 : 3654 : _bt_unlockbuf(rel, leafbuf);
1974 : :
1975 : : /*
1976 : : * Check that the left sibling of leafbuf (if any) is not
1977 : : * marked with INCOMPLETE_SPLIT flag before proceeding
1978 : : */
2185 1979 [ - + ]: 3654 : Assert(leafblkno == scanblkno);
1060 1980 [ - + ]: 3654 : if (_bt_leftsib_splitflag(rel, leftsib, leafblkno))
1981 : : {
2189 pg@bowt.ie 1982 :UBC 0 : ReleaseBuffer(leafbuf);
1895 1983 : 0 : return;
1984 : : }
1985 : :
1986 : : /*
1987 : : * We need an insertion scan key, so build one.
1988 : : *
1989 : : * _bt_search searches for the leaf page that contains any
1990 : : * matching non-pivot tuples, but we need it to "search" for
1991 : : * the high key pivot from the page that we're set to delete.
1992 : : * Compensate for the mismatch by having _bt_search locate the
1993 : : * last position < equal-to-untruncated-prefix non-pivots.
1994 : : */
1060 pg@bowt.ie 1995 :CBC 3654 : itup_key = _bt_mkscankey(rel, targetkey);
1996 : :
1997 : : /* Set up a BTLessStrategyNumber-like insertion scan key */
879 1998 : 3654 : itup_key->nextkey = false;
1999 : 3654 : itup_key->backward = true;
54 pg@bowt.ie 2000 :GNC 3654 : stack = _bt_search(rel, NULL, itup_key, &sleafbuf, BT_READ, true);
2001 : : /* won't need a second lock or pin on leafbuf */
2185 pg@bowt.ie 2002 :CBC 3654 : _bt_relbuf(rel, sleafbuf);
2003 : :
2004 : : /*
2005 : : * Re-lock the leaf page, and start over to use our stack
2006 : : * within _bt_mark_page_halfdead. We must do it that way
2007 : : * because it's possible that leafbuf can no longer be
2008 : : * deleted. We need to recheck.
2009 : : *
2010 : : * Note: We can't simply hold on to the sleafbuf lock instead,
2011 : : * because it's barely possible that sleafbuf is not the same
2012 : : * page as leafbuf. This happens when leafbuf split after our
2013 : : * original lock was dropped, but before _bt_search finished
2014 : : * its descent. We rely on the assumption that we'll find
2015 : : * leafbuf isn't safe to delete anymore in this scenario.
2016 : : * (Page deletion can cope with the stack being to the left of
2017 : : * leafbuf, but not to the right of leafbuf.)
2018 : : */
2114 2019 : 3654 : _bt_lockbuf(rel, leafbuf, BT_WRITE);
4435 heikki.linnakangas@i 2020 : 3654 : continue;
2021 : : }
2022 : :
2023 : : /*
2024 : : * See if it's safe to delete the leaf page, and determine how
2025 : : * many parent/internal pages above the leaf level will be
2026 : : * deleted. If it's safe then _bt_mark_page_halfdead will also
2027 : : * perform the first phase of deletion, which includes marking the
2028 : : * leafbuf page half-dead.
2029 : : */
2195 pg@bowt.ie 2030 [ + - - + ]: 3660 : Assert(P_ISLEAF(opaque) && !P_IGNORE(opaque));
1060 2031 [ + + ]: 3660 : if (!_bt_mark_page_halfdead(rel, vstate->info->heaprel, leafbuf,
2032 : : stack))
2033 : : {
2195 2034 : 7 : _bt_relbuf(rel, leafbuf);
1895 2035 : 7 : return;
2036 : : }
2037 : : }
2038 : : else
2039 : : {
154 heikki.linnakangas@i 2040 :GNC 1 : INJECTION_POINT("nbtree-finish-half-dead-page-vacuum", NULL);
2041 : : }
2042 : :
2043 : : /*
2044 : : * Then unlink it from its siblings. Each call to
2045 : : * _bt_unlink_halfdead_page unlinks the topmost page from the subtree,
2046 : : * making it shallower. Iterate until the leafbuf page is deleted.
2047 : : */
4435 heikki.linnakangas@i 2048 :CBC 3654 : rightsib_empty = false;
2195 pg@bowt.ie 2049 [ + - - + ]: 3654 : Assert(P_ISLEAF(opaque) && P_ISHALFDEAD(opaque));
4435 heikki.linnakangas@i 2050 [ + + ]: 7426 : while (P_ISHALFDEAD(opaque))
2051 : : {
2052 : : /* Check for interrupts in _bt_unlink_halfdead_page */
2195 pg@bowt.ie 2053 [ - + ]: 3773 : if (!_bt_unlink_halfdead_page(rel, leafbuf, scanblkno,
2054 : : &rightsib_empty, vstate))
2055 : : {
2056 : : /*
2057 : : * _bt_unlink_halfdead_page should never fail, since we
2058 : : * established that deletion is generally safe in
2059 : : * _bt_mark_page_halfdead -- index must be corrupt.
2060 : : *
2061 : : * Note that _bt_unlink_halfdead_page already released the
2062 : : * lock and pin on leafbuf for us.
2063 : : */
1916 pg@bowt.ie 2064 :UBC 0 : Assert(false);
2065 : : return;
2066 : : }
2067 : : }
2068 : :
2195 pg@bowt.ie 2069 [ + - - + ]:CBC 3653 : Assert(P_ISLEAF(opaque) && P_ISDELETED(opaque));
2070 : :
4435 heikki.linnakangas@i 2071 : 3653 : rightsib = opaque->btpo_next;
2072 : :
2195 pg@bowt.ie 2073 : 3653 : _bt_relbuf(rel, leafbuf);
2074 : :
2075 : : /*
2076 : : * Check here, as calling loops will have locks held, preventing
2077 : : * interrupts from being processed.
2078 : : */
2862 andres@anarazel.de 2079 [ - + ]: 3653 : CHECK_FOR_INTERRUPTS();
2080 : :
2081 : : /*
2082 : : * The page has now been deleted. If its right sibling is completely
2083 : : * empty, it's possible that the reason we haven't deleted it earlier
2084 : : * is that it was the rightmost child of the parent. Now that we
2085 : : * removed the downlink for this page, the right sibling might now be
2086 : : * the only child of the parent, and could be removed. It would be
2087 : : * picked up by the next vacuum anyway, but might as well try to
2088 : : * remove it now, so loop back to process the right sibling.
2089 : : *
2090 : : * Note: This relies on the assumption that _bt_getstackbuf() will be
2091 : : * able to reuse our original descent stack with a different child
2092 : : * block (provided that the child block is to the right of the
2093 : : * original leaf page reached by _bt_search()). It will even update
2094 : : * the descent stack each time we loop around, avoiding repeated work.
2095 : : */
4435 heikki.linnakangas@i 2096 [ + + ]: 3653 : if (!rightsib_empty)
2097 : 3645 : break;
2098 : :
1060 pg@bowt.ie 2099 : 8 : leafbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
2100 : : }
2101 : : }
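/*
 * Illustrative scenario for the loop-back above (not taken from a real
 * index): suppose the leaf page just deleted had an empty right sibling that
 * could not be deleted earlier because it was the rightmost child of their
 * shared parent.  Now that the deleted page's downlink is gone, the sibling
 * may have become deletable, so the loop reacquires it as the new leafbuf
 * and tries again, reusing (and updating) the same descent stack rather than
 * descending from the root a second time.
 */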
2102 : :
2103 : : /*
2104 : : * First stage of page deletion.
2105 : : *
2106 : : * Establish the height of the to-be-deleted subtree with leafbuf at its
2107 : : * lowest level, remove the downlink to the subtree, and mark leafbuf
2108 : : * half-dead. The final to-be-deleted subtree is usually just leafbuf itself,
2109 : : * but may include additional internal pages (at most one per level of the
2110 : : * tree below the root).
2111 : : *
2112 : : * Caller must pass a valid heaprel, since it's just about possible that our
2113 : : * call to _bt_lock_subtree_parent will need to allocate a new index page to
2114 : : * complete a page split. Every call to _bt_allocbuf needs to pass a heaprel.
2115 : : *
2116 : : * Returns 'false' if leafbuf is unsafe to delete, usually because leafbuf is
2117 : : * the rightmost child of its parent (and parent has more than one downlink).
2118 : : * Returns 'true' when the first stage of page deletion completed
2119 : : * successfully.
2120 : : */
2121 : : static bool
1130 andres@anarazel.de 2122 : 3660 : _bt_mark_page_halfdead(Relation rel, Relation heaprel, Buffer leafbuf,
2123 : : BTStack stack)
2124 : : {
2125 : : BlockNumber leafblkno;
2126 : : BlockNumber leafrightsib;
2127 : : BlockNumber topparent;
2128 : : BlockNumber topparentrightsib;
2129 : : ItemId itemid;
2130 : : Page page;
2131 : : BTPageOpaque opaque;
2132 : : Buffer subtreeparent;
2133 : : OffsetNumber poffset;
2134 : : OffsetNumber nextoffset;
2135 : : IndexTuple itup;
2136 : : IndexTupleData trunctuple;
2137 : : XLogRecPtr recptr;
2138 : :
3667 kgrittn@postgresql.o 2139 : 3660 : page = BufferGetPage(leafbuf);
1495 michael@paquier.xyz 2140 : 3660 : opaque = BTPageGetOpaque(page);
2141 : :
2185 pg@bowt.ie 2142 [ + - + - : 3660 : Assert(!P_RIGHTMOST(opaque) && !P_ISROOT(opaque) &&
+ - + - -
+ - + ]
2143 : : P_ISLEAF(opaque) && !P_IGNORE(opaque) &&
2144 : : P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
1060 2145 [ - + ]: 3660 : Assert(heaprel != NULL);
2146 : :
2147 : : /*
2148 : : * Save info about the leaf page.
2149 : : */
4435 heikki.linnakangas@i 2150 : 3660 : leafblkno = BufferGetBlockNumber(leafbuf);
2151 : 3660 : leafrightsib = opaque->btpo_next;
2152 : :
2153 : : /*
2154 : : * Before attempting to lock the parent page, check that the right sibling
2155 : : * is not in half-dead state. A half-dead right sibling would have no
2156 : : * downlink in the parent, which would be highly confusing later when we
2157 : : * delete the downlink. It would fail the "right sibling of target page
2158 : : * is also the next child in parent page" cross-check below.
2159 : : */
1060 pg@bowt.ie 2160 [ - + ]: 3660 : if (_bt_rightsib_halfdeadflag(rel, leafrightsib))
2161 : : {
4363 heikki.linnakangas@i 2162 [ # # ]:UBC 0 : elog(DEBUG1, "could not delete page %u because its right sibling %u is half-dead",
2163 : : leafblkno, leafrightsib);
2164 : 0 : return false;
2165 : : }
2166 : :
2167 : : /*
2168 : : * We cannot delete a page that is the rightmost child of its immediate
2169 : : * parent, unless it is the only child --- in which case the parent has to
2170 : : * be deleted too, and the same condition applies recursively to it. We
2171 : : * have to check this condition all the way up before trying to delete,
2172 : : * and lock the parent of the root of the to-be-deleted subtree (the
2173 : : * "subtree parent"). _bt_lock_subtree_parent() locks the subtree parent
2174 : : * for us. We remove the downlink to the "top parent" page (subtree root
2175 : : * page) from the subtree parent page below.
2176 : : *
2177 : : * Initialize topparent to be leafbuf page now. The final to-be-deleted
2178 : : * subtree is often a degenerate one page subtree consisting only of the
2179 : : * leafbuf page. When that happens, the leafbuf page is the final subtree
2180 : : * root page/top parent page.
2181 : : */
2185 pg@bowt.ie 2182 :CBC 3660 : topparent = leafblkno;
2183 : 3660 : topparentrightsib = leafrightsib;
1130 andres@anarazel.de 2184 [ + + ]: 3660 : if (!_bt_lock_subtree_parent(rel, heaprel, leafblkno, stack,
2185 : : &subtreeparent, &poffset,
2186 : : &topparent, &topparentrightsib))
4435 heikki.linnakangas@i 2187 : 7 : return false;
2188 : :
2185 pg@bowt.ie 2189 : 3653 : page = BufferGetPage(subtreeparent);
1495 michael@paquier.xyz 2190 : 3653 : opaque = BTPageGetOpaque(page);
2191 : :
2192 : : #ifdef USE_ASSERT_CHECKING
2193 : :
2194 : : /*
2195 : : * This is just an assertion because _bt_lock_subtree_parent should have
2196 : : * guaranteed tuple has the expected contents
2197 : : */
2185 pg@bowt.ie 2198 : 3653 : itemid = PageGetItemId(page, poffset);
4435 heikki.linnakangas@i 2199 : 3653 : itup = (IndexTuple) PageGetItem(page, itemid);
2185 pg@bowt.ie 2200 [ - + ]: 3653 : Assert(BTreeTupleGetDownLink(itup) == topparent);
2201 : : #endif
2202 : :
2203 : 3653 : nextoffset = OffsetNumberNext(poffset);
4435 heikki.linnakangas@i 2204 : 3653 : itemid = PageGetItemId(page, nextoffset);
2205 : 3653 : itup = (IndexTuple) PageGetItem(page, itemid);
2206 : :
2207 : : /*
2208 : : * Check that the parent-page index items we're about to delete/overwrite
2209 : : * in subtree parent page contain what we expect. This can fail if the
2210 : : * index has become corrupt for some reason. When that happens we back
2211 : : * out of deletion of the leafbuf subtree. (This is just like the case
2212 : : * where _bt_lock_subtree_parent() cannot "re-find" leafbuf's downlink.)
2213 : : */
2185 pg@bowt.ie 2214 [ - + ]: 3653 : if (BTreeTupleGetDownLink(itup) != topparentrightsib)
2215 : : {
1049 pg@bowt.ie 2216 [ # # ]:UBC 0 : ereport(LOG,
2217 : : (errcode(ERRCODE_INDEX_CORRUPTED),
2218 : : errmsg_internal("right sibling %u of block %u is not next child %u of block %u in index \"%s\"",
2219 : : topparentrightsib, topparent,
2220 : : BTreeTupleGetDownLink(itup),
2221 : : BufferGetBlockNumber(subtreeparent),
2222 : : RelationGetRelationName(rel))));
2223 : :
2224 : 0 : _bt_relbuf(rel, subtreeparent);
2225 : 0 : Assert(false);
2226 : : return false;
2227 : : }
2228 : :
2229 : : /*
2230 : : * Any insert which would have gone on the leaf block will now go to its
2231 : : * right sibling. In other words, the key space moves right.
2232 : : */
4435 heikki.linnakangas@i 2233 :CBC 3653 : PredicateLockPageCombine(rel, leafblkno, leafrightsib);
2234 : :
2235 : : /* No ereport(ERROR) until changes are logged */
2236 : 3653 : START_CRIT_SECTION();
2237 : :
2238 : : /*
2239 : : * Update parent of subtree. We want to delete the downlink to the top
2240 : : * parent page/root of the subtree, and the *following* key. Easiest way
2241 : : * is to copy the right sibling's downlink over the downlink that points
2242 : : * to top parent page, and then delete the right sibling's original pivot
2243 : : * tuple.
2244 : : *
2245 : : * Lanin and Shasha make the key space move left when deleting a page,
2246 : : * whereas the key space moves right here. That's why we cannot simply
2247 : : * delete the pivot tuple with the downlink to the top parent page. See
2248 : : * nbtree/README.
2249 : : */
2185 pg@bowt.ie 2250 : 3653 : page = BufferGetPage(subtreeparent);
1495 michael@paquier.xyz 2251 : 3653 : opaque = BTPageGetOpaque(page);
2252 : :
2185 pg@bowt.ie 2253 : 3653 : itemid = PageGetItemId(page, poffset);
4435 heikki.linnakangas@i 2254 : 3653 : itup = (IndexTuple) PageGetItem(page, itemid);
2185 pg@bowt.ie 2255 : 3653 : BTreeTupleSetDownLink(itup, topparentrightsib);
2256 : :
2257 : 3653 : nextoffset = OffsetNumberNext(poffset);
4435 heikki.linnakangas@i 2258 : 3653 : PageIndexTupleDelete(page, nextoffset);
2259 : :
2260 : : /*
2261 : : * Mark the leaf page as half-dead, and stamp it with a link to the top
2262 : : * parent page. When the leaf page is also the top parent page, the link
2263 : : * is set to InvalidBlockNumber.
2264 : : */
3667 kgrittn@postgresql.o 2265 : 3653 : page = BufferGetPage(leafbuf);
1495 michael@paquier.xyz 2266 : 3653 : opaque = BTPageGetOpaque(page);
4435 heikki.linnakangas@i 2267 : 3653 : opaque->btpo_flags |= BTP_HALF_DEAD;
2268 : :
2457 pg@bowt.ie 2269 [ - + ]: 3653 : Assert(PageGetMaxOffsetNumber(page) == P_HIKEY);
4395 heikki.linnakangas@i 2270 [ + - + - : 7306 : MemSet(&trunctuple, 0, sizeof(IndexTupleData));
+ - + - +
+ ]
2271 : 3653 : trunctuple.t_info = sizeof(IndexTupleData);
2185 pg@bowt.ie 2272 [ + + ]: 3653 : if (topparent != leafblkno)
2273 : 59 : BTreeTupleSetTopParent(&trunctuple, topparent);
2274 : : else
2934 teodor@sigaev.ru 2275 : 3594 : BTreeTupleSetTopParent(&trunctuple, InvalidBlockNumber);
2276 : :
190 peter@eisentraut.org 2277 [ - + ]:GNC 3653 : if (!PageIndexTupleOverwrite(page, P_HIKEY, &trunctuple, IndexTupleSize(&trunctuple)))
2457 pg@bowt.ie 2278 [ # # ]:UBC 0 : elog(ERROR, "could not overwrite high key in half-dead page");
2279 : :
2280 : : /* Must mark buffers dirty before XLogInsert */
2185 pg@bowt.ie 2281 :CBC 3653 : MarkBufferDirty(subtreeparent);
4435 heikki.linnakangas@i 2282 : 3653 : MarkBufferDirty(leafbuf);
2283 : :
2284 : : /* XLOG stuff */
2285 [ + - + + : 3653 : if (RelationNeedsWAL(rel))
+ - + - ]
4435 heikki.linnakangas@i 2286 :GIC 3653 : {
2287 : : xl_btree_mark_page_halfdead xlrec;
2288 : :
2185 pg@bowt.ie 2289 :CBC 3653 : xlrec.poffset = poffset;
4435 heikki.linnakangas@i 2290 : 3653 : xlrec.leafblk = leafblkno;
2185 pg@bowt.ie 2291 [ + + ]: 3653 : if (topparent != leafblkno)
2292 : 59 : xlrec.topparent = topparent;
2293 : : else
4395 heikki.linnakangas@i 2294 : 3594 : xlrec.topparent = InvalidBlockNumber;
2295 : :
4184 2296 : 3653 : XLogBeginInsert();
2297 : 3653 : XLogRegisterBuffer(0, leafbuf, REGBUF_WILL_INIT);
2185 pg@bowt.ie 2298 : 3653 : XLogRegisterBuffer(1, subtreeparent, REGBUF_STANDARD);
2299 : :
3667 kgrittn@postgresql.o 2300 : 3653 : page = BufferGetPage(leafbuf);
1495 michael@paquier.xyz 2301 : 3653 : opaque = BTPageGetOpaque(page);
4435 heikki.linnakangas@i 2302 : 3653 : xlrec.leftblk = opaque->btpo_prev;
2303 : 3653 : xlrec.rightblk = opaque->btpo_next;
2304 : :
448 peter@eisentraut.org 2305 : 3653 : XLogRegisterData(&xlrec, SizeOfBtreeMarkPageHalfDead);
2306 : :
4184 heikki.linnakangas@i 2307 : 3653 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_MARK_PAGE_HALFDEAD);
2308 : : }
2309 : : else
53 pg@bowt.ie 2310 :UNC 0 : recptr = XLogGetFakeLSN(rel);
2311 : :
53 pg@bowt.ie 2312 :GNC 3653 : page = BufferGetPage(subtreeparent);
2313 : 3653 : PageSetLSN(page, recptr);
2314 : 3653 : page = BufferGetPage(leafbuf);
2315 : 3653 : PageSetLSN(page, recptr);
2316 : :
4435 heikki.linnakangas@i 2317 [ - + ]:CBC 3653 : END_CRIT_SECTION();
2318 : :
2185 pg@bowt.ie 2319 : 3653 : _bt_relbuf(rel, subtreeparent);
4435 heikki.linnakangas@i 2320 : 3653 : return true;
2321 : : }
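/*
 * Illustrative before/after of the subtree parent update performed in
 * _bt_mark_page_halfdead() (a sketch of the general shape, not a trace of a
 * particular index).  Before, the subtree parent page holds two adjacent
 * pivot tuples:
 *
 *     ... [key K1, downlink -> topparent] [key K2, downlink -> topparentrightsib] ...
 *
 * After BTreeTupleSetDownLink() redirects the first downlink in place and
 * PageIndexTupleDelete() removes the second pivot, it holds:
 *
 *     ... [key K1, downlink -> topparentrightsib] ...
 *
 * so the keyspace that belonged to the to-be-deleted subtree now maps to its
 * right sibling, which is the "key space moves right" behavior the comments
 * above describe (and what PredicateLockPageCombine() accounts for at the
 * leaf level).
 */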
2322 : :
2323 : : /*
2324 : : * Second stage of page deletion.
2325 : : *
2326 : : * Unlinks a single page (in the subtree undergoing deletion) from its
2327 : : * siblings. Also marks the page deleted.
2328 : : *
2329 : : * To get rid of the whole subtree, including the leaf page itself, call here
2330 : : * until the leaf page is deleted. The original "top parent" established in
2331 : : * the first stage of deletion is deleted in the first call here, while the
2332 : : * leaf page is deleted in the last call here. Note that the leaf page itself
2333 : : * is often the initial top parent page.
2334 : : *
2335 : : * Returns 'false' if the page could not be unlinked (shouldn't happen). If
2336 : : * the right sibling of the current target page is empty, *rightsib_empty is
2337 : : * set to true, allowing caller to delete the target's right sibling page in
2338 : : * passing. Note that *rightsib_empty is only actually used by caller when
2339 : : * target page is leafbuf, following last call here for leafbuf/the subtree
2340 : : * containing leafbuf. (We always set *rightsib_empty for caller, just to be
2341 : : * consistent.)
2342 : : *
2343 : : * Must hold pin and lock on leafbuf at entry (read or write doesn't matter).
2344 : : * On success exit, we'll be holding pin and write lock. On failure exit,
2345 : : * we'll release both pin and lock before returning (we define it that way
2346 : : * to avoid having to reacquire a lock we already released).
2347 : : */
2348 : : static bool
2195 pg@bowt.ie 2349 : 3773 : _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
2350 : : bool *rightsib_empty, BTVacState *vstate)
2351 : : {
4435 heikki.linnakangas@i 2352 : 3773 : BlockNumber leafblkno = BufferGetBlockNumber(leafbuf);
1895 pg@bowt.ie 2353 : 3773 : IndexBulkDeleteResult *stats = vstate->stats;
2354 : : BlockNumber leafleftsib;
2355 : : BlockNumber leafrightsib;
2356 : : BlockNumber target;
2357 : : BlockNumber leftsib;
2358 : : BlockNumber rightsib;
4435 heikki.linnakangas@i 2359 : 3773 : Buffer lbuf = InvalidBuffer;
2360 : : Buffer buf;
2361 : : Buffer rbuf;
2362 : 3773 : Buffer metabuf = InvalidBuffer;
2363 : 3773 : Page metapg = NULL;
2364 : 3773 : BTMetaPageData *metad = NULL;
2365 : : ItemId itemid;
2366 : : Page page;
2367 : : BTPageOpaque opaque;
2368 : : FullTransactionId safexid;
2369 : : bool rightsib_is_rightmost;
2370 : : uint32 targetlevel;
2371 : : IndexTuple leafhikey;
2372 : : BlockNumber leaftopparent;
2373 : : XLogRecPtr recptr;
2374 : :
3667 kgrittn@postgresql.o 2375 : 3773 : page = BufferGetPage(leafbuf);
1495 michael@paquier.xyz 2376 : 3773 : opaque = BTPageGetOpaque(page);
2377 : :
2185 pg@bowt.ie 2378 [ + - + - : 3773 : Assert(P_ISLEAF(opaque) && !P_ISDELETED(opaque) && P_ISHALFDEAD(opaque));
- + ]
2379 : :
2380 : : /*
2381 : : * Remember some information about the leaf page.
2382 : : */
4435 heikki.linnakangas@i 2383 : 3773 : itemid = PageGetItemId(page, P_HIKEY);
2934 teodor@sigaev.ru 2384 : 3773 : leafhikey = (IndexTuple) PageGetItem(page, itemid);
2201 pg@bowt.ie 2385 : 3773 : target = BTreeTupleGetTopParent(leafhikey);
4435 heikki.linnakangas@i 2386 : 3773 : leafleftsib = opaque->btpo_prev;
2387 : 3773 : leafrightsib = opaque->btpo_next;
2388 : :
2114 pg@bowt.ie 2389 : 3773 : _bt_unlockbuf(rel, leafbuf);
2390 : :
154 heikki.linnakangas@i 2391 :GNC 3773 : INJECTION_POINT("nbtree-leave-page-half-dead", NULL);
2392 : :
2393 : : /*
2394 : : * Check here, as calling loops will have locks held, preventing
2395 : : * interrupts from being processed.
2396 : : */
2862 andres@anarazel.de 2397 [ + + ]:CBC 3772 : CHECK_FOR_INTERRUPTS();
2398 : :
2399 : : /* Unlink the current top parent of the subtree */
2201 pg@bowt.ie 2400 [ + + ]: 3772 : if (!BlockNumberIsValid(target))
2401 : : {
2402 : : /* Target is leaf page (or leaf page is top parent, if you prefer) */
2403 : 3653 : target = leafblkno;
2404 : :
2405 : 3653 : buf = leafbuf;
2406 : 3653 : leftsib = leafleftsib;
2407 : 3653 : targetlevel = 0;
2408 : : }
2409 : : else
2410 : : {
2411 : : /* Target is the internal page taken from leaf's top parent link */
3559 tgl@sss.pgh.pa.us 2412 [ - + ]: 119 : Assert(target != leafblkno);
2413 : :
2414 : : /* Fetch the block number of the target's left sibling */
1060 pg@bowt.ie 2415 : 119 : buf = _bt_getbuf(rel, target, BT_READ);
3667 kgrittn@postgresql.o 2416 : 119 : page = BufferGetPage(buf);
1495 michael@paquier.xyz 2417 : 119 : opaque = BTPageGetOpaque(page);
4435 heikki.linnakangas@i 2418 : 119 : leftsib = opaque->btpo_prev;
1896 pg@bowt.ie 2419 : 119 : targetlevel = opaque->btpo_level;
2201 2420 [ - + ]: 119 : Assert(targetlevel > 0);
2421 : :
2422 : : /*
2423 : : * To avoid deadlocks, we'd better drop the target page lock before
2424 : : * going further.
2425 : : */
2114 2426 : 119 : _bt_unlockbuf(rel, buf);
2427 : : }
2428 : :
2429 : : /*
2430 : : * We have to lock the pages we need to modify in the standard order:
2431 : : * moving right, then up. Else we will deadlock against other writers.
2432 : : *
2433 : : * So, first lock the leaf page, if it's not the target. Then find and
2434 : : * write-lock the current left sibling of the target page. The sibling
2435 : : * that was current a moment ago could have split, so we may have to move
2436 : : * right.
2437 : : */
4435 heikki.linnakangas@i 2438 [ + + ]: 3772 : if (target != leafblkno)
2114 pg@bowt.ie 2439 : 119 : _bt_lockbuf(rel, leafbuf, BT_WRITE);
8472 tgl@sss.pgh.pa.us 2440 [ + + ]: 3772 : if (leftsib != P_NONE)
2441 : : {
1060 pg@bowt.ie 2442 : 923 : lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
3667 kgrittn@postgresql.o 2443 : 923 : page = BufferGetPage(lbuf);
1495 michael@paquier.xyz 2444 : 923 : opaque = BTPageGetOpaque(page);
8472 tgl@sss.pgh.pa.us 2445 [ - + - + ]: 923 : while (P_ISDELETED(opaque) || opaque->btpo_next != target)
2446 : : {
1819 tgl@sss.pgh.pa.us 2447 :UBC 0 : bool leftsibvalid = true;
2448 : :
2449 : : /*
2450 : : * Before we follow the link from the page that was the left
2451 : : * sibling mere moments ago, validate its right link. This
2452 : : * reduces the opportunities for the loop to fail to ever make any
2453 : : * progress in the presence of index corruption.
2454 : : *
2455 : : * Note: we rely on the assumption that there can only be one
2456 : : * vacuum process running at a time (against the same index).
2457 : : */
1916 pg@bowt.ie 2458 [ # # # # ]: 0 : if (P_RIGHTMOST(opaque) || P_ISDELETED(opaque) ||
2459 [ # # ]: 0 : leftsib == opaque->btpo_next)
2460 : 0 : leftsibvalid = false;
2461 : :
2462 : 0 : leftsib = opaque->btpo_next;
2463 : 0 : _bt_relbuf(rel, lbuf);
2464 : :
2465 [ # # ]: 0 : if (!leftsibvalid)
2466 : : {
2467 : : /*
2468 : : * This is known to fail in the field; sibling link corruption
2469 : : * is relatively common. Press on with vacuuming rather than
2470 : : * just throwing an ERROR.
2471 : : */
2472 [ # # ]: 0 : ereport(LOG,
2473 : : (errcode(ERRCODE_INDEX_CORRUPTED),
2474 : : errmsg_internal("valid left sibling for deletion target could not be located: "
2475 : : "left sibling %u of target %u with leafblkno %u and scanblkno %u on level %u of index \"%s\"",
2476 : : leftsib, target, leafblkno, scanblkno,
2477 : : targetlevel, RelationGetRelationName(rel))));
2478 : :
2479 : : /* Must release all pins and locks on failure exit */
1076 2480 : 0 : ReleaseBuffer(buf);
2481 [ # # ]: 0 : if (target != leafblkno)
2482 : 0 : _bt_relbuf(rel, leafbuf);
2483 : :
4435 heikki.linnakangas@i 2484 : 0 : return false;
2485 : : }
2486 : :
1916 pg@bowt.ie 2487 [ # # ]: 0 : CHECK_FOR_INTERRUPTS();
2488 : :
2489 : : /* step right one page */
1060 2490 : 0 : lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
3667 kgrittn@postgresql.o 2491 : 0 : page = BufferGetPage(lbuf);
1495 michael@paquier.xyz 2492 : 0 : opaque = BTPageGetOpaque(page);
2493 : : }
2494 : : }
2495 : : else
8472 tgl@sss.pgh.pa.us 2496 :CBC 2849 : lbuf = InvalidBuffer;
2497 : :
2498 : : /* Next write-lock the target page itself */
2114 pg@bowt.ie 2499 : 3772 : _bt_lockbuf(rel, buf, BT_WRITE);
3667 kgrittn@postgresql.o 2500 : 3772 : page = BufferGetPage(buf);
1495 michael@paquier.xyz 2501 : 3772 : opaque = BTPageGetOpaque(page);
2502 : :
2503 : : /*
2504 : : * Check page is still empty etc, else abandon deletion. This is just for
2505 : : * paranoia's sake; a half-dead page cannot resurrect because there can be
2506 : : * only one vacuum process running at a time.
2507 : : */
4435 heikki.linnakangas@i 2508 [ + - + - : 3772 : if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque))
- + ]
1890 pg@bowt.ie 2509 [ # # ]:UBC 0 : elog(ERROR, "target page changed status unexpectedly in block %u of index \"%s\"",
2510 : : target, RelationGetRelationName(rel));
2511 : :
8472 tgl@sss.pgh.pa.us 2512 [ - + ]:CBC 3772 : if (opaque->btpo_prev != leftsib)
2469 peter@eisentraut.org 2513 [ # # ]:UBC 0 : ereport(ERROR,
2514 : : (errcode(ERRCODE_INDEX_CORRUPTED),
2515 : : errmsg_internal("target page left link unexpectedly changed from %u to %u in block %u of index \"%s\"",
2516 : : leftsib, opaque->btpo_prev, target,
2517 : : RelationGetRelationName(rel))));
2518 : :
4435 heikki.linnakangas@i 2519 [ + + ]:CBC 3772 : if (target == leafblkno)
2520 : : {
2521 [ - + + - ]: 3653 : if (P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
2522 [ + - - + ]: 3653 : !P_ISLEAF(opaque) || !P_ISHALFDEAD(opaque))
1890 pg@bowt.ie 2523 [ # # ]:UBC 0 : elog(ERROR, "target leaf page changed status unexpectedly in block %u of index \"%s\"",
2524 : : target, RelationGetRelationName(rel));
2525 : :
2526 : : /* Leaf page is also target page: don't set leaftopparent */
1896 pg@bowt.ie 2527 :CBC 3653 : leaftopparent = InvalidBlockNumber;
2528 : : }
2529 : : else
2530 : : {
2531 : : IndexTuple finaldataitem;
2532 : :
4435 heikki.linnakangas@i 2533 [ - + + - ]: 119 : if (P_FIRSTDATAKEY(opaque) != PageGetMaxOffsetNumber(page) ||
2534 [ - + ]: 119 : P_ISLEAF(opaque))
1890 pg@bowt.ie 2535 [ # # ]:UBC 0 : elog(ERROR, "target internal page on level %u changed status unexpectedly in block %u of index \"%s\"",
2536 : : targetlevel, target, RelationGetRelationName(rel));
2537 : :
2538 : : /* Target is internal: set leaftopparent for next call here... */
4435 heikki.linnakangas@i 2539 [ - + ]:CBC 119 : itemid = PageGetItemId(page, P_FIRSTDATAKEY(opaque));
1896 pg@bowt.ie 2540 : 119 : finaldataitem = (IndexTuple) PageGetItem(page, itemid);
2541 : 119 : leaftopparent = BTreeTupleGetDownLink(finaldataitem);
2542 : : /* ...except when it would be a redundant pointer-to-self */
2543 [ + + ]: 119 : if (leaftopparent == leafblkno)
2544 : 59 : leaftopparent = InvalidBlockNumber;
2545 : : }
2546 : :
2547 : : /* No leaftopparent for level 0 (leaf page) or level 1 target */
1890 2548 [ + + - + ]: 3772 : Assert(!BlockNumberIsValid(leaftopparent) || targetlevel > 1);
2549 : :
2550 : : /*
2551 : : * And next write-lock the (current) right sibling.
2552 : : */
8472 tgl@sss.pgh.pa.us 2553 : 3772 : rightsib = opaque->btpo_next;
1060 pg@bowt.ie 2554 : 3772 : rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
3667 kgrittn@postgresql.o 2555 : 3772 : page = BufferGetPage(rbuf);
1495 michael@paquier.xyz 2556 : 3772 : opaque = BTPageGetOpaque(page);
2557 : :
2558 : : /*
2559 : : * Validate target's right sibling page. Its left link must point back to
2560 : : * the target page.
2561 : : */
5728 tgl@sss.pgh.pa.us 2562 [ - + ]: 3772 : if (opaque->btpo_prev != target)
2563 : : {
2564 : : /*
2565 : : * This is known to fail in the field; sibling link corruption is
2566 : : * relatively common. Press on with vacuuming rather than just
2567 : : * throwing an ERROR (same approach used for left-sibling's-right-link
2568 : : * validation check a moment ago).
2569 : : */
1076 pg@bowt.ie 2570 [ # # ]:UBC 0 : ereport(LOG,
2571 : : (errcode(ERRCODE_INDEX_CORRUPTED),
2572 : : errmsg_internal("right sibling's left-link doesn't match: "
2573 : : "right sibling %u of target %u with leafblkno %u "
2574 : : "and scanblkno %u spuriously links to non-target %u "
2575 : : "on level %u of index \"%s\"",
2576 : : rightsib, target, leafblkno,
2577 : : scanblkno, opaque->btpo_prev,
2578 : : targetlevel, RelationGetRelationName(rel))));
2579 : :
2580 : : /* Must release all pins and locks on failure exit */
2581 [ # # ]: 0 : if (BufferIsValid(lbuf))
2582 : 0 : _bt_relbuf(rel, lbuf);
2583 : 0 : _bt_relbuf(rel, rbuf);
2584 : 0 : _bt_relbuf(rel, buf);
2585 [ # # ]: 0 : if (target != leafblkno)
2586 : 0 : _bt_relbuf(rel, leafbuf);
2587 : :
2588 : 0 : return false;
2589 : : }
2590 : :
4435 heikki.linnakangas@i 2591 :CBC 3772 : rightsib_is_rightmost = P_RIGHTMOST(opaque);
2592 [ + + ]: 3772 : *rightsib_empty = (P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
2593 : :
2594 : : /*
2595 : : * If we are deleting the next-to-last page on the target's level, then
2596 : : * the rightsib is a candidate to become the new fast root. (In theory, it
2597 : : * might be possible to push the fast root even further down, but the odds
2598 : : * of doing so are slim, and the locking considerations daunting.)
2599 : : *
2600 : : * We can safely acquire a lock on the metapage here --- see comments for
2601 : : * _bt_newlevel().
2602 : : */
2603 [ + + + + ]: 3772 : if (leftsib == P_NONE && rightsib_is_rightmost)
2604 : : {
3667 kgrittn@postgresql.o 2605 : 36 : page = BufferGetPage(rbuf);
1495 michael@paquier.xyz 2606 : 36 : opaque = BTPageGetOpaque(page);
8472 tgl@sss.pgh.pa.us 2607 [ + - ]: 36 : if (P_RIGHTMOST(opaque))
2608 : : {
2609 : : /* rightsib will be the only one left on the level */
1060 pg@bowt.ie 2610 : 36 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
3667 kgrittn@postgresql.o 2611 : 36 : metapg = BufferGetPage(metabuf);
8472 tgl@sss.pgh.pa.us 2612 : 36 : metad = BTPageGetMeta(metapg);
2613 : :
2614 : : /*
2615 : : * The expected case here is btm_fastlevel == targetlevel+1; if
2616 : : * the fastlevel is <= targetlevel, something is wrong, and we
2617 : : * choose to overwrite it to fix it.
2618 : : */
8310 bruce@momjian.us 2619 [ - + ]: 36 : if (metad->btm_fastlevel > targetlevel + 1)
2620 : : {
2621 : : /* no update wanted */
8472 tgl@sss.pgh.pa.us 2622 :UBC 0 : _bt_relbuf(rel, metabuf);
2623 : 0 : metabuf = InvalidBuffer;
2624 : : }
2625 : : }
2626 : : }
2627 : :
2628 : : /*
2629 : : * Here we begin doing the deletion.
2630 : : */
2631 : :
2632 : : /* No ereport(ERROR) until changes are logged */
8472 tgl@sss.pgh.pa.us 2633 :CBC 3772 : START_CRIT_SECTION();
2634 : :
2635 : : /*
2636 : : * Update siblings' side-links. Note the target page's side-links will
2637 : : * continue to point to the siblings. Asserts here are just rechecking
2638 : : * things we already verified above.
2639 : : */
2640 [ + + ]: 3772 : if (BufferIsValid(lbuf))
2641 : : {
3667 kgrittn@postgresql.o 2642 : 923 : page = BufferGetPage(lbuf);
1495 michael@paquier.xyz 2643 : 923 : opaque = BTPageGetOpaque(page);
8472 tgl@sss.pgh.pa.us 2644 [ - + ]: 923 : Assert(opaque->btpo_next == target);
2645 : 923 : opaque->btpo_next = rightsib;
2646 : : }
3667 kgrittn@postgresql.o 2647 : 3772 : page = BufferGetPage(rbuf);
1495 michael@paquier.xyz 2648 : 3772 : opaque = BTPageGetOpaque(page);
8472 tgl@sss.pgh.pa.us 2649 [ - + ]: 3772 : Assert(opaque->btpo_prev == target);
2650 : 3772 : opaque->btpo_prev = leftsib;
2651 : :
2652 : : /*
2653 : : * If we deleted a parent of the targeted leaf page, instead of the leaf
2654 : : * itself, update the leaf to point to the next remaining child in the
2655 : : * subtree.
2656 : : *
2657 : : * Note: We rely on the fact that a buffer pin on the leaf page has been
2658 : : * held since leafhikey was initialized. This is safe, though only
2659 : : * because the page was already half-dead at that point. The leaf page
2660 : : * cannot have been modified by any other backend during the period when
2661 : : * no lock was held.
2662 : : */
4435 heikki.linnakangas@i 2663 [ + + ]: 3772 : if (target != leafblkno)
1896 pg@bowt.ie 2664 : 119 : BTreeTupleSetTopParent(leafhikey, leaftopparent);
2665 : :
2666 : : /*
2667 : : * Mark the page itself deleted. It can be recycled when all current
2668 : : * transactions are gone. Storing GetTopTransactionId() would work, but
2669 : : * we're in VACUUM and would not otherwise have an XID. Having already
2670 : : * updated links to the target, ReadNextFullTransactionId() suffices as an
2671 : : * upper bound. Any scan having retained a now-stale link is advertising
2672 : : * in its PGPROC an xmin less than or equal to the value we read here. It
2673 : : * will continue to do so, holding back the xmin horizon, for the duration
2674 : : * of that scan.
2675 : : */
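    /*
     * For example (hypothetical XIDs): if ReadNextFullTransactionId() were
     * to return 1234 here, any scan still holding a stale link to this page
     * must already be advertising an xmin <= 1234 in its PGPROC, so the
     * page cannot be treated as recyclable until the xmin horizon has
     * advanced past 1234.
     */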
3667 kgrittn@postgresql.o 2676 : 3772 : page = BufferGetPage(buf);
1495 michael@paquier.xyz 2677 : 3772 : opaque = BTPageGetOpaque(page);
2195 pg@bowt.ie 2678 [ + + - + ]: 3772 : Assert(P_ISHALFDEAD(opaque) || !P_ISLEAF(opaque));
2679 : :
2680 : : /*
2681 : : * Store upper bound XID that's used to determine when deleted page is no
2682 : : * longer needed as a tombstone
2683 : : */
1896 2684 : 3772 : safexid = ReadNextFullTransactionId();
2685 : 3772 : BTPageSetDeleted(page, safexid);
2686 : 3772 : opaque->btpo_cycleid = 0;
2687 : :
2688 : : /* And update the metapage, if needed */
8472 tgl@sss.pgh.pa.us 2689 [ + + ]: 3772 : if (BufferIsValid(metabuf))
2690 : : {
2691 : : /* upgrade metapage if needed */
2603 pg@bowt.ie 2692 [ - + ]: 36 : if (metad->btm_version < BTREE_NOVAC_VERSION)
2953 teodor@sigaev.ru 2693 :UBC 0 : _bt_upgrademetapage(metapg);
8472 tgl@sss.pgh.pa.us 2694 :CBC 36 : metad->btm_fastroot = rightsib;
2695 : 36 : metad->btm_fastlevel = targetlevel;
7340 2696 : 36 : MarkBufferDirty(metabuf);
2697 : : }
2698 : :
2699 : : /* Must mark buffers dirty before XLogInsert */
2700 : 3772 : MarkBufferDirty(rbuf);
2701 : 3772 : MarkBufferDirty(buf);
2702 [ + + ]: 3772 : if (BufferIsValid(lbuf))
2703 : 923 : MarkBufferDirty(lbuf);
4435 heikki.linnakangas@i 2704 [ + + ]: 3772 : if (target != leafblkno)
2705 : 119 : MarkBufferDirty(leafbuf);
2706 : :
2707 : : /* XLOG stuff */
5622 rhaas@postgresql.org 2708 [ + - + + : 3772 : if (RelationNeedsWAL(rel))
+ - + - ]
8472 tgl@sss.pgh.pa.us 2709 :GIC 3772 : {
2710 : : xl_btree_unlink_page xlrec;
2711 : : xl_btree_metadata xlmeta;
2712 : : uint8 xlinfo;
2713 : :
4184 heikki.linnakangas@i 2714 :CBC 3772 : XLogBeginInsert();
2715 : :
2716 : 3772 : XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
2717 [ + + ]: 3772 : if (BufferIsValid(lbuf))
2718 : 923 : XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
2719 : 3772 : XLogRegisterBuffer(2, rbuf, REGBUF_STANDARD);
2720 [ + + ]: 3772 : if (target != leafblkno)
2721 : 119 : XLogRegisterBuffer(3, leafbuf, REGBUF_WILL_INIT);
2722 : :
2723 : : /* information stored on the target/to-be-unlinked block */
4435 2724 : 3772 : xlrec.leftsib = leftsib;
2725 : 3772 : xlrec.rightsib = rightsib;
1896 pg@bowt.ie 2726 : 3772 : xlrec.level = targetlevel;
2727 : 3772 : xlrec.safexid = safexid;
2728 : :
2729 : : /* information needed to recreate the leaf block (if not the target) */
4435 heikki.linnakangas@i 2730 : 3772 : xlrec.leafleftsib = leafleftsib;
2731 : 3772 : xlrec.leafrightsib = leafrightsib;
1896 pg@bowt.ie 2732 : 3772 : xlrec.leaftopparent = leaftopparent;
2733 : :
448 peter@eisentraut.org 2734 : 3772 : XLogRegisterData(&xlrec, SizeOfBtreeUnlinkPage);
2735 : :
8472 tgl@sss.pgh.pa.us 2736 [ + + ]: 3772 : if (BufferIsValid(metabuf))
2737 : : {
3105 2738 : 36 : XLogRegisterBuffer(4, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
2739 : :
2603 pg@bowt.ie 2740 [ - + ]: 36 : Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
2741 : 36 : xlmeta.version = metad->btm_version;
8472 tgl@sss.pgh.pa.us 2742 : 36 : xlmeta.root = metad->btm_root;
2743 : 36 : xlmeta.level = metad->btm_level;
2744 : 36 : xlmeta.fastroot = metad->btm_fastroot;
2745 : 36 : xlmeta.fastlevel = metad->btm_fastlevel;
1896 pg@bowt.ie 2746 : 36 : xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
2260 2747 : 36 : xlmeta.allequalimage = metad->btm_allequalimage;
2748 : :
448 peter@eisentraut.org 2749 : 36 : XLogRegisterBufData(4, &xlmeta, sizeof(xl_btree_metadata));
4435 heikki.linnakangas@i 2750 : 36 : xlinfo = XLOG_BTREE_UNLINK_PAGE_META;
2751 : : }
2752 : : else
2753 : 3736 : xlinfo = XLOG_BTREE_UNLINK_PAGE;
2754 : :
4184 2755 : 3772 : recptr = XLogInsert(RM_BTREE_ID, xlinfo);
2756 : : }
2757 : : else
53 pg@bowt.ie 2758 :UNC 0 : recptr = XLogGetFakeLSN(rel);
2759 : :
53 pg@bowt.ie 2760 [ + + ]:GNC 3772 : if (BufferIsValid(metabuf))
2761 : 36 : PageSetLSN(metapg, recptr);
2762 : 3772 : page = BufferGetPage(rbuf);
2763 : 3772 : PageSetLSN(page, recptr);
2764 : 3772 : page = BufferGetPage(buf);
2765 : 3772 : PageSetLSN(page, recptr);
2766 [ + + ]: 3772 : if (BufferIsValid(lbuf))
2767 : : {
2768 : 923 : page = BufferGetPage(lbuf);
8472 tgl@sss.pgh.pa.us 2769 :CBC 923 : PageSetLSN(page, recptr);
2770 : : }
53 pg@bowt.ie 2771 [ + + ]:GNC 3772 : if (target != leafblkno)
2772 : : {
2773 : 119 : page = BufferGetPage(leafbuf);
8472 tgl@sss.pgh.pa.us 2774 :CBC 119 : PageSetLSN(page, recptr);
2775 : : }
2776 : :
2777 [ - + ]: 3772 : END_CRIT_SECTION();
2778 : :
2779 : : /* release metapage */
2780 [ + + ]: 3772 : if (BufferIsValid(metabuf))
7340 2781 : 36 : _bt_relbuf(rel, metabuf);
2782 : :
2783 : : /* release siblings */
8472 2784 [ + + ]: 3772 : if (BufferIsValid(lbuf))
7340 2785 : 923 : _bt_relbuf(rel, lbuf);
4435 heikki.linnakangas@i 2786 : 3772 : _bt_relbuf(rel, rbuf);
2787 : :
2788 : : /* If the target is not leafbuf, we're done with it now -- release it */
1896 pg@bowt.ie 2789 [ + + ]: 3772 : if (target != leafblkno)
2790 : 119 : _bt_relbuf(rel, buf);
2791 : :
2792 : : /*
2793 : : * Maintain pages_newly_deleted, which is simply the number of pages
2794 : : * deleted by the ongoing VACUUM operation.
2795 : : *
2796 : : * Maintain pages_deleted in a way that takes into account how
2797 : : * btvacuumpage() will count deleted pages that have yet to become
2798 : : * scanblkno -- only count the page when it's not going to get that treatment
2799 : : * later on.
2800 : : */
1895 2801 : 3772 : stats->pages_newly_deleted++;
2195 2802 [ + + ]: 3772 : if (target <= scanblkno)
1895 2803 : 3672 : stats->pages_deleted++;
2804 : :
2805 : : /*
2806 : : * Remember information about the target page (now a newly deleted page)
2807 : : * in dedicated vstate space for later. The page will be considered as a
2808 : : * candidate to place in the FSM at the end of the current btvacuumscan()
2809 : : * call.
2810 : : */
1871 2811 : 3772 : _bt_pendingfsm_add(vstate, target, safexid);
2812 : :
2813 : : /* Success - hold on to lock on leafbuf (might also have been target) */
4435 heikki.linnakangas@i 2814 : 3772 : return true;
2815 : : }
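
/*
 * For illustration, the net effect of a successful unlink (a sketch; block
 * numbers are hypothetical) is that the target drops out of its level's
 * doubly-linked sibling chain while keeping its own, now-stale links:
 *
 *   before:  [leftsib] <-> [target] <-> [rightsib]
 *   after:   [leftsib] <-------------> [rightsib]
 *            [target] is marked BTP_DELETED and stores safexid; its own
 *            btpo_prev/btpo_next still name the old siblings.
 */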
2816 : :
2817 : : /*
2818 : : * Establish how tall the to-be-deleted subtree will be during the first stage
2819 : : * of page deletion.
2820 : : *
2821 : : * Caller's child argument is the block number of the page caller wants to
2822 : : * delete (this is leafbuf's block number, except when we're called
2823 : : * recursively). stack is a search stack leading to it. Note that we will
2824 : : * update the stack entries to reflect current downlink positions --- this is
2825 : : * similar to the corresponding point in page split handling.
2826 : : *
2827 : : * If "first stage" caller cannot go ahead with deleting _any_ pages, returns
2828 : : * false. Returns true on success, in which case caller can use certain
2829 : : * details established here to perform the first stage of deletion. This
2830 : : * function is the last point at which page deletion may be deemed unsafe
2831 : : * (barring index corruption, or unexpected concurrent page deletions).
2832 : : *
2833 : : * We write lock the parent of the root of the to-be-deleted subtree for
2834 : : * caller on success (i.e. we leave our lock on the *subtreeparent buffer for
2835 : : * caller). Caller will have to remove a downlink from *subtreeparent. We
2836 : : * also set a *subtreeparent offset number in *poffset, to indicate the
2837 : : * location of the pivot tuple that contains the relevant downlink.
2838 : : *
2839 : : * The root of the to-be-deleted subtree is called the "top parent". Note
2840 : : * that the leafbuf page is often the final "top parent" page (you can think
2841 : : * of the leafbuf page as a degenerate single page subtree when that happens).
2842 : : * Caller should initialize *topparent to the target leafbuf page block number
2843 : : * (while *topparentrightsib should be set to leafbuf's right sibling block
2844 : : * number). We will update *topparent (and *topparentrightsib) for caller
2845 : : * here, though only when it turns out that caller will delete at least one
2846 : : * internal page (i.e. only when caller needs to store a valid link to the top
2847 : : * parent block in the leafbuf page using BTreeTupleSetTopParent()).
2848 : : */
2849 : : static bool
1130 andres@anarazel.de 2850 : 3793 : _bt_lock_subtree_parent(Relation rel, Relation heaprel, BlockNumber child,
2851 : : BTStack stack, Buffer *subtreeparent,
2852 : : OffsetNumber *poffset, BlockNumber *topparent,
2853 : : BlockNumber *topparentrightsib)
2854 : : {
2855 : : BlockNumber parent,
2856 : : leftsibparent;
2857 : : OffsetNumber parentoffset,
2858 : : maxoff;
2859 : : Buffer pbuf;
2860 : : Page page;
2861 : : BTPageOpaque opaque;
2862 : :
2863 : : /*
2864 : : * Locate the pivot tuple whose downlink points to "child". Write lock
2865 : : * the parent page itself.
2866 : : */
2867 : 3793 : pbuf = _bt_getstackbuf(rel, heaprel, stack, child);
2185 pg@bowt.ie 2868 [ - + ]: 3793 : if (pbuf == InvalidBuffer)
2869 : : {
2870 : : /*
2871 : : * Failed to "re-find" a pivot tuple whose downlink matched our child
2872 : : * block number on the parent level -- the index must be corrupt.
2873 : : * Don't even try to delete the leafbuf subtree. Just report the
2874 : : * issue and press on with vacuuming the index.
2875 : : *
2876 : : * Note: _bt_getstackbuf() recovers from concurrent page splits that
2877 : : * take place on the parent level. Its approach is a near-exhaustive
2878 : : * linear search. This also gives it a surprisingly good chance of
2879 : : * recovering in the event of a buggy or inconsistent opclass. But we
2880 : : * don't rely on that here.
2881 : : */
1869 pg@bowt.ie 2882 [ # # ]:UBC 0 : ereport(LOG,
2883 : : (errcode(ERRCODE_INDEX_CORRUPTED),
2884 : : errmsg_internal("failed to re-find parent key in index \"%s\" for deletion target page %u",
2885 : : RelationGetRelationName(rel), child)));
1049 2886 : 0 : Assert(false);
2887 : : return false;
2888 : : }
2889 : :
2185 pg@bowt.ie 2890 :CBC 3793 : parent = stack->bts_blkno;
2891 : 3793 : parentoffset = stack->bts_offset;
2892 : :
2893 : 3793 : page = BufferGetPage(pbuf);
1495 michael@paquier.xyz 2894 : 3793 : opaque = BTPageGetOpaque(page);
2185 pg@bowt.ie 2895 : 3793 : maxoff = PageGetMaxOffsetNumber(page);
2896 : 3793 : leftsibparent = opaque->btpo_prev;
2897 : :
2898 : : /*
2899 : : * _bt_getstackbuf() completes page splits on returned parent buffer when
2900 : : * required.
2901 : : *
2902 : : * In general it's a bad idea for VACUUM to use up more disk space, which
2903 : : * is why page deletion does not finish incomplete page splits most of the
2904 : : * time. We allow this limited exception because the risk is much lower,
2905 : : * and the potential downside of not proceeding is much higher: A single
2906 : : * internal page with the INCOMPLETE_SPLIT flag set might otherwise
2907 : : * prevent us from deleting hundreds of empty leaf pages from one level
2908 : : * down.
2909 : : */
2910 [ - + ]: 3793 : Assert(!P_INCOMPLETE_SPLIT(opaque));
2911 : :
2912 [ + + ]: 3793 : if (parentoffset < maxoff)
2913 : : {
2914 : : /*
2915 : : * Child is not the rightmost child in parent, so it's safe to delete
2916 : : * the subtree whose root/topparent is child page
2917 : : */
2918 : 3653 : *subtreeparent = pbuf;
2919 : 3653 : *poffset = parentoffset;
2920 : 3653 : return true;
2921 : : }
2922 : :
2923 : : /*
2924 : : * Child is the rightmost child of parent.
2925 : : *
2926 : : * Since it's the rightmost child of parent, deleting the child (or
2927 : : * deleting the subtree whose root/topparent is the child page) is only
2928 : : * safe when it's also possible to delete the parent.
2929 : : */
2930 [ - + ]: 140 : Assert(parentoffset == maxoff);
2931 [ - + + + : 140 : if (parentoffset != P_FIRSTDATAKEY(opaque) || P_RIGHTMOST(opaque))
- + ]
2932 : : {
2933 : : /*
2934 : : * Child isn't parent's only child, or parent is rightmost on its
2935 : : * entire level. Definitely cannot delete any pages.
2936 : : */
2937 : 7 : _bt_relbuf(rel, pbuf);
2938 : 7 : return false;
2939 : : }
2940 : :
2941 : : /*
2942 : : * Now make sure that the parent deletion is itself safe by examining the
2943 : : * child's grandparent page. Recurse, passing the parent page as the
2944 : : * child page (child's grandparent is the parent on the next level up). If
2945 : : * parent deletion is unsafe, then child deletion must also be unsafe (in
2946 : : * which case caller cannot delete any pages at all).
2947 : : */
2948 : 133 : *topparent = parent;
2949 : 133 : *topparentrightsib = opaque->btpo_next;
2950 : :
2951 : : /*
2952 : : * Release lock on parent before recursing.
2953 : : *
2954 : : * It's OK to release page locks on parent before recursive call locks
2955 : : * grandparent. An internal page can only acquire an entry if the child
2956 : : * is split, but that cannot happen as long as we still hold a lock on the
2957 : : * leafbuf page.
2958 : : */
2959 : 133 : _bt_relbuf(rel, pbuf);
2960 : :
2961 : : /*
2962 : : * Before recursing, check that the left sibling of parent (if any) is not
2963 : : * marked with the INCOMPLETE_SPLIT flag (we must do so after dropping the
2964 : : * parent lock).
2965 : : *
2966 : : * Note: We deliberately avoid completing incomplete splits here.
2967 : : */
1060 2968 [ - + ]: 133 : if (_bt_leftsib_splitflag(rel, leftsibparent, parent))
2185 pg@bowt.ie 2969 :UBC 0 : return false;
2970 : :
2971 : : /* Recurse to examine child page's grandparent page */
1130 andres@anarazel.de 2972 :CBC 133 : return _bt_lock_subtree_parent(rel, heaprel, parent, stack->bts_parent,
2973 : : subtreeparent, poffset,
2974 : : topparent, topparentrightsib);
2975 : : }
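
/*
 * For illustration, a caller following the contract above might look roughly
 * like this (a simplified sketch; variable names are hypothetical and error
 * handling is omitted):
 *
 *     BlockNumber  topparent = leafblkno;
 *     BlockNumber  topparentrightsib = leafrightsib;
 *     Buffer       subtreeparent;
 *     OffsetNumber poffset;
 *
 *     if (!_bt_lock_subtree_parent(rel, heaprel, leafblkno, stack,
 *                                  &subtreeparent, &poffset,
 *                                  &topparent, &topparentrightsib))
 *         return false;
 *
 * On success the caller holds a write lock on *subtreeparent and must remove
 * the downlink at *poffset; *topparent and *topparentrightsib now identify
 * the root (and the root's right sibling) of the to-be-deleted subtree,
 * which may still simply be the leafbuf page itself.
 */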
2976 : :
2977 : : /*
2978 : : * Initialize local memory state used by VACUUM for _bt_pendingfsm_finalize
2979 : : * optimization.
2980 : : *
2981 : : * Called at the start of a btvacuumscan(). Caller's cleanuponly argument
2982 : : * indicates if ongoing VACUUM has not (and will not) call btbulkdelete().
2983 : : *
2984 : : * We expect to allocate memory inside VACUUM's top-level memory context here.
2985 : : * The working buffer is subject to a limit based on work_mem. Our strategy
2986 : : * when the array can no longer grow within the bounds of that limit is to
2987 : : * stop saving additional newly deleted pages, while proceeding as usual with
2988 : : * the pages that we can fit.
2989 : : */
2990 : : void
1871 pg@bowt.ie 2991 : 2065 : _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
2992 : : {
2993 : : Size maxbufsize;
2994 : :
2995 : : /*
2996 : : * Don't bother with optimization in cleanup-only case -- we don't expect
2997 : : * any newly deleted pages. Besides, cleanup-only calls to btvacuumscan()
2998 : : * can only take place because this optimization didn't work out during
2999 : : * the last VACUUM.
3000 : : */
3001 [ + + ]: 2065 : if (cleanuponly)
3002 : 9 : return;
3003 : :
3004 : : /*
3005 : : * Cap maximum size of array so that we always respect work_mem. Avoid
3006 : : * int overflow here.
3007 : : */
3008 : 2056 : vstate->bufsize = 256;
459 tgl@sss.pgh.pa.us 3009 : 2056 : maxbufsize = (work_mem * (Size) 1024) / sizeof(BTPendingFSM);
1871 pg@bowt.ie 3010 : 2056 : maxbufsize = Min(maxbufsize, MaxAllocSize / sizeof(BTPendingFSM));
3011 : : /* BTVacState.maxbufsize has type int */
459 tgl@sss.pgh.pa.us 3012 : 2056 : maxbufsize = Min(maxbufsize, INT_MAX);
3013 : : /* Stay sane with small work_mem */
1871 pg@bowt.ie 3014 : 2056 : maxbufsize = Max(maxbufsize, vstate->bufsize);
459 tgl@sss.pgh.pa.us 3015 : 2056 : vstate->maxbufsize = (int) maxbufsize;
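
    /*
     * For example (illustrative figures): with work_mem set to 64MB
     * (work_mem = 65536, since the GUC is measured in kilobytes) and
     * assuming sizeof(BTPendingFSM) == 16 (a BlockNumber plus a
     * FullTransactionId, padded), the first line gives
     * 65536 * 1024 / 16 = 4194304 entries, comfortably below both
     * MaxAllocSize / 16 and INT_MAX, so the work_mem term ends up being
     * the effective cap here.
     */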
3016 : :
3017 : : /* Allocate buffer, indicate that there are currently 0 pending pages */
146 michael@paquier.xyz 3018 :GNC 2056 : vstate->pendingpages = palloc_array(BTPendingFSM, vstate->bufsize);
1871 pg@bowt.ie 3019 :CBC 2056 : vstate->npendingpages = 0;
3020 : : }
3021 : :
3022 : : /*
3023 : : * Place any newly deleted pages (i.e. pages that _bt_pagedel() deleted during
3024 : : * the ongoing VACUUM operation) into the free space map -- though only when
3025 : : * it is actually safe to do so by now.
3026 : : *
3027 : : * Called at the end of a btvacuumscan(), just before free space map vacuuming
3028 : : * takes place.
3029 : : *
3030 : : * Frees memory allocated by _bt_pendingfsm_init(), if any.
3031 : : */
3032 : : void
3033 : 2063 : _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
3034 : : {
3035 : 2063 : IndexBulkDeleteResult *stats = vstate->stats;
1082 tgl@sss.pgh.pa.us 3036 : 2063 : Relation heaprel = vstate->info->heaprel;
3037 : :
1871 pg@bowt.ie 3038 [ - + ]: 2063 : Assert(stats->pages_newly_deleted >= vstate->npendingpages);
1060 3039 [ - + ]: 2063 : Assert(heaprel != NULL);
3040 : :
1871 3041 [ + + ]: 2063 : if (vstate->npendingpages == 0)
3042 : : {
3043 : : /* Just free memory when nothing to do */
3044 [ + + ]: 1944 : if (vstate->pendingpages)
3045 : 1935 : pfree(vstate->pendingpages);
3046 : :
3047 : 1944 : return;
3048 : : }
3049 : :
3050 : : #ifdef DEBUG_BTREE_PENDING_FSM
3051 : :
3052 : : /*
3053 : : * Debugging aid: Sleep for 5 seconds to greatly increase the chances of
3054 : : * placing pending pages in the FSM. Note that the optimization will
3055 : : * never be effective without some other backend concurrently consuming an
3056 : : * XID.
3057 : : */
3058 : : pg_usleep(5000000L);
3059 : : #endif
3060 : :
3061 : : /*
3062 : : * Recompute VACUUM XID boundaries.
3063 : : *
3064 : : * We don't actually care about the oldest non-removable XID. Computing
3065 : : * the oldest such XID has a useful side-effect that we rely on: it
3066 : : * forcibly updates the XID horizon state for this backend. This step is
3067 : : * essential; GlobalVisCheckRemovableFullXid() will not reliably recognize
3068 : : * that it is now safe to recycle newly deleted pages without this step.
3069 : : */
1128 3070 : 119 : GetOldestNonRemovableTransactionId(heaprel);
3071 : :
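    /*
     * For illustration (hypothetical values): if pendingpages holds entries
     * with safexids 1000, 1002 and 1250, and the recomputed horizon shows
     * that only transactions before 1100 are gone, the loop below records
     * the first two pages in the FSM and then stops at the third; that page
     * and any later entries remain deleted-but-unrecycled until some future
     * VACUUM finds them recyclable.
     */
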
1871 3072 [ + + ]: 200 : for (int i = 0; i < vstate->npendingpages; i++)
3073 : : {
3074 : 198 : BlockNumber target = vstate->pendingpages[i].target;
3075 : 198 : FullTransactionId safexid = vstate->pendingpages[i].safexid;
3076 : :
3077 : : /*
3078 : : * Do the equivalent of checking BTPageIsRecyclable(), but without
3079 : : * accessing the page a second time.
3080 : : *
3081 : : * Give up when we reach the first non-recyclable page -- all later
3082 : : * pages must be non-recyclable too, since _bt_pendingfsm_add() adds
3083 : : * pages to the array in safexid order.
3084 : : */
1128 3085 [ + + ]: 198 : if (!GlobalVisCheckRemovableFullXid(heaprel, safexid))
1871 3086 : 117 : break;
3087 : :
3088 : 81 : RecordFreeIndexPage(rel, target);
3089 : 81 : stats->pages_free++;
3090 : : }
3091 : :
3092 : 119 : pfree(vstate->pendingpages);
3093 : : }
3094 : :
3095 : : /*
3096 : : * Maintain array of pages that were deleted during current btvacuumscan()
3097 : : * call, for use in _bt_pendingfsm_finalize()
3098 : : */
3099 : : static void
3100 : 3772 : _bt_pendingfsm_add(BTVacState *vstate,
3101 : : BlockNumber target,
3102 : : FullTransactionId safexid)
3103 : : {
3104 [ - + ]: 3772 : Assert(vstate->npendingpages <= vstate->bufsize);
3105 [ - + ]: 3772 : Assert(vstate->bufsize <= vstate->maxbufsize);
3106 : :
3107 : : #ifdef USE_ASSERT_CHECKING
3108 : :
3109 : : /*
3110 : : * Verify an assumption made by _bt_pendingfsm_finalize(): pages from the
3111 : : * array will always be in safexid order (since that is the order that we
3112 : : * save them in here)
3113 : : */
3114 [ + + ]: 3772 : if (vstate->npendingpages > 0)
3115 : : {
3116 : 3653 : FullTransactionId lastsafexid =
1082 tgl@sss.pgh.pa.us 3117 : 3653 : vstate->pendingpages[vstate->npendingpages - 1].safexid;
3118 : :
1871 pg@bowt.ie 3119 [ - + ]: 3653 : Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
3120 : : }
3121 : : #endif
3122 : :
3123 : : /*
3124 : : * If the temp buffer has already reached its maxbufsize/work_mem capacity,
3125 : : * then we discard information about this page.
3126 : : *
3127 : : * Note that this also covers the case where we opted to not use the
3128 : : * optimization in _bt_pendingfsm_init().
3129 : : */
3130 [ - + ]: 3772 : if (vstate->npendingpages == vstate->maxbufsize)
1871 pg@bowt.ie 3131 :UBC 0 : return;
3132 : :
3133 : : /* Consider enlarging buffer */
1871 pg@bowt.ie 3134 [ + + ]:CBC 3772 : if (vstate->npendingpages == vstate->bufsize)
3135 : : {
3136 : 5 : int newbufsize = vstate->bufsize * 2;
3137 : :
3138 : : /* Respect work_mem */
3139 [ - + ]: 5 : if (newbufsize > vstate->maxbufsize)
1871 pg@bowt.ie 3140 :UBC 0 : newbufsize = vstate->maxbufsize;
3141 : :
1871 pg@bowt.ie 3142 :CBC 5 : vstate->bufsize = newbufsize;
3143 : 5 : vstate->pendingpages =
3144 : 5 : repalloc(vstate->pendingpages,
3145 : 5 : sizeof(BTPendingFSM) * vstate->bufsize);
3146 : : }
3147 : :
3148 : : /* Save metadata for newly deleted page */
3149 : 3772 : vstate->pendingpages[vstate->npendingpages].target = target;
3150 : 3772 : vstate->pendingpages[vstate->npendingpages].safexid = safexid;
3151 : 3772 : vstate->npendingpages++;
3152 : : }
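
/*
 * For illustration, the expected call sequence over a single btvacuumscan()
 * is roughly the following sketch (surrounding details omitted; vstate is a
 * hypothetical local BTVacState set up by the caller):
 *
 *     _bt_pendingfsm_init(rel, &vstate, cleanuponly);
 *     ... scan the index; each page actually deleted by _bt_pagedel()
 *         is remembered via _bt_pendingfsm_add(&vstate, target, safexid) ...
 *     _bt_pendingfsm_finalize(rel, &vstate);
 *
 * with _bt_pendingfsm_finalize() deciding, at the very end, which of the
 * remembered pages are already safe to put in the free space map.
 */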