Age | Owner | Branch data | TLA | Line data | Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nbtinsert.c
4 : : * Item insertion in Lehman and Yao btrees for Postgres.
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/access/nbtree/nbtinsert.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : :
16 : : #include "postgres.h"
17 : :
18 : : #include "access/nbtree.h"
19 : : #include "access/nbtxlog.h"
20 : : #include "access/tableam.h"
21 : : #include "access/transam.h"
22 : : #include "access/xloginsert.h"
23 : : #include "common/int.h"
24 : : #include "common/pg_prng.h"
25 : : #include "lib/qunique.h"
26 : : #include "miscadmin.h"
27 : : #include "storage/lmgr.h"
28 : : #include "storage/predicate.h"
29 : :
30 : : /* Minimum tree height for application of fastpath optimization */
31 : : #define BTREE_FASTPATH_MIN_LEVEL 2
32 : :
33 : :
34 : : static BTStack _bt_search_insert(Relation rel, Relation heaprel,
35 : : BTInsertState insertstate);
36 : : static TransactionId _bt_check_unique(Relation rel, BTInsertState insertstate,
37 : : Relation heapRel,
38 : : IndexUniqueCheck checkUnique, bool *is_unique,
39 : : uint32 *speculativeToken);
40 : : static OffsetNumber _bt_findinsertloc(Relation rel,
41 : : BTInsertState insertstate,
42 : : bool checkingunique,
43 : : bool indexUnchanged,
44 : : BTStack stack,
45 : : Relation heapRel);
46 : : static void _bt_stepright(Relation rel, Relation heaprel,
47 : : BTInsertState insertstate, BTStack stack);
48 : : static void _bt_insertonpg(Relation rel, Relation heaprel, BTScanInsert itup_key,
49 : : Buffer buf,
50 : : Buffer cbuf,
51 : : BTStack stack,
52 : : IndexTuple itup,
53 : : Size itemsz,
54 : : OffsetNumber newitemoff,
55 : : int postingoff,
56 : : bool split_only_page);
57 : : static Buffer _bt_split(Relation rel, Relation heaprel, BTScanInsert itup_key,
58 : : Buffer buf, Buffer cbuf, OffsetNumber newitemoff,
59 : : Size newitemsz, IndexTuple newitem, IndexTuple orignewitem,
60 : : IndexTuple nposting, uint16 postingoff);
61 : : static void _bt_insert_parent(Relation rel, Relation heaprel, Buffer buf,
62 : : Buffer rbuf, BTStack stack, bool isroot, bool isonly);
63 : : static Buffer _bt_newlevel(Relation rel, Relation heaprel, Buffer lbuf, Buffer rbuf);
64 : : static inline bool _bt_pgaddtup(Page page, Size itemsize, IndexTuple itup,
65 : : OffsetNumber itup_off, bool newfirstdataitem);
66 : : static void _bt_delete_or_dedup_one_page(Relation rel, Relation heapRel,
67 : : BTInsertState insertstate,
68 : : bool simpleonly, bool checkingunique,
69 : : bool uniquedup, bool indexUnchanged);
70 : : static void _bt_simpledel_pass(Relation rel, Buffer buffer, Relation heapRel,
71 : : OffsetNumber *deletable, int ndeletable,
72 : : IndexTuple newitem, OffsetNumber minoff,
73 : : OffsetNumber maxoff);
74 : : static BlockNumber *_bt_deadblocks(Page page, OffsetNumber *deletable,
75 : : int ndeletable, IndexTuple newitem,
76 : : int *nblocks);
77 : : static inline int _bt_blk_cmp(const void *arg1, const void *arg2);
78 : :
79 : : /*
80 : : * _bt_doinsert() -- Handle insertion of a single index tuple in the tree.
81 : : *
82 : : * This routine is called by the public interface routine, btinsert.
83 : : * By here, itup is filled in, including the TID.
84 : : *
85 : : * If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
86 : : * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
87 : : * UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
88 : : * For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
89 : : * don't actually insert.
90 : : *
91 : : * indexUnchanged executor hint indicates if itup is from an
92 : : * UPDATE that didn't logically change the indexed value, but
93 : : * must nevertheless have a new entry to point to a successor
94 : : * version.
95 : : *
96 : : * The result value is only significant for UNIQUE_CHECK_PARTIAL:
97 : : * it must be true if the entry is known unique, else false.
98 : : * (In the current implementation we'll also return true after a
99 : : * successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but
100 : : * that's just a coding artifact.)
101 : : */
102 : : bool
7164 tgl@sss.pgh.pa.us 103 :CBC 3648250 : _bt_doinsert(Relation rel, IndexTuple itup,
104 : : IndexUniqueCheck checkUnique, bool indexUnchanged,
105 : : Relation heapRel)
106 : : {
5883 107 : 3648250 : bool is_unique = false;
108 : : BTInsertStateData insertstate;
109 : : BTScanInsert itup_key;
110 : : BTStack stack;
2362 pg@bowt.ie 111 : 3648250 : bool checkingunique = (checkUnique != UNIQUE_CHECK_NO);
112 : :
113 : : /* we need an insertion scan key to do our search, so build one */
819 114 : 3648250 : itup_key = _bt_mkscankey(rel, itup);
115 : :
2328 116 [ + + ]: 3648250 : if (checkingunique)
117 : : {
118 [ + + ]: 2644433 : if (!itup_key->anynullkeys)
119 : : {
120 : : /* No (heapkeyspace) scantid until uniqueness established */
121 : 2634346 : itup_key->scantid = NULL;
122 : : }
123 : : else
124 : : {
125 : : /*
126 : : * Scan key for new tuple contains NULL key values. Bypass
127 : : * checkingunique steps. They are unnecessary because core code
128 : : * considers NULL unequal to every value, including NULL.
129 : : *
130 : : * This optimization avoids O(N^2) behavior within the
131 : : * _bt_findinsertloc() heapkeyspace path when a unique index has a
132 : : * large number of "duplicates" with NULL key values.
133 : : */
134 : 10087 : checkingunique = false;
135 : : /* Tuple is unique in the sense that core code cares about */
136 [ - + ]: 10087 : Assert(checkUnique != UNIQUE_CHECK_EXISTING);
137 : 10087 : is_unique = true;
138 : : }
139 : : }
140 : :
141 : : /*
142 : : * Fill in the BTInsertState working area, to track the current page and
143 : : * position within the page to insert on.
144 : : *
145 : : * Note that itemsz is passed down to lower level code that deals with
146 : : * inserting the item. It must be MAXALIGN()'d. This ensures that space
147 : : * accounting code consistently considers the alignment overhead that we
148 : : * expect PageAddItem() will add later. (Actually, index_form_tuple() is
149 : : * already conservative about alignment, but we don't rely on that from
150 : : * this distance. Besides, preserving the "true" tuple size in index
151 : : * tuple headers for the benefit of nbtsplitloc.c might happen someday.
152 : : * Note that heapam does not MAXALIGN() each heap tuple's lp_len field.)
153 : : */
2362 154 : 3648250 : insertstate.itup = itup;
155 : 3648250 : insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
156 : 3648250 : insertstate.itup_key = itup_key;
157 : 3648250 : insertstate.bounds_valid = false;
158 : 3648250 : insertstate.buf = InvalidBuffer;
2019 159 : 3648250 : insertstate.postingoff = 0;
160 : :
1998 161 : 3648262 : search:
162 : :
163 : : /*
164 : : * Find and lock the leaf page that the tuple should be added to by
165 : : * searching from the root page. insertstate.buf will hold a buffer that
166 : : * is locked in exclusive mode afterwards.
167 : : */
889 andres@anarazel.de 168 : 3648262 : stack = _bt_search_insert(rel, heapRel, &insertstate);
169 : :
170 : : /*
171 : : * checkingunique inserts are not allowed to go ahead when two tuples with
172 : : * equal key attribute values would be visible to new MVCC snapshots once
173 : : * the xact commits. Check for conflicts in the locked page/buffer (if
174 : : * needed) here.
175 : : *
176 : : * It might be necessary to check a page to the right in _bt_check_unique,
177 : : * though that should be very rare. In practice the first page the value
178 : : * could be on (with scantid omitted) is almost always also the only page
179 : : * that a matching tuple might be found on. This is due to the behavior
180 : : * of _bt_findsplitloc with duplicate tuples -- a group of duplicates can
181 : : * only be allowed to cross a page boundary when there is no candidate
182 : : * leaf page split point that avoids it. Also, _bt_check_unique can use
183 : : * the leaf page high key to determine that there will be no duplicates on
184 : : * the right sibling without actually visiting it (it uses the high key in
185 : : * cases where the new item happens to belong at the far right of the leaf
186 : : * page).
187 : : *
188 : : * NOTE: obviously, _bt_check_unique can only detect keys that are already
189 : : * in the index; so it cannot defend against concurrent insertions of the
190 : : * same key. We protect against that by means of holding a write lock on
191 : : * the first page the value could be on, with omitted/-inf value for the
192 : : * implicit heap TID tiebreaker attribute. Any other would-be inserter of
193 : : * the same key must acquire a write lock on the same page, so only one
194 : : * would-be inserter can be making the check at one time. Furthermore,
195 : : * once we are past the check we hold write locks continuously until we
196 : : * have performed our insertion, so no later inserter can fail to see our
197 : : * insertion. (This requires some care in _bt_findinsertloc.)
198 : : *
199 : : * If we must wait for another xact, we release the lock while waiting,
200 : : * and then must perform a new search.
201 : : *
202 : : * For a partial uniqueness check, we don't wait for the other xact. Just
203 : : * let the tuple in and return false for possibly non-unique, or true for
204 : : * definitely unique.
205 : : */
2362 pg@bowt.ie 206 [ + + ]: 3648262 : if (checkingunique)
207 : : {
208 : : TransactionId xwait;
209 : : uint32 speculativeToken;
210 : :
211 : 2634358 : xwait = _bt_check_unique(rel, &insertstate, heapRel, checkUnique,
212 : : &is_unique, &speculativeToken);
213 : :
1998 214 [ + + ]: 2634106 : if (unlikely(TransactionIdIsValid(xwait)))
215 : : {
216 : : /* Have to wait for the other guy ... */
2362 217 : 12 : _bt_relbuf(rel, insertstate.buf);
218 : 12 : insertstate.buf = InvalidBuffer;
219 : :
220 : : /*
221 : : * If it's a speculative insertion, wait for it to finish (ie. to
222 : : * go ahead with the insertion, or kill the tuple). Otherwise
223 : : * wait for the transaction to finish as usual.
224 : : */
3774 andres@anarazel.de 225 [ - + ]: 12 : if (speculativeToken)
3774 andres@anarazel.de 226 :UBC 0 : SpeculativeInsertionWait(xwait, speculativeToken);
227 : : else
3774 andres@anarazel.de 228 :CBC 12 : XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
229 : :
230 : : /* start over... */
2721 andrew@dunslane.net 231 [ - + ]: 12 : if (stack)
2721 andrew@dunslane.net 232 :UBC 0 : _bt_freestack(stack);
1998 pg@bowt.ie 233 :CBC 12 : goto search;
234 : : }
235 : :
236 : : /* Uniqueness is established -- restore heap tid as scantid */
2362 237 [ + - ]: 2634094 : if (itup_key->heapkeyspace)
238 : 2634094 : itup_key->scantid = &itup->t_tid;
239 : : }
240 : :
5883 tgl@sss.pgh.pa.us 241 [ + + ]: 3647998 : if (checkUnique != UNIQUE_CHECK_EXISTING)
242 : : {
243 : : OffsetNumber newitemoff;
244 : :
245 : : /*
246 : : * The only conflict predicate locking cares about for indexes is when
247 : : * an index tuple insert conflicts with an existing lock. We don't
248 : : * know the actual page we're going to insert on for sure just yet in
249 : : * checkingunique and !heapkeyspace cases, but it's okay to use the
250 : : * first page the value could be on (with scantid omitted) instead.
251 : : */
2048 tmunro@postgresql.or 252 : 3647971 : CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(insertstate.buf));
253 : :
254 : : /*
255 : : * Do the insertion. Note that insertstate contains cached binary
256 : : * search bounds established within _bt_check_unique when insertion is
257 : : * checkingunique.
258 : : */
2362 pg@bowt.ie 259 : 3647968 : newitemoff = _bt_findinsertloc(rel, &insertstate, checkingunique,
260 : : indexUnchanged, stack, heapRel);
889 andres@anarazel.de 261 : 3647968 : _bt_insertonpg(rel, heapRel, itup_key, insertstate.buf, InvalidBuffer,
262 : : stack, itup, insertstate.itemsz, newitemoff,
263 : : insertstate.postingoff, false);
264 : : }
265 : : else
266 : : {
267 : : /* just release the buffer */
2362 pg@bowt.ie 268 : 27 : _bt_relbuf(rel, insertstate.buf);
269 : : }
270 : :
271 : : /* be tidy */
2721 andrew@dunslane.net 272 [ + + ]: 3647995 : if (stack)
273 : 3165991 : _bt_freestack(stack);
2362 pg@bowt.ie 274 : 3647995 : pfree(itup_key);
275 : :
5883 tgl@sss.pgh.pa.us 276 : 3647995 : return is_unique;
277 : : }
278 : :
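The MAXALIGN() step that fills insertstate.itemsz above is plain alignment rounding: the tuple size is bumped up to the next maximum-alignment boundary so that later free-space checks agree with the space PageAddItem() will actually consume. A minimal standalone sketch of that arithmetic, assuming an 8-byte MAXIMUM_ALIGNOF (the DEMO_ names are stand-ins for the real macros in c.h):

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for PostgreSQL's MAXALIGN() from c.h, assuming an 8-byte
 * MAXIMUM_ALIGNOF.  Rounds a length up to the next alignment boundary.
 */
#define DEMO_MAXIMUM_ALIGNOF 8
#define DEMO_MAXALIGN(LEN) \
    (((uintptr_t) (LEN) + (DEMO_MAXIMUM_ALIGNOF - 1)) & \
     ~((uintptr_t) (DEMO_MAXIMUM_ALIGNOF - 1)))

int
main(void)
{
    /* a 20-byte index tuple is accounted for as 24 bytes of page space */
    size_t      rawsz = 20;

    printf("raw = %zu, maxaligned = %zu\n", rawsz, (size_t) DEMO_MAXALIGN(rawsz));
    return 0;
}

On a typical 64-bit build this prints raw = 20, maxaligned = 24, which is the figure the free-space accounting in this file works with.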
279 : : /*
280 : : * _bt_search_insert() -- _bt_search() wrapper for inserts
281 : : *
282 : : * Search the tree for a particular scankey, or more precisely for the first
283 : : * leaf page it could be on. Try to make use of the fastpath optimization's
284 : : * rightmost leaf page cache before actually searching the tree from the root
285 : : * page, though.
286 : : *
287 : : * Return value is a stack of parent-page pointers (though see notes about
288 : : * fastpath optimization and page splits below). insertstate->buf is set to
289 : : * the address of the leaf-page buffer, which is write-locked and pinned in
290 : : * all cases (if necessary by creating a new empty root page for caller).
291 : : *
292 : : * The fastpath optimization avoids most of the work of searching the tree
293 : : * repeatedly when a single backend inserts successive new tuples on the
294 : : * rightmost leaf page of an index. A backend cache of the rightmost leaf
295 : : * page is maintained within _bt_insertonpg(), and used here. The cache is
296 : : * invalidated here when an insert of a non-pivot tuple must take place on a
297 : : * non-rightmost leaf page.
298 : : *
299 : : * The optimization helps with indexes on an auto-incremented field. It also
300 : : * helps with indexes on datetime columns, as well as indexes with lots of
301 : : * NULL values. (NULLs usually get inserted in the rightmost page for single
302 : : * column indexes, since they usually get treated as coming after everything
303 : : * else in the key space. Individual NULL tuples will generally be placed on
304 : : * the rightmost leaf page due to the influence of the heap TID column.)
305 : : *
306 : : * Note that we avoid applying the optimization when there is insufficient
307 : : * space on the rightmost page to fit caller's new item. This is necessary
308 : : * because we'll need to return a real descent stack when a page split is
309 : : * expected (actually, caller can cope with a leaf page split that uses a NULL
310 : : * stack, but that's very slow and so must be avoided). Note also that the
311 : : * fastpath optimization acquires the lock on the page conditionally as a way
312 : : * of reducing extra contention when there are concurrent insertions into the
313 : : * rightmost page (we give up if we'd have to wait for the lock). We assume
314 : : * that it isn't useful to apply the optimization when there is contention,
315 : : * since each per-backend cache won't stay valid for long.
316 : : */
317 : : static BTStack
889 andres@anarazel.de 318 : 3648262 : _bt_search_insert(Relation rel, Relation heaprel, BTInsertState insertstate)
319 : : {
1998 pg@bowt.ie 320 [ - + ]: 3648262 : Assert(insertstate->buf == InvalidBuffer);
321 [ - + ]: 3648262 : Assert(!insertstate->bounds_valid);
322 [ - + ]: 3648262 : Assert(insertstate->postingoff == 0);
323 : :
324 [ + - + + ]: 3648262 : if (RelationGetTargetBlock(rel) != InvalidBlockNumber)
325 : : {
326 : : /* Simulate a _bt_getbuf() call with conditional locking */
327 [ + - ]: 37650 : insertstate->buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
1873 328 [ + + ]: 37650 : if (_bt_conditionallockbuf(rel, insertstate->buf))
329 : : {
330 : : Page page;
331 : : BTPageOpaque opaque;
332 : :
1998 333 : 37019 : _bt_checkpage(rel, insertstate->buf);
334 : 37019 : page = BufferGetPage(insertstate->buf);
1254 michael@paquier.xyz 335 : 37019 : opaque = BTPageGetOpaque(page);
336 : :
337 : : /*
338 : : * Check if the page is still the rightmost leaf page and has
339 : : * enough free space to accommodate the new tuple. Also check
340 : : * that the insertion scan key is strictly greater than the first
341 : : * non-pivot tuple on the page. (Note that we expect itup_key's
342 : : * scantid to be unset when our caller is a checkingunique
343 : : * inserter.)
344 : : */
1754 pg@bowt.ie 345 [ + + ]: 37019 : if (P_RIGHTMOST(opaque) &&
346 [ + - ]: 36986 : P_ISLEAF(opaque) &&
347 [ + - ]: 36986 : !P_IGNORE(opaque) &&
1998 348 [ + + + - ]: 73771 : PageGetFreeSpace(page) > insertstate->itemsz &&
349 [ + + ]: 73570 : PageGetMaxOffsetNumber(page) >= P_HIKEY &&
350 : 36785 : _bt_compare(rel, insertstate->itup_key, page, P_HIKEY) > 0)
351 : : {
352 : : /*
353 : : * Caller can use the fastpath optimization because cached
354 : : * block is still rightmost leaf page, which can fit caller's
355 : : * new tuple without splitting. Keep block in local cache for
356 : : * next insert, and have caller use NULL stack.
357 : : *
358 : : * Note that _bt_insert_parent() has an assertion that catches
359 : : * leaf page splits that somehow follow from a fastpath insert
360 : : * (it should only be passed a NULL stack when it must deal
361 : : * with a concurrent root page split, and never because a NULL
362 : : * stack was returned here).
363 : : */
364 : 36766 : return NULL;
365 : : }
366 : :
367 : : /* Page unsuitable for caller, drop lock and pin */
368 : 253 : _bt_relbuf(rel, insertstate->buf);
369 : : }
370 : : else
371 : : {
372 : : /* Lock unavailable, drop pin */
373 : 631 : ReleaseBuffer(insertstate->buf);
374 : : }
375 : :
376 : : /* Forget block, since cache doesn't appear to be useful */
377 : 884 : RelationSetTargetBlock(rel, InvalidBlockNumber);
378 : : }
379 : :
380 : : /* Cannot use optimization -- descend tree, return proper descent stack */
889 andres@anarazel.de 381 : 3611496 : return _bt_search(rel, heaprel, insertstate->itup_key, &insertstate->buf,
382 : : BT_WRITE);
383 : : }
384 : :
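_bt_search_insert()'s rightmost-leaf cache amounts to remembering the tail of an append-mostly structure: before paying for a descent from the root, check whether the cached rightmost page still has room and whether the new key still sorts past its first entry. A rough standalone analogue of that decision, using a plain sorted array in place of a locked buffer (every name below is illustrative, not an nbtree API):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_CAPACITY 8

typedef struct DemoLeaf
{
    int         keys[PAGE_CAPACITY];
    int         nkeys;
} DemoLeaf;

/*
 * Fastpath analogue: reuse the cached rightmost "page" only if it still has
 * room and the new key sorts strictly after its first entry; otherwise the
 * caller must fall back to a full descent from the root (not shown).
 */
static bool
demo_try_fastpath(DemoLeaf *rightmost, int newkey)
{
    int         pos;

    if (rightmost->nkeys >= PAGE_CAPACITY)
        return false;           /* no room: a real insert would need a split */
    if (rightmost->nkeys == 0 || newkey <= rightmost->keys[0])
        return false;           /* key may belong on some page to the left */

    /* key belongs here: find its slot and shift the tail up */
    for (pos = rightmost->nkeys; pos > 0 && rightmost->keys[pos - 1] > newkey; pos--)
        rightmost->keys[pos] = rightmost->keys[pos - 1];
    rightmost->keys[pos] = newkey;
    rightmost->nkeys++;
    return true;
}

int
main(void)
{
    DemoLeaf    leaf = {{10, 20, 30, 40, 50}, 5};

    printf("fastpath for 60: %d\n", demo_try_fastpath(&leaf, 60));  /* 1: appended */
    printf("fastpath for 5:  %d\n", demo_try_fastpath(&leaf, 5));   /* 0: full search */
    return 0;
}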
385 : : /*
386 : : * _bt_check_unique() -- Check for violation of unique index constraint
387 : : *
388 : : * Returns InvalidTransactionId if there is no conflict, else an xact ID
389 : : * we must wait for to see if it commits a conflicting tuple. If an actual
390 : : * conflict is detected, no return --- just ereport(). If an xact ID is
391 : : * returned, and the conflicting tuple still has a speculative insertion in
392 : : * progress, *speculativeToken is set to non-zero, and the caller can wait for
393 : : * the verdict on the insertion using SpeculativeInsertionWait().
394 : : *
395 : : * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
396 : : * InvalidTransactionId because we don't want to wait. In this case we
397 : : * set *is_unique to false if there is a potential conflict, and the
398 : : * core code must redo the uniqueness check later.
399 : : *
400 : : * As a side-effect, sets state in insertstate that can later be used by
401 : : * _bt_findinsertloc() to reuse most of the binary search work we do
402 : : * here.
403 : : *
404 : : * This code treats NULLs as equal, unlike the default semantics for unique
405 : : * indexes. So do not call here when there are NULL values in scan key and
406 : : * the index uses the default NULLS DISTINCT mode.
407 : : */
408 : : static TransactionId
2362 pg@bowt.ie 409 : 2634358 : _bt_check_unique(Relation rel, BTInsertState insertstate, Relation heapRel,
410 : : IndexUniqueCheck checkUnique, bool *is_unique,
411 : : uint32 *speculativeToken)
412 : : {
413 : 2634358 : IndexTuple itup = insertstate->itup;
1911 414 : 2634358 : IndexTuple curitup = NULL;
1612 415 : 2634358 : ItemId curitemid = NULL;
2362 416 : 2634358 : BTScanInsert itup_key = insertstate->itup_key;
417 : : SnapshotData SnapshotDirty;
418 : : OffsetNumber offset;
419 : : OffsetNumber maxoff;
420 : : Page page;
421 : : BTPageOpaque opaque;
9178 tgl@sss.pgh.pa.us 422 : 2634358 : Buffer nbuf = InvalidBuffer;
5883 423 : 2634358 : bool found = false;
2019 pg@bowt.ie 424 : 2634358 : bool inposting = false;
425 : 2634358 : bool prevalldead = true;
426 : 2634358 : int curposti = 0;
427 : :
428 : : /* Assume unique until we find a duplicate */
5883 tgl@sss.pgh.pa.us 429 : 2634358 : *is_unique = true;
430 : :
6740 431 : 2634358 : InitDirtySnapshot(SnapshotDirty);
432 : :
2362 pg@bowt.ie 433 : 2634358 : page = BufferGetPage(insertstate->buf);
1254 michael@paquier.xyz 434 : 2634358 : opaque = BTPageGetOpaque(page);
9178 tgl@sss.pgh.pa.us 435 : 2634358 : maxoff = PageGetMaxOffsetNumber(page);
436 : :
437 : : /*
438 : : * Find the first tuple with the same key.
439 : : *
440 : : * This also saves the binary search bounds in insertstate. We use them
441 : : * in the fastpath below, but also in the _bt_findinsertloc() call later.
442 : : */
2347 pg@bowt.ie 443 [ - + ]: 2634358 : Assert(!insertstate->bounds_valid);
2362 444 : 2634358 : offset = _bt_binsrch_insert(rel, insertstate);
445 : :
446 : : /*
447 : : * Scan over all equal tuples, looking for live conflicts.
448 : : */
449 [ + + - + ]: 2634358 : Assert(!insertstate->bounds_valid || insertstate->low == offset);
2328 450 [ - + ]: 2634358 : Assert(!itup_key->anynullkeys);
2362 451 [ + - ]: 2634358 : Assert(itup_key->scantid == NULL);
452 : : for (;;)
453 : : {
454 : : /*
455 : : * Each iteration of the loop processes one heap TID, not one index
456 : : * tuple. Current offset number for page isn't usually advanced on
457 : : * iterations that process heap TIDs from posting list tuples.
458 : : *
459 : : * "inposting" state is set when _inside_ a posting list --- not when
460 : : * we're at the start (or end) of a posting list. We advance curposti
461 : : * at the end of the iteration when inside a posting list tuple. In
462 : : * general, every loop iteration either advances the page offset or
463 : : * advances curposti --- an iteration that handles the rightmost/max
464 : : * heap TID in a posting list finally advances the page offset (and
465 : : * unsets "inposting").
466 : : *
467 : : * Make sure the offset points to an actual index tuple before trying
468 : : * to examine it...
469 : : */
9178 tgl@sss.pgh.pa.us 470 [ + + ]: 8639771 : if (offset <= maxoff)
471 : : {
472 : : /*
473 : : * Fastpath: In most cases, we can use cached search bounds to
474 : : * limit our consideration to items that are definitely
475 : : * duplicates. This fastpath doesn't apply when the original page
476 : : * is empty, or when initial offset is past the end of the
477 : : * original page, which may indicate that we need to examine a
478 : : * second or subsequent page.
479 : : *
480 : : * Note that this optimization allows us to avoid calling
481 : : * _bt_compare() directly when there are no duplicates, as long as
482 : : * the offset where the key will go is not at the end of the page.
483 : : */
2362 pg@bowt.ie 484 [ + + + + ]: 7164011 : if (nbuf == InvalidBuffer && offset == insertstate->stricthigh)
485 : : {
486 [ - + ]: 1044320 : Assert(insertstate->bounds_valid);
487 [ + + - + ]: 1044320 : Assert(insertstate->low >= P_FIRSTDATAKEY(opaque));
488 [ - + ]: 1044320 : Assert(insertstate->low <= insertstate->stricthigh);
2328 489 [ - + ]: 1044320 : Assert(_bt_compare(rel, itup_key, page, offset) < 0);
2362 490 : 1044320 : break;
491 : : }
492 : :
493 : : /*
494 : : * We can skip items that are already marked killed.
495 : : *
496 : : * In the presence of heavy update activity an index may contain
497 : : * many killed items with the same key; running _bt_compare() on
498 : : * each killed item gets expensive. Just advance over killed
499 : : * items as quickly as we can. We only apply _bt_compare() when
500 : : * we get to a non-killed item. We could reuse the bounds to
501 : : * avoid _bt_compare() calls for known equal tuples, but it
502 : : * doesn't seem worth it.
503 : : */
2019 504 [ + + ]: 6119691 : if (!inposting)
505 : 3863452 : curitemid = PageGetItemId(page, offset);
506 [ + + + + ]: 6119691 : if (inposting || !ItemIdIsDead(curitemid))
507 : : {
508 : : ItemPointerData htid;
509 : 5826058 : bool all_dead = false;
510 : :
511 [ + + ]: 5826058 : if (!inposting)
512 : : {
513 : : /* Plain tuple, or first TID in posting list tuple */
514 [ + + ]: 3569819 : if (_bt_compare(rel, itup_key, page, offset) != 0)
515 : 101430 : break; /* we're past all the equal tuples */
516 : :
517 : : /* Advance curitup */
518 : 3468389 : curitup = (IndexTuple) PageGetItem(page, curitemid);
519 [ - + ]: 3468389 : Assert(!BTreeTupleIsPivot(curitup));
520 : : }
521 : :
522 : : /* okay, we gotta fetch the heap tuple using htid ... */
523 [ + + ]: 5724628 : if (!BTreeTupleIsPosting(curitup))
524 : : {
525 : : /* ... htid is from simple non-pivot tuple */
526 [ - + ]: 3445193 : Assert(!inposting);
527 : 3445193 : htid = curitup->t_tid;
528 : : }
529 [ + + ]: 2279435 : else if (!inposting)
530 : : {
531 : : /* ... htid is first TID in new posting list */
532 : 23196 : inposting = true;
533 : 23196 : prevalldead = true;
534 : 23196 : curposti = 0;
535 : 23196 : htid = *BTreeTupleGetPostingN(curitup, 0);
536 : : }
537 : : else
538 : : {
539 : : /* ... htid is second or subsequent TID in posting list */
540 [ - + ]: 2256239 : Assert(curposti > 0);
541 : 2256239 : htid = *BTreeTupleGetPostingN(curitup, curposti);
542 : : }
543 : :
544 : : /*
545 : : * If we are doing a recheck, we expect to find the tuple we
546 : : * are rechecking. It's not a duplicate, but we have to keep
547 : : * scanning.
548 : : */
5883 tgl@sss.pgh.pa.us 549 [ + + + + ]: 5724740 : if (checkUnique == UNIQUE_CHECK_EXISTING &&
550 : 112 : ItemPointerCompare(&htid, &itup->t_tid) == 0)
551 : : {
552 : 27 : found = true;
553 : : }
554 : :
555 : : /*
556 : : * Check if there are any table tuples for this index entry
557 : : * satisfying SnapshotDirty. This is necessary because for AMs
558 : : * with optimizations like heap's HOT, we have just a single
559 : : * index entry for the entire chain.
560 : : */
2357 andres@anarazel.de 561 [ + + ]: 5724601 : else if (table_index_fetch_tuple_check(heapRel, &htid,
562 : : &SnapshotDirty,
563 : : &all_dead))
564 : : {
565 : : TransactionId xwait;
566 : :
567 : : /*
568 : : * It is a duplicate. If we are only doing a partial
569 : : * check, then don't bother checking if the tuple is being
570 : : * updated in another transaction. Just return the fact
571 : : * that it is a potential conflict and leave the full
572 : : * check till later. Don't invalidate binary search
573 : : * bounds.
574 : : */
5883 tgl@sss.pgh.pa.us 575 [ + + ]: 364 : if (checkUnique == UNIQUE_CHECK_PARTIAL)
576 : : {
577 [ - + ]: 100 : if (nbuf != InvalidBuffer)
5883 tgl@sss.pgh.pa.us 578 :UBC 0 : _bt_relbuf(rel, nbuf);
5883 tgl@sss.pgh.pa.us 579 :CBC 100 : *is_unique = false;
580 : 112 : return InvalidTransactionId;
581 : : }
582 : :
583 : : /*
584 : : * If this tuple is being updated by other transaction
585 : : * then we have to wait for its commit/abort.
586 : : */
587 : 528 : xwait = (TransactionIdIsValid(SnapshotDirty.xmin)) ?
588 [ + + ]: 264 : SnapshotDirty.xmin : SnapshotDirty.xmax;
589 : :
8506 590 [ + + ]: 264 : if (TransactionIdIsValid(xwait))
591 : : {
592 [ - + ]: 12 : if (nbuf != InvalidBuffer)
8506 tgl@sss.pgh.pa.us 593 :UBC 0 : _bt_relbuf(rel, nbuf);
594 : : /* Tell _bt_doinsert to wait... */
3774 andres@anarazel.de 595 :CBC 12 : *speculativeToken = SnapshotDirty.speculativeToken;
596 : : /* Caller releases lock on buf immediately */
2347 pg@bowt.ie 597 : 12 : insertstate->bounds_valid = false;
8506 tgl@sss.pgh.pa.us 598 : 12 : return xwait;
599 : : }
600 : :
601 : : /*
602 : : * Otherwise we have a definite conflict. But before
603 : : * complaining, look to see if the tuple we want to insert
604 : : * is itself now committed dead --- if so, don't complain.
605 : : * This is a waste of time in normal scenarios but we must
606 : : * do it to support CREATE INDEX CONCURRENTLY.
607 : : *
608 : : * We must follow HOT-chains here because during
609 : : * concurrent index build, we insert the root TID though
610 : : * the actual tuple may be somewhere in the HOT-chain.
611 : : * While following the chain we might not stop at the
612 : : * exact tuple which triggered the insert, but that's OK
613 : : * because if we find a live tuple anywhere in this chain,
614 : : * we have a unique key conflict. The other live tuple is
615 : : * not part of this chain because it had a different index
616 : : * entry.
617 : : */
1899 pg@bowt.ie 618 : 252 : htid = itup->t_tid;
619 [ - + ]: 252 : if (table_index_fetch_tuple_check(heapRel, &htid,
620 : : SnapshotSelf, NULL))
621 : : {
622 : : /* Normal case --- it's still live */
623 : : }
624 : : else
625 : : {
626 : : /*
627 : : * It's been deleted, so no error, and no need to
628 : : * continue searching
629 : : */
6952 tgl@sss.pgh.pa.us 630 :UBC 0 : break;
631 : : }
632 : :
633 : : /*
634 : : * Check for a conflict-in as we would if we were going to
635 : : * write to this page. We aren't actually going to write,
636 : : * but we want a chance to report SSI conflicts that would
637 : : * otherwise be masked by this unique constraint
638 : : * violation.
639 : : */
2048 tmunro@postgresql.or 640 :CBC 252 : CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(insertstate->buf));
641 : :
642 : : /*
643 : : * This is a definite conflict. Break the tuple down into
644 : : * datums and report the error. But first, make sure we
645 : : * release the buffer locks we're holding ---
646 : : * BuildIndexValueDescription could make catalog accesses,
647 : : * which in the worst case might touch this same index and
648 : : * cause deadlocks.
649 : : */
5880 tgl@sss.pgh.pa.us 650 [ - + ]: 248 : if (nbuf != InvalidBuffer)
5880 tgl@sss.pgh.pa.us 651 :UBC 0 : _bt_relbuf(rel, nbuf);
2362 pg@bowt.ie 652 :CBC 248 : _bt_relbuf(rel, insertstate->buf);
653 : 248 : insertstate->buf = InvalidBuffer;
2347 654 : 248 : insertstate->bounds_valid = false;
655 : :
656 : : {
657 : : Datum values[INDEX_MAX_KEYS];
658 : : bool isnull[INDEX_MAX_KEYS];
659 : : char *key_desc;
660 : :
5880 tgl@sss.pgh.pa.us 661 : 248 : index_deform_tuple(itup, RelationGetDescr(rel),
662 : : values, isnull);
663 : :
3890 sfrost@snowman.net 664 : 248 : key_desc = BuildIndexValueDescription(rel, values,
665 : : isnull);
666 : :
5880 tgl@sss.pgh.pa.us 667 [ + - + + ]: 248 : ereport(ERROR,
668 : : (errcode(ERRCODE_UNIQUE_VIOLATION),
669 : : errmsg("duplicate key value violates unique constraint \"%s\"",
670 : : RelationGetRelationName(rel)),
671 : : key_desc ? errdetail("Key %s already exists.",
672 : : key_desc) : 0,
673 : : errtableconstraint(heapRel,
674 : : RelationGetRelationName(rel))));
675 : : }
676 : : }
2019 pg@bowt.ie 677 [ + + + + : 5724237 : else if (all_dead && (!inposting ||
+ + ]
678 : 18707 : (prevalldead &&
679 [ + + ]: 18707 : curposti == BTreeTupleGetNPosting(curitup) - 1)))
680 : : {
681 : : /*
682 : : * The conflicting tuple (or all HOT chains pointed to by
683 : : * all posting list TIDs) is dead to everyone, so mark the
684 : : * index entry killed.
685 : : */
6561 tgl@sss.pgh.pa.us 686 : 52915 : ItemIdMarkDead(curitemid);
687 : 52915 : opaque->btpo_flags |= BTP_HAS_GARBAGE;
688 : :
689 : : /*
690 : : * Mark buffer with a dirty hint, since state is not
691 : : * crucial. Be sure to mark the proper buffer dirty.
692 : : */
693 [ + + ]: 52915 : if (nbuf != InvalidBuffer)
4464 jdavis@postgresql.or 694 : 3 : MarkBufferDirtyHint(nbuf, true);
695 : : else
2362 pg@bowt.ie 696 : 52912 : MarkBufferDirtyHint(insertstate->buf, true);
697 : : }
698 : :
699 : : /*
700 : : * Remember if posting list tuple has even a single HOT chain
701 : : * whose members are not all dead
702 : : */
2019 703 [ + + + + ]: 5724264 : if (!all_dead && inposting)
704 : 2260618 : prevalldead = false;
705 : : }
706 : : }
707 : :
708 [ + + + + ]: 7493657 : if (inposting && curposti < BTreeTupleGetNPosting(curitup) - 1)
709 : : {
710 : : /* Advance to next TID in same posting list */
711 : 2256239 : curposti++;
712 : 2256239 : continue;
713 : : }
714 [ + + ]: 5237418 : else if (offset < maxoff)
715 : : {
716 : : /* Advance to next tuple */
717 : 3744152 : curposti = 0;
718 : 3744152 : inposting = false;
9178 tgl@sss.pgh.pa.us 719 : 3744152 : offset = OffsetNumberNext(offset);
720 : : }
721 : : else
722 : : {
723 : : int highkeycmp;
724 : :
725 : : /* If scankey == hikey we gotta check the next page too */
726 [ + + ]: 1493266 : if (P_RIGHTMOST(opaque))
727 : 1420545 : break;
2362 pg@bowt.ie 728 : 72721 : highkeycmp = _bt_compare(rel, itup_key, page, P_HIKEY);
729 [ - + ]: 72721 : Assert(highkeycmp <= 0);
730 [ + + ]: 72721 : if (highkeycmp != 0)
9178 tgl@sss.pgh.pa.us 731 : 67699 : break;
732 : : /* Advance to next non-dead page --- there must be one */
733 : : for (;;)
8232 tgl@sss.pgh.pa.us 734 :UBC 0 : {
2019 pg@bowt.ie 735 :CBC 5022 : BlockNumber nblkno = opaque->btpo_next;
736 : :
7808 tgl@sss.pgh.pa.us 737 : 5022 : nbuf = _bt_relandgetbuf(rel, nbuf, nblkno, BT_READ);
3426 kgrittn@postgresql.o 738 : 5022 : page = BufferGetPage(nbuf);
1254 michael@paquier.xyz 739 : 5022 : opaque = BTPageGetOpaque(page);
8232 tgl@sss.pgh.pa.us 740 [ + - ]: 5022 : if (!P_IGNORE(opaque))
741 : 5022 : break;
8232 tgl@sss.pgh.pa.us 742 [ # # ]:UBC 0 : if (P_RIGHTMOST(opaque))
6459 743 [ # # ]: 0 : elog(ERROR, "fell off the end of index \"%s\"",
744 : : RelationGetRelationName(rel));
745 : : }
746 : : /* Will also advance to next tuple */
2019 pg@bowt.ie 747 :CBC 5022 : curposti = 0;
748 : 5022 : inposting = false;
9178 tgl@sss.pgh.pa.us 749 : 5022 : maxoff = PageGetMaxOffsetNumber(page);
750 [ + + ]: 5022 : offset = P_FIRSTDATAKEY(opaque);
751 : : /* Don't invalidate binary search bounds */
752 : : }
753 : : }
754 : :
755 : : /*
756 : : * If we are doing a recheck then we should have found the tuple we are
757 : : * checking. Otherwise there's something very wrong --- probably, the
758 : : * index is on a non-immutable expression.
759 : : */
5883 760 [ + + - + ]: 2633994 : if (checkUnique == UNIQUE_CHECK_EXISTING && !found)
5883 tgl@sss.pgh.pa.us 761 [ # # ]:UBC 0 : ereport(ERROR,
762 : : (errcode(ERRCODE_INTERNAL_ERROR),
763 : : errmsg("failed to re-find tuple within index \"%s\"",
764 : : RelationGetRelationName(rel)),
765 : : errhint("This may be because of a non-immutable index expression."),
766 : : errtableconstraint(heapRel,
767 : : RelationGetRelationName(rel))));
768 : :
9178 tgl@sss.pgh.pa.us 769 [ + + ]:CBC 2633994 : if (nbuf != InvalidBuffer)
8819 770 : 2880 : _bt_relbuf(rel, nbuf);
771 : :
8780 772 : 2633994 : return InvalidTransactionId;
773 : : }
774 : :
775 : :
776 : : /*
777 : : * _bt_findinsertloc() -- Finds an insert location for a tuple
778 : : *
779 : : * On entry, insertstate buffer contains the page the new tuple belongs
780 : : * on. It is exclusive-locked and pinned by the caller.
781 : : *
782 : : * If 'checkingunique' is true, the buffer on entry is the first page
783 : : * that contains duplicates of the new key. If there are duplicates on
784 : : * multiple pages, the correct insertion position might be some page to
785 : : * the right, rather than the first page. In that case, this function
786 : : * moves right to the correct target page.
787 : : *
788 : : * (In a !heapkeyspace index, there can be multiple pages with the same
789 : : * high key, where the new tuple could legitimately be placed. In
790 : : * that case, the caller passes the first page containing duplicates,
791 : : * just like when checkingunique=true. If that page doesn't have enough
792 : : * room for the new tuple, this function moves right, trying to find a
793 : : * legal page that does.)
794 : : *
795 : : * If 'indexUnchanged' is true, this is for an UPDATE that didn't
796 : : * logically change the indexed value, but must nevertheless have a new
797 : : * entry to point to a successor version. This hint from the executor
798 : : * will influence our behavior when the page might have to be split and
799 : : * we must consider our options. Bottom-up index deletion can avoid
800 : : * pathological version-driven page splits, but we only want to go to the
801 : : * trouble of trying it when we already have moderate confidence that
802 : : * it's appropriate. The hint should not significantly affect our
803 : : * behavior over time unless practically all inserts on to the leaf page
804 : : * get the hint.
805 : : *
806 : : * On exit, insertstate buffer contains the chosen insertion page, and
807 : : * the offset within that page is returned. If _bt_findinsertloc needed
808 : : * to move right, the lock and pin on the original page are released, and
809 : : * the new buffer is exclusively locked and pinned instead.
810 : : *
811 : : * If insertstate contains cached binary search bounds, we will take
812 : : * advantage of them. This avoids repeating comparisons that we made in
813 : : * _bt_check_unique() already.
814 : : */
815 : : static OffsetNumber
6762 bruce@momjian.us 816 : 3647968 : _bt_findinsertloc(Relation rel,
817 : : BTInsertState insertstate,
818 : : bool checkingunique,
819 : : bool indexUnchanged,
820 : : BTStack stack,
821 : : Relation heapRel)
822 : : {
2362 pg@bowt.ie 823 : 3647968 : BTScanInsert itup_key = insertstate->itup_key;
824 : 3647968 : Page page = BufferGetPage(insertstate->buf);
825 : : BTPageOpaque opaque;
826 : : OffsetNumber newitemoff;
827 : :
1254 michael@paquier.xyz 828 : 3647968 : opaque = BTPageGetOpaque(page);
829 : :
830 : : /* Check 1/3 of a page restriction */
179 pg@bowt.ie 831 [ - + ]: 3647968 : if (unlikely(insertstate->itemsz > BTMaxItemSize))
2362 pg@bowt.ie 832 :UBC 0 : _bt_check_third_page(rel, heapRel, itup_key->heapkeyspace, page,
833 : : insertstate->itup);
834 : :
1754 pg@bowt.ie 835 [ + - - + ]:CBC 3647968 : Assert(P_ISLEAF(opaque) && !P_INCOMPLETE_SPLIT(opaque));
2362 836 [ + + - + ]: 3647968 : Assert(!insertstate->bounds_valid || checkingunique);
837 [ + - - + ]: 3647968 : Assert(!itup_key->heapkeyspace || itup_key->scantid != NULL);
838 [ - + - - ]: 3647968 : Assert(itup_key->heapkeyspace || itup_key->scantid == NULL);
2019 839 [ + + - + ]: 3647968 : Assert(!itup_key->allequalimage || itup_key->heapkeyspace);
840 : :
2362 841 [ + - ]: 3647968 : if (itup_key->heapkeyspace)
842 : : {
843 : : /* Keep track of whether checkingunique duplicate seen */
1697 844 : 3647968 : bool uniquedup = indexUnchanged;
845 : :
846 : : /*
847 : : * If we're inserting into a unique index, we may have to walk right
848 : : * through leaf pages to find the one leaf page that we must insert on
849 : : * to.
850 : : *
851 : : * This is needed for checkingunique callers because a scantid was not
852 : : * used when we called _bt_search(). scantid can only be set after
853 : : * _bt_check_unique() has checked for duplicates. The buffer
854 : : * initially stored in insertstate->buf has the page where the first
855 : : * duplicate key might be found, which isn't always the page that new
856 : : * tuple belongs on. The heap TID attribute for new tuple (scantid)
857 : : * could force us to insert on a sibling page, though that should be
858 : : * very rare in practice.
859 : : */
2362 860 [ + + ]: 3647968 : if (checkingunique)
861 : : {
2019 862 [ + + ]: 2634064 : if (insertstate->low < insertstate->stricthigh)
863 : : {
864 : : /* Encountered a duplicate in _bt_check_unique() */
865 [ - + ]: 220541 : Assert(insertstate->bounds_valid);
866 : 220541 : uniquedup = true;
867 : : }
868 : :
869 : : for (;;)
870 : : {
871 : : /*
872 : : * Does the new tuple belong on this page?
873 : : *
874 : : * The earlier _bt_check_unique() call may well have
875 : : * established a strict upper bound on the offset for the new
876 : : * item. If it's not the last item of the page (i.e. if there
877 : : * is at least one tuple on the page that goes after the tuple
878 : : * we're inserting) then we know that the tuple belongs on
879 : : * this page. We can skip the high key check.
880 : : */
2362 881 [ + + ]: 2639086 : if (insertstate->bounds_valid &&
882 [ + - + + ]: 5257534 : insertstate->low <= insertstate->stricthigh &&
883 : 2628767 : insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
884 : 1132432 : break;
885 : :
886 : : /* Test '<=', not '!=', since scantid is set now */
1754 887 [ + + + + ]: 1587122 : if (P_RIGHTMOST(opaque) ||
2362 888 : 80468 : _bt_compare(rel, itup_key, page, P_HIKEY) <= 0)
889 : : break;
890 : :
889 andres@anarazel.de 891 : 5022 : _bt_stepright(rel, heapRel, insertstate, stack);
892 : : /* Update local state after stepping right */
2362 pg@bowt.ie 893 : 5022 : page = BufferGetPage(insertstate->buf);
1254 michael@paquier.xyz 894 : 5022 : opaque = BTPageGetOpaque(page);
895 : : /* Assume duplicates (if checkingunique) */
2019 pg@bowt.ie 896 : 5022 : uniquedup = true;
897 : : }
898 : : }
899 : :
900 : : /*
901 : : * If the target page cannot fit newitem, try to avoid splitting the
902 : : * page on insert by performing deletion or deduplication now
903 : : */
904 [ + + ]: 3647968 : if (PageGetFreeSpace(page) < insertstate->itemsz)
1754 905 : 24967 : _bt_delete_or_dedup_one_page(rel, heapRel, insertstate, false,
906 : : checkingunique, uniquedup,
907 : : indexUnchanged);
908 : : }
909 : : else
910 : : {
911 : : /*----------
912 : : * This is a !heapkeyspace (version 2 or 3) index. The current page
913 : : * is the first page that we could insert the new tuple to, but there
914 : : * may be other pages to the right that we could opt to use instead.
915 : : *
916 : : * If the new key is equal to one or more existing keys, we can
917 : : * legitimately place it anywhere in the series of equal keys. In
918 : : * fact, if the new key is equal to the page's "high key" we can place
919 : : * it on the next page. If it is equal to the high key, and there's
920 : : * not room to insert the new tuple on the current page without
921 : : * splitting, then we move right hoping to find more free space and
922 : : * avoid a split.
923 : : *
924 : : * Keep scanning right until we
925 : : * (a) find a page with enough free space,
926 : : * (b) reach the last page where the tuple can legally go, or
927 : : * (c) get tired of searching.
928 : : * (c) is not flippant; it is important because if there are many
929 : : * pages' worth of equal keys, it's better to split one of the early
930 : : * pages than to scan all the way to the end of the run of equal keys
931 : : * on every insert. We implement "get tired" as a random choice,
932 : : * since stopping after scanning a fixed number of pages wouldn't work
933 : : * well (we'd never reach the right-hand side of previously split
934 : : * pages). The probability of moving right is set at 0.99, which may
935 : : * seem too high to change the behavior much, but it does an excellent
936 : : * job of preventing O(N^2) behavior with many equal keys.
937 : : *----------
938 : : */
2362 pg@bowt.ie 939 [ # # ]:UBC 0 : while (PageGetFreeSpace(page) < insertstate->itemsz)
940 : : {
941 : : /*
942 : : * Before considering moving right, see if we can obtain enough
943 : : * space by erasing LP_DEAD items
944 : : */
1754 945 [ # # ]: 0 : if (P_HAS_GARBAGE(opaque))
946 : : {
947 : : /* Perform simple deletion */
948 : 0 : _bt_delete_or_dedup_one_page(rel, heapRel, insertstate, true,
949 : : false, false, false);
950 : :
2362 951 [ # # ]: 0 : if (PageGetFreeSpace(page) >= insertstate->itemsz)
952 : 0 : break; /* OK, now we have enough space */
953 : : }
954 : :
955 : : /*
956 : : * Nope, so check conditions (b) and (c) enumerated above
957 : : *
958 : : * The earlier _bt_check_unique() call may well have established a
959 : : * strict upper bound on the offset for the new item. If it's not
960 : : * the last item of the page (i.e. if there is at least one tuple
961 : : * on the page that's greater than the tuple we're inserting)
962 : : * then we know that the tuple belongs on this page. We can skip
963 : : * the high key check.
964 : : */
965 [ # # ]: 0 : if (insertstate->bounds_valid &&
966 [ # # # # ]: 0 : insertstate->low <= insertstate->stricthigh &&
967 : 0 : insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
968 : 0 : break;
969 : :
1754 970 [ # # # # ]: 0 : if (P_RIGHTMOST(opaque) ||
2362 971 [ # # ]: 0 : _bt_compare(rel, itup_key, page, P_HIKEY) != 0 ||
1378 tgl@sss.pgh.pa.us 972 : 0 : pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 100))
973 : : break;
974 : :
889 andres@anarazel.de 975 : 0 : _bt_stepright(rel, heapRel, insertstate, stack);
976 : : /* Update local state after stepping right */
2362 pg@bowt.ie 977 : 0 : page = BufferGetPage(insertstate->buf);
1254 michael@paquier.xyz 978 : 0 : opaque = BTPageGetOpaque(page);
979 : : }
980 : : }
981 : :
982 : : /*
983 : : * We should now be on the correct page. Find the offset within the page
984 : : * for the new tuple. (Possibly reusing earlier search bounds.)
985 : : */
1754 pg@bowt.ie 986 [ + + - + ]:CBC 3647968 : Assert(P_RIGHTMOST(opaque) ||
987 : : _bt_compare(rel, itup_key, page, P_HIKEY) <= 0);
988 : :
2019 989 : 3647968 : newitemoff = _bt_binsrch_insert(rel, insertstate);
990 : :
991 [ + + ]: 3647968 : if (insertstate->postingoff == -1)
992 : : {
993 : : /*
994 : : * There is an overlapping posting list tuple with its LP_DEAD bit
995 : : * set. We don't want to unnecessarily unset its LP_DEAD bit while
996 : : * performing a posting list split, so perform simple index tuple
997 : : * deletion early.
998 : : */
1754 999 : 1 : _bt_delete_or_dedup_one_page(rel, heapRel, insertstate, true,
1000 : : false, false, false);
1001 : :
1002 : : /*
1003 : : * Do new binary search. New insert location cannot overlap with any
1004 : : * posting list now.
1005 : : */
1006 [ - + ]: 1 : Assert(!insertstate->bounds_valid);
2019 1007 : 1 : insertstate->postingoff = 0;
1008 : 1 : newitemoff = _bt_binsrch_insert(rel, insertstate);
1009 [ - + ]: 1 : Assert(insertstate->postingoff == 0);
1010 : : }
1011 : :
1012 : 3647968 : return newitemoff;
1013 : : }
1014 : :
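In the !heapkeyspace branch above, pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 100) is true roughly once in a hundred draws, so the insert keeps moving right with probability about 0.99 per page, and the number of pages visited within a long run of equal keys is geometric with a mean near 100. A small standalone simulation of that "get tired" rule, using rand() instead of PostgreSQL's PRNG:

#include <stdio.h>
#include <stdlib.h>

/*
 * "Get tired" sketch: on each page, stop with probability 1/100 and keep
 * moving right otherwise, mirroring the PG_UINT32_MAX / 100 threshold above.
 */
int
main(void)
{
    const int   trials = 100000;
    long        total_pages = 0;

    srand(12345);
    for (int i = 0; i < trials; i++)
    {
        int         pages = 1;

        while (rand() % 100 != 0)   /* 99% chance: step right to the next page */
            pages++;
        total_pages += pages;
    }
    /* mean of the geometric distribution: comes out near 100 pages */
    printf("average pages visited before stopping: %.1f\n",
           (double) total_pages / trials);
    return 0;
}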
1015 : : /*
1016 : : * Step right to next non-dead page, during insertion.
1017 : : *
1018 : : * This is a bit more complicated than moving right in a search. We must
1019 : : * write-lock the target page before releasing write lock on current page;
1020 : : * else someone else's _bt_check_unique scan could fail to see our insertion.
1021 : : * Write locks on intermediate dead pages won't do because we don't know when
1022 : : * they will get de-linked from the tree.
1023 : : *
1024 : : * This is more aggressive than it needs to be for non-unique !heapkeyspace
1025 : : * indexes.
1026 : : */
1027 : : static void
819 1028 : 5022 : _bt_stepright(Relation rel, Relation heaprel, BTInsertState insertstate,
1029 : : BTStack stack)
1030 : : {
1031 : : Page page;
1032 : : BTPageOpaque opaque;
1033 : : Buffer rbuf;
1034 : : BlockNumber rblkno;
1035 : :
1036 [ - + ]: 5022 : Assert(heaprel != NULL);
2362 1037 : 5022 : page = BufferGetPage(insertstate->buf);
1254 michael@paquier.xyz 1038 : 5022 : opaque = BTPageGetOpaque(page);
1039 : :
2362 pg@bowt.ie 1040 : 5022 : rbuf = InvalidBuffer;
1754 1041 : 5022 : rblkno = opaque->btpo_next;
1042 : : for (;;)
1043 : : {
2362 1044 : 5022 : rbuf = _bt_relandgetbuf(rel, rbuf, rblkno, BT_WRITE);
1045 : 5022 : page = BufferGetPage(rbuf);
1254 michael@paquier.xyz 1046 : 5022 : opaque = BTPageGetOpaque(page);
1047 : :
1048 : : /*
1049 : : * If this page was incompletely split, finish the split now. We do
1050 : : * this while holding a lock on the left sibling, which is not good
1051 : : * because finishing the split could be a fairly lengthy operation.
1052 : : * But this should happen very seldom.
1053 : : */
1754 pg@bowt.ie 1054 [ - + ]: 5022 : if (P_INCOMPLETE_SPLIT(opaque))
1055 : : {
889 andres@anarazel.de 1056 :UBC 0 : _bt_finish_split(rel, heaprel, rbuf, stack);
2362 pg@bowt.ie 1057 : 0 : rbuf = InvalidBuffer;
1058 : 0 : continue;
1059 : : }
1060 : :
1754 pg@bowt.ie 1061 [ + - ]:CBC 5022 : if (!P_IGNORE(opaque))
2362 1062 : 5022 : break;
1754 pg@bowt.ie 1063 [ # # ]:UBC 0 : if (P_RIGHTMOST(opaque))
2362 1064 [ # # ]: 0 : elog(ERROR, "fell off the end of index \"%s\"",
1065 : : RelationGetRelationName(rel));
1066 : :
1754 1067 : 0 : rblkno = opaque->btpo_next;
1068 : : }
1069 : : /* rbuf locked; unlock buf, update state for caller */
2362 pg@bowt.ie 1070 :CBC 5022 : _bt_relbuf(rel, insertstate->buf);
1071 : 5022 : insertstate->buf = rbuf;
1072 : 5022 : insertstate->bounds_valid = false;
6762 bruce@momjian.us 1073 : 5022 : }
1074 : :
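_bt_stepright() relies on lock coupling: the right sibling is write-locked before the current page's lock is given up, so a concurrent _bt_check_unique() scan can never find a moment when neither page is locked. The sketch below shows the same hand-over-hand ordering on a toy linked list of "pages" guarded by pthread mutexes (compile with -pthread); DemoPage and demo_step_right are illustrative names, not nbtree code:

#include <pthread.h>
#include <stdio.h>

typedef struct DemoPage
{
    int         blkno;
    pthread_mutex_t lock;
    struct DemoPage *next;
} DemoPage;

static DemoPage page3 = {3, PTHREAD_MUTEX_INITIALIZER, NULL};
static DemoPage page2 = {2, PTHREAD_MUTEX_INITIALIZER, &page3};
static DemoPage page1 = {1, PTHREAD_MUTEX_INITIALIZER, &page2};

/* Hand-over-hand step: take the right sibling's lock, only then release ours */
static void
demo_step_right(DemoPage **current)
{
    DemoPage   *next = (*current)->next;

    pthread_mutex_lock(&next->lock);
    pthread_mutex_unlock(&(*current)->lock);
    *current = next;
}

int
main(void)
{
    DemoPage   *cur = &page1;

    pthread_mutex_lock(&cur->lock);
    while (cur->next != NULL)
    {
        demo_step_right(&cur);
        printf("now holding only the lock on block %d\n", cur->blkno);
    }
    pthread_mutex_unlock(&cur->lock);
    return 0;
}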
1075 : : /*----------
1076 : : * _bt_insertonpg() -- Insert a tuple on a particular page in the index.
1077 : : *
1078 : : * This recursive procedure does the following things:
1079 : : *
1080 : : * + if postingoff != 0, splits existing posting list tuple
1081 : : * (since it overlaps with new 'itup' tuple).
1082 : : * + if necessary, splits the target page, using 'itup_key' for
1083 : : * suffix truncation on leaf pages (caller passes NULL for
1084 : : * non-leaf pages).
1085 : : * + inserts the new tuple (might be split from posting list).
1086 : : * + if the page was split, pops the parent stack, and finds the
1087 : : * right place to insert the new child pointer (by walking
1088 : : * right using information stored in the parent stack).
1089 : : * + invokes itself with the appropriate tuple for the right
1090 : : * child page on the parent.
1091 : : * + updates the metapage if a true root or fast root is split.
1092 : : *
1093 : : * On entry, we must have the correct buffer in which to do the
1094 : : * insertion, and the buffer must be pinned and write-locked. On return,
1095 : : * we will have dropped both the pin and the lock on the buffer.
1096 : : *
1097 : : * This routine only performs retail tuple insertions. 'itup' should
1098 : : * always be either a non-highkey leaf item, or a downlink (new high
1099 : : * key items are created indirectly, when a page is split). When
1100 : : * inserting to a non-leaf page, 'cbuf' is the left-sibling of the page
1101 : : * we're inserting the downlink for. This function will clear the
1102 : : * INCOMPLETE_SPLIT flag on it, and release the buffer.
1103 : : *----------
1104 : : */
1105 : : static void
1106 : 3658728 : _bt_insertonpg(Relation rel,
1107 : : Relation heaprel,
1108 : : BTScanInsert itup_key,
1109 : : Buffer buf,
1110 : : Buffer cbuf,
1111 : : BTStack stack,
1112 : : IndexTuple itup,
1113 : : Size itemsz,
1114 : : OffsetNumber newitemoff,
1115 : : int postingoff,
1116 : : bool split_only_page)
1117 : : {
1118 : : Page page;
1119 : : BTPageOpaque opaque;
1120 : : bool isleaf,
1121 : : isroot,
1122 : : isrightmost,
1123 : : isonly;
2019 pg@bowt.ie 1124 : 3658728 : IndexTuple oposting = NULL;
1125 : 3658728 : IndexTuple origitup = NULL;
1126 : 3658728 : IndexTuple nposting = NULL;
1127 : :
3426 kgrittn@postgresql.o 1128 : 3658728 : page = BufferGetPage(buf);
1254 michael@paquier.xyz 1129 : 3658728 : opaque = BTPageGetOpaque(page);
1754 pg@bowt.ie 1130 : 3658728 : isleaf = P_ISLEAF(opaque);
1131 : 3658728 : isroot = P_ISROOT(opaque);
1132 : 3658728 : isrightmost = P_RIGHTMOST(opaque);
1133 [ + + + + ]: 3658728 : isonly = P_LEFTMOST(opaque) && P_RIGHTMOST(opaque);
1134 : :
1135 : : /* child buffer must be given iff inserting on an internal page */
1136 [ - + ]: 3658728 : Assert(isleaf == !BufferIsValid(cbuf));
1137 : : /* tuple must have appropriate number of attributes */
1138 [ + + - + : 3658728 : Assert(!isleaf ||
- - ]
1139 : : BTreeTupleGetNAtts(itup, rel) ==
1140 : : IndexRelationGetNumberOfAttributes(rel));
1141 [ + + + - : 3658728 : Assert(isleaf ||
- + ]
1142 : : BTreeTupleGetNAtts(itup, rel) <=
1143 : : IndexRelationGetNumberOfKeyAttributes(rel));
2019 1144 [ - + ]: 3658728 : Assert(!BTreeTupleIsPosting(itup));
2000 1145 [ - + ]: 3658728 : Assert(MAXALIGN(IndexTupleSize(itup)) == itemsz);
1146 : : /* Caller must always finish incomplete split for us */
1754 1147 [ - + ]: 3658728 : Assert(!P_INCOMPLETE_SPLIT(opaque));
1148 : :
1149 : : /*
1150 : : * Every internal page should have exactly one negative infinity item at
1151 : : * all times. Only _bt_split() and _bt_newlevel() should add items that
1152 : : * become negative infinity items through truncation, since they're the
1153 : : * only routines that allocate new internal pages.
1154 : : */
1155 [ + + + + : 3658728 : Assert(isleaf || newitemoff > P_FIRSTDATAKEY(opaque));
- + ]
1156 : :
1157 : : /*
1158 : : * Do we need to split an existing posting list item?
1159 : : */
2019 1160 [ + + ]: 3658728 : if (postingoff != 0)
1161 : : {
1162 : 13407 : ItemId itemid = PageGetItemId(page, newitemoff);
1163 : :
1164 : : /*
1165 : : * The new tuple is a duplicate with a heap TID that falls inside the
1166 : : * range of an existing posting list tuple on a leaf page. Prepare to
1167 : : * split an existing posting list. Overwriting the posting list with
1168 : : * its post-split version is treated as an extra step in either the
1169 : : * insert or page split critical section.
1170 : : */
1410 1171 [ + - + - : 13407 : Assert(isleaf && itup_key->heapkeyspace && itup_key->allequalimage);
- + ]
2019 1172 : 13407 : oposting = (IndexTuple) PageGetItem(page, itemid);
1173 : :
1174 : : /*
1175 : : * postingoff value comes from earlier call to _bt_binsrch_posting().
1176 : : * Its binary search might think that a plain tuple must be a posting
1177 : : * list tuple that needs to be split. This can happen with corruption
1178 : : * involving an existing plain tuple that is a duplicate of the new
1179 : : * item, up to and including its table TID. Check for that here in
1180 : : * passing.
1181 : : *
1182 : : * Also verify that our caller has made sure that the existing posting
1183 : : * list tuple does not have its LP_DEAD bit set.
1184 : : */
1410 1185 [ + - - + ]: 13407 : if (!BTreeTupleIsPosting(oposting) || ItemIdIsDead(itemid))
1410 pg@bowt.ie 1186 [ # # ]:UBC 0 : ereport(ERROR,
1187 : : (errcode(ERRCODE_INDEX_CORRUPTED),
1188 : : errmsg_internal("table tid from new index tuple (%u,%u) overlaps with invalid duplicate tuple at offset %u of block %u in index \"%s\"",
1189 : : ItemPointerGetBlockNumber(&itup->t_tid),
1190 : : ItemPointerGetOffsetNumber(&itup->t_tid),
1191 : : newitemoff, BufferGetBlockNumber(buf),
1192 : : RelationGetRelationName(rel))));
1193 : :
1194 : : /* use a mutable copy of itup as our itup from here on */
2019 pg@bowt.ie 1195 :CBC 13407 : origitup = itup;
1196 : 13407 : itup = CopyIndexTuple(origitup);
1197 : 13407 : nposting = _bt_swap_posting(itup, oposting, postingoff);
1198 : : /* itup now contains rightmost/max TID from oposting */
1199 : :
1200 : : /* Alter offset so that newitem goes after posting list */
1201 : 13407 : newitemoff = OffsetNumberNext(newitemoff);
1202 : : }
1203 : :
1204 : : /*
1205 : : * Do we need to split the page to fit the item on it?
1206 : : *
1207 : : * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
1208 : : * so this comparison is correct even though we appear to be accounting
1209 : : * only for the item and not for its line pointer.
1210 : : */
9178 tgl@sss.pgh.pa.us 1211 [ + + ]: 3658728 : if (PageGetFreeSpace(page) < itemsz)
1212 : : {
1213 : : Buffer rbuf;
1214 : :
1972 pg@bowt.ie 1215 [ - + ]: 11436 : Assert(!split_only_page);
1216 : :
1217 : : /* split the buffer into left and right halves */
889 andres@anarazel.de 1218 : 11436 : rbuf = _bt_split(rel, heaprel, itup_key, buf, cbuf, newitemoff, itemsz,
1219 : : itup, origitup, nposting, postingoff);
5325 heikki.linnakangas@i 1220 : 11436 : PredicateLockPageSplit(rel,
1221 : : BufferGetBlockNumber(buf),
1222 : : BufferGetBlockNumber(rbuf));
1223 : :
1224 : : /*----------
1225 : : * By here,
1226 : : *
1227 : : * + our target page has been split;
1228 : : * + the original tuple has been inserted;
1229 : : * + we have write locks on both the old (left half)
1230 : : * and new (right half) buffers, after the split; and
1231 : : * + we know the key we want to insert into the parent
1232 : : * (it's the "high key" on the left child page).
1233 : : *
1234 : : * We're ready to do the parent insertion. We need to hold onto the
1235 : : * locks for the child pages until we locate the parent, but we can
1236 : : * at least release the lock on the right child before doing the
1237 : : * actual insertion. The lock on the left child will be released
1238 : : * last of all by parent insertion, where it is the 'cbuf' of parent
1239 : : * page.
1240 : : *----------
1241 : : */
889 andres@anarazel.de 1242 : 11436 : _bt_insert_parent(rel, heaprel, buf, rbuf, stack, isroot, isonly);
1243 : : }
1244 : : else
1245 : : {
8233 tgl@sss.pgh.pa.us 1246 : 3647292 : Buffer metabuf = InvalidBuffer;
1247 : 3647292 : Page metapg = NULL;
1248 : 3647292 : BTMetaPageData *metad = NULL;
1249 : : BlockNumber blockcache;
1250 : :
1251 : : /*
1252 : : * If we are doing this insert because we split a page that was the
1253 : : * only one on its tree level, but was not the root, it may have been
1254 : : * the "fast root". We need to ensure that the fast root link points
1255 : : * at or above the current page. We can safely acquire a lock on the
1256 : : * metapage here --- see comments for _bt_newlevel().
1257 : : */
1754 pg@bowt.ie 1258 [ + + ]: 3647292 : if (unlikely(split_only_page))
1259 : : {
1998 1260 [ - + ]: 12 : Assert(!isleaf);
1261 [ - + ]: 12 : Assert(BufferIsValid(cbuf));
1262 : :
819 1263 : 12 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
3426 kgrittn@postgresql.o 1264 : 12 : metapg = BufferGetPage(metabuf);
8233 tgl@sss.pgh.pa.us 1265 : 12 : metad = BTPageGetMeta(metapg);
1266 : :
1655 pg@bowt.ie 1267 [ - + ]: 12 : if (metad->btm_fastlevel >= opaque->btpo_level)
1268 : : {
1269 : : /* no update wanted */
8233 tgl@sss.pgh.pa.us 1270 :UBC 0 : _bt_relbuf(rel, metabuf);
1271 : 0 : metabuf = InvalidBuffer;
1272 : : }
1273 : : }
1274 : :
1275 : : /* Do the update. No ereport(ERROR) until changes are logged */
8233 tgl@sss.pgh.pa.us 1276 :CBC 3647292 : START_CRIT_SECTION();
1277 : :
2019 pg@bowt.ie 1278 [ + + ]: 3647292 : if (postingoff != 0)
1279 : 13383 : memcpy(oposting, nposting, MAXALIGN(IndexTupleSize(nposting)));
1280 : :
1998 1281 [ - + ]: 3647292 : if (PageAddItem(page, (Item) itup, itemsz, newitemoff, false,
1282 : : false) == InvalidOffsetNumber)
5487 tgl@sss.pgh.pa.us 1283 [ # # ]:UBC 0 : elog(PANIC, "failed to add new item to block %u in index \"%s\"",
1284 : : BufferGetBlockNumber(buf), RelationGetRelationName(rel));
1285 : :
7099 tgl@sss.pgh.pa.us 1286 :CBC 3647292 : MarkBufferDirty(buf);
1287 : :
8233 1288 [ + + ]: 3647292 : if (BufferIsValid(metabuf))
1289 : : {
1290 : : /* upgrade meta-page if needed */
2362 pg@bowt.ie 1291 [ - + ]: 12 : if (metad->btm_version < BTREE_NOVAC_VERSION)
2712 teodor@sigaev.ru 1292 :UBC 0 : _bt_upgrademetapage(metapg);
1999 pg@bowt.ie 1293 :CBC 12 : metad->btm_fastroot = BufferGetBlockNumber(buf);
1655 1294 : 12 : metad->btm_fastlevel = opaque->btpo_level;
7099 tgl@sss.pgh.pa.us 1295 : 12 : MarkBufferDirty(metabuf);
1296 : : }
1297 : :
1298 : : /*
1299 : : * Clear INCOMPLETE_SPLIT flag on child if inserting the new item
1300 : : * finishes a split
1301 : : */
1972 pg@bowt.ie 1302 [ + + ]: 3647292 : if (!isleaf)
1303 : : {
3426 kgrittn@postgresql.o 1304 : 10603 : Page cpage = BufferGetPage(cbuf);
1254 michael@paquier.xyz 1305 : 10603 : BTPageOpaque cpageop = BTPageGetOpaque(cpage);
1306 : :
4190 heikki.linnakangas@i 1307 [ - + ]: 10603 : Assert(P_INCOMPLETE_SPLIT(cpageop));
1308 : 10603 : cpageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
1309 : 10603 : MarkBufferDirty(cbuf);
1310 : : }
1311 : :
1312 : : /* XLOG stuff */
5381 rhaas@postgresql.org 1313 [ + + + + : 3647292 : if (RelationNeedsWAL(rel))
+ + + + ]
1314 : : {
1315 : : xl_btree_insert xlrec;
1316 : : xl_btree_metadata xlmeta;
1317 : : uint8 xlinfo;
1318 : : XLogRecPtr recptr;
1319 : : uint16 upostingoff;
1320 : :
1999 pg@bowt.ie 1321 : 3294290 : xlrec.offnum = newitemoff;
1322 : :
3943 heikki.linnakangas@i 1323 : 3294290 : XLogBeginInsert();
207 peter@eisentraut.org 1324 : 3294290 : XLogRegisterData(&xlrec, SizeOfBtreeInsert);
1325 : :
1998 pg@bowt.ie 1326 [ + + + + ]: 3294290 : if (isleaf && postingoff == 0)
1327 : : {
1328 : : /* Simple leaf insert */
7086 tgl@sss.pgh.pa.us 1329 : 3271203 : xlinfo = XLOG_BTREE_INSERT_LEAF;
1330 : : }
2019 pg@bowt.ie 1331 [ + + ]: 23087 : else if (postingoff != 0)
1332 : : {
1333 : : /*
1334 : : * Leaf insert with posting list split. Must include
1335 : : * postingoff field before newitem/orignewitem.
1336 : : */
1971 1337 [ - + ]: 13383 : Assert(isleaf);
2019 1338 : 13383 : xlinfo = XLOG_BTREE_INSERT_POST;
1339 : : }
1340 : : else
1341 : : {
1342 : : /* Internal page insert, which finishes a split on cbuf */
7086 tgl@sss.pgh.pa.us 1343 : 9704 : xlinfo = XLOG_BTREE_INSERT_UPPER;
1972 pg@bowt.ie 1344 : 9704 : XLogRegisterBuffer(1, cbuf, REGBUF_STANDARD);
1345 : :
1971 1346 [ + + ]: 9704 : if (BufferIsValid(metabuf))
1347 : : {
1348 : : /* Actually, it's an internal page insert + meta update */
1349 : 12 : xlinfo = XLOG_BTREE_INSERT_META;
1350 : :
1351 [ - + ]: 12 : Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
1352 : 12 : xlmeta.version = metad->btm_version;
1353 : 12 : xlmeta.root = metad->btm_root;
1354 : 12 : xlmeta.level = metad->btm_level;
1355 : 12 : xlmeta.fastroot = metad->btm_fastroot;
1356 : 12 : xlmeta.fastlevel = metad->btm_fastlevel;
1655 1357 : 12 : xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
1971 1358 : 12 : xlmeta.allequalimage = metad->btm_allequalimage;
1359 : :
1360 : 12 : XLogRegisterBuffer(2, metabuf,
1361 : : REGBUF_WILL_INIT | REGBUF_STANDARD);
207 peter@eisentraut.org 1362 : 12 : XLogRegisterBufData(2, &xlmeta,
1363 : : sizeof(xl_btree_metadata));
1364 : : }
1365 : : }
1366 : :
3943 heikki.linnakangas@i 1367 : 3294290 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
2019 pg@bowt.ie 1368 [ + + ]: 3294290 : if (postingoff == 0)
1369 : : {
1370 : : /* Just log itup from caller */
207 peter@eisentraut.org 1371 : 3280907 : XLogRegisterBufData(0, itup, IndexTupleSize(itup));
1372 : : }
1373 : : else
1374 : : {
1375 : : /*
1376 : : * Insert with posting list split (XLOG_BTREE_INSERT_POST
1377 : : * record) case.
1378 : : *
1379 : : * Log postingoff. Also log origitup, not itup. REDO routine
1380 : : * must reconstruct final itup (as well as nposting) using
1381 : : * _bt_swap_posting().
1382 : : */
1955 pg@bowt.ie 1383 : 13383 : upostingoff = postingoff;
1384 : :
207 peter@eisentraut.org 1385 : 13383 : XLogRegisterBufData(0, &upostingoff, sizeof(uint16));
1386 : 13383 : XLogRegisterBufData(0, origitup,
2019 pg@bowt.ie 1387 : 13383 : IndexTupleSize(origitup));
1388 : : }
1389 : :
3943 heikki.linnakangas@i 1390 : 3294290 : recptr = XLogInsert(RM_BTREE_ID, xlinfo);
1391 : :
8233 tgl@sss.pgh.pa.us 1392 [ + + ]: 3294290 : if (BufferIsValid(metabuf))
1393 : 12 : PageSetLSN(metapg, recptr);
1972 pg@bowt.ie 1394 [ + + ]: 3294290 : if (!isleaf)
3426 kgrittn@postgresql.o 1395 : 9704 : PageSetLSN(BufferGetPage(cbuf), recptr);
1396 : :
8233 tgl@sss.pgh.pa.us 1397 : 3294290 : PageSetLSN(page, recptr);
1398 : : }
1399 : :
1400 [ - + ]: 3647292 : END_CRIT_SECTION();
1401 : :
1402 : : /* Release subsidiary buffers */
1403 [ + + ]: 3647292 : if (BufferIsValid(metabuf))
7099 1404 : 12 : _bt_relbuf(rel, metabuf);
1972 pg@bowt.ie 1405 [ + + ]: 3647292 : if (!isleaf)
4190 heikki.linnakangas@i 1406 : 10603 : _bt_relbuf(rel, cbuf);
1407 : :
1408 : : /*
1409 : : * Cache the block number if this is the rightmost leaf page. The cache
1410 : : * may be used by a future inserter within _bt_search_insert().
1411 : : */
1998 pg@bowt.ie 1412 : 3647292 : blockcache = InvalidBlockNumber;
1754 1413 [ + + + + : 3647292 : if (isrightmost && isleaf && !isroot)
+ + ]
1998 1414 : 2031637 : blockcache = BufferGetBlockNumber(buf);
1415 : :
1416 : : /* Release buffer for insertion target block */
7099 tgl@sss.pgh.pa.us 1417 : 3647292 : _bt_relbuf(rel, buf);
1418 : :
1419 : : /*
1420 : : * If we decided to cache the insertion target block before releasing
1421 : : * its buffer lock, then cache it now. Check the height of the tree
1422 : : * first, though. We don't go for the optimization with small
1423 : : * indexes. Defer final check to this point to ensure that we don't
1424 : : * call _bt_getrootheight while holding a buffer lock.
1425 : : */
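 : : 		/*
 : : 		 * A minimal sketch of how the cached block is consumed by a later
 : : 		 * insertion (the real checks in _bt_search_insert() are more
 : : 		 * involved than this):
 : : 		 *
 : : 		 *     BlockNumber target = RelationGetTargetBlock(rel);
 : : 		 *
 : : 		 *     if (target != InvalidBlockNumber)
 : : 		 *         ... try that leaf page directly, falling back to a full
 : : 		 *         _bt_search() descent if it no longer looks usable ...
 : : 		 */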
1998 pg@bowt.ie 1426 [ + + + + ]: 5678929 : if (BlockNumberIsValid(blockcache) &&
819 1427 : 2031637 : _bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL)
1998 1428 : 37664 : RelationSetTargetBlock(rel, blockcache);
1429 : : }
1430 : :
1431 : : /* be tidy */
2019 1432 [ + + ]: 3658728 : if (postingoff != 0)
1433 : : {
1434 : : /* itup is actually a modified copy of caller's original */
1435 : 13407 : pfree(nposting);
1436 : 13407 : pfree(itup);
1437 : : }
10651 scrappy@hub.org 1438 : 3658728 : }
1439 : :
1440 : : /*
1441 : : * _bt_split() -- split a page in the btree.
1442 : : *
1443 : : * On entry, buf is the page to split, and is pinned and write-locked.
1444 : : * newitemoff etc. tell us about the new item that must be inserted
1445 : : * along with the data from the original page.
1446 : : *
1447 : : * itup_key is used for suffix truncation on leaf pages (internal
1448 : : * page callers pass NULL). When splitting a non-leaf page, 'cbuf'
1449 : : * is the left-sibling of the page we're inserting the downlink for.
1450 : : * This function will clear the INCOMPLETE_SPLIT flag on it, and
1451 : : * release the buffer.
1452 : : *
1453 : : * orignewitem, nposting, and postingoff are needed when an insert of
1454 : : * orignewitem results in both a posting list split and a page split.
1455 : : * These extra posting list split details are used here in the same
1456 : : * way as they are used in the more common case where a posting list
1457 : : * split does not coincide with a page split. We need to deal with
1458 : : * posting list splits directly in order to ensure that everything
1459 : : * that follows from the insert of orignewitem is handled as a single
1460 : : * atomic operation (though caller's insert of a new pivot/downlink
1461 : : * into parent page will still be a separate operation). See
1462 : : * nbtree/README for details on the design of posting list splits.
1463 : : *
1464 : : * Returns the new right sibling of buf, pinned and write-locked.
1465 : : * The pin and lock on buf are maintained.
1466 : : */
1467 : : static Buffer
889 andres@anarazel.de 1468 : 11436 : _bt_split(Relation rel, Relation heaprel, BTScanInsert itup_key, Buffer buf,
1469 : : Buffer cbuf, OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem,
1470 : : IndexTuple orignewitem, IndexTuple nposting, uint16 postingoff)
1471 : : {
1472 : : Buffer rbuf;
1473 : : Page origpage;
1474 : : Page leftpage,
1475 : : rightpage;
1476 : : BlockNumber origpagenumber,
1477 : : rightpagenumber;
1478 : : BTPageOpaque ropaque,
1479 : : lopaque,
1480 : : oopaque;
8232 tgl@sss.pgh.pa.us 1481 : 11436 : Buffer sbuf = InvalidBuffer;
1482 : 11436 : Page spage = NULL;
1483 : 11436 : BTPageOpaque sopaque = NULL;
1484 : : Size itemsz;
1485 : : ItemId itemid;
1486 : : IndexTuple firstright,
1487 : : lefthighkey;
1488 : : OffsetNumber firstrightoff;
1489 : : OffsetNumber afterleftoff,
1490 : : afterrightoff,
1491 : : minusinfoff;
1492 : : OffsetNumber origpagepostingoff;
1493 : : OffsetNumber maxoff;
1494 : : OffsetNumber i;
1495 : : bool newitemonleft,
1496 : : isleaf,
1497 : : isrightmost;
1498 : :
1499 : : /*
1500 : : * origpage is the original page to be split. leftpage is a temporary
1501 : : * buffer that receives the left-sibling data, which will be copied back
1502 : : * into origpage on success. rightpage is the new page that will receive
1503 : : * the right-sibling data.
1504 : : *
1505 : : * leftpage is allocated after choosing a split point. rightpage's new
1506 : : * buffer isn't acquired until after leftpage is initialized and has new
1507 : : * high key, the last point where splitting the page may fail (barring
1508 : : * corruption). Failing before acquiring new buffer won't have lasting
1509 : : * consequences, since origpage won't have been modified and leftpage is
1510 : : * only workspace.
1511 : : */
3426 kgrittn@postgresql.o 1512 : 11436 : origpage = BufferGetPage(buf);
1254 michael@paquier.xyz 1513 : 11436 : oopaque = BTPageGetOpaque(origpage);
1972 pg@bowt.ie 1514 : 11436 : isleaf = P_ISLEAF(oopaque);
1515 : 11436 : isrightmost = P_RIGHTMOST(oopaque);
1516 : 11436 : maxoff = PageGetMaxOffsetNumber(origpage);
5487 tgl@sss.pgh.pa.us 1517 : 11436 : origpagenumber = BufferGetBlockNumber(buf);
1518 : :
1519 : : /*
1520 : : * Choose a point to split origpage at.
1521 : : *
1522 : : * A split point can be thought of as a point _between_ two existing data
1523 : : * items on origpage (the lastleft and firstright tuples), provided you
1524 : : * pretend that the new item that didn't fit is already on origpage.
1525 : : *
1526 : : * Since origpage does not actually contain newitem, the representation of
1527 : : * split points needs to work with two boundary cases: splits where
1528 : : * newitem is lastleft, and splits where newitem is firstright.
1529 : : * newitemonleft resolves the ambiguity that would otherwise exist when
1530 : : * newitemoff == firstrightoff. In all other cases it's clear which side
1531 : : * of the split every tuple goes on from context. newitemonleft is
1532 : : * usually (but not always) redundant information.
1533 : : *
1534 : : * firstrightoff is supposed to be an origpage offset number, but it's
1535 : : * possible that its value will be maxoff+1, which is "past the end" of
1536 : : * origpage. This happens in the rare case where newitem goes after all
1537 : : * existing items (i.e. newitemoff is maxoff+1) and we end up splitting
1538 : : * origpage at the point that leaves newitem alone on new right page. Any
1539 : : * "!newitemonleft && newitemoff == firstrightoff" split point makes
1540 : : * newitem the firstright tuple, though, so this case isn't a special
1541 : : * case.
1542 : : */
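 : : 	/*
 : : 	 * A sketch of the boundary cases, using hypothetical offsets: with
 : : 	 * maxoff = 10 and newitemoff = 6, a split point of firstrightoff = 6
 : : 	 * makes newitem the lastleft tuple when newitemonleft is true, and the
 : : 	 * firstright tuple when newitemonleft is false.  With newitemoff =
 : : 	 * maxoff + 1 = 11 and firstrightoff = 11 (newitemonleft false),
 : : 	 * newitem ends up alone on the new right page.
 : : 	 */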
1972 pg@bowt.ie 1543 : 11436 : firstrightoff = _bt_findsplitloc(rel, origpage, newitemoff, newitemsz,
1544 : : newitem, &newitemonleft);
1545 : :
1546 : : /* Allocate temp buffer for leftpage */
2308 1547 : 11436 : leftpage = PageGetTempPage(origpage);
1548 : 11436 : _bt_pageinit(leftpage, BufferGetPageSize(buf));
1254 michael@paquier.xyz 1549 : 11436 : lopaque = BTPageGetOpaque(leftpage);
1550 : :
1551 : : /*
1552 : : * leftpage won't be the root when we're done. Also, clear the SPLIT_END
1553 : : * and HAS_GARBAGE flags.
1554 : : */
9103 vadim4o@yahoo.com 1555 : 11436 : lopaque->btpo_flags = oopaque->btpo_flags;
6983 tgl@sss.pgh.pa.us 1556 : 11436 : lopaque->btpo_flags &= ~(BTP_ROOT | BTP_SPLIT_END | BTP_HAS_GARBAGE);
1557 : : /* set flag in leftpage indicating that rightpage has no downlink yet */
4190 heikki.linnakangas@i 1558 : 11436 : lopaque->btpo_flags |= BTP_INCOMPLETE_SPLIT;
10226 bruce@momjian.us 1559 : 11436 : lopaque->btpo_prev = oopaque->btpo_prev;
1560 : : /* handle btpo_next after rightpage buffer acquired */
1655 pg@bowt.ie 1561 : 11436 : lopaque->btpo_level = oopaque->btpo_level;
1562 : : /* handle btpo_cycleid after rightpage buffer acquired */
1563 : :
1564 : : /*
1565 : : * Copy the original page's LSN into leftpage, which will become the
1566 : : * updated version of the page. We need this because XLogInsert will
1567 : : * examine the LSN and possibly dump it in a page image.
1568 : : */
2308 1569 : 11436 : PageSetLSN(leftpage, PageGetLSN(origpage));
1570 : :
1571 : : /*
1572 : : * Determine page offset number of existing overlapped-with-orignewitem
1573 : : * posting list when it is necessary to perform a posting list split in
1574 : : * passing. Note that newitem was already changed by caller (newitem no
1575 : : * longer has the orignewitem TID).
1576 : : *
1577 : : * This page offset number (origpagepostingoff) will be used to pretend
1578 : : * that the posting split has already taken place, even though the
1579 : : * required modifications to origpage won't occur until we reach the
1580 : : * critical section. The lastleft and firstright tuples of our page split
1581 : : * point should, in effect, come from an imaginary version of origpage
1582 : : * that has the nposting tuple instead of the original posting list tuple.
1583 : : *
1584 : : * Note: _bt_findsplitloc() should have compensated for coinciding posting
1585 : : * list splits in just the same way, at least in theory. It doesn't
1586 : : * bother with that, though. In practice it won't affect its choice of
1587 : : * split point.
1588 : : */
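 : : 	/*
 : : 	 * To make that concrete (hypothetical TIDs): if orignewitem's TID (5,1)
 : : 	 * fell inside an existing posting list whose TIDs ran from (4,1) to
 : : 	 * (9,1), caller has already built nposting, a same-size copy of that
 : : 	 * posting list containing (5,1) in place of the old maximum (9,1),
 : : 	 * while newitem now carries (9,1).  origpagepostingoff records where
 : : 	 * the original posting list sits on origpage, so the transfer loop
 : : 	 * below can substitute nposting for it.
 : : 	 */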
2019 1589 : 11436 : origpagepostingoff = InvalidOffsetNumber;
1590 [ + + ]: 11436 : if (postingoff != 0)
1591 : : {
1592 [ - + ]: 24 : Assert(isleaf);
1593 [ - + ]: 24 : Assert(ItemPointerCompare(&orignewitem->t_tid,
1594 : : &newitem->t_tid) < 0);
1595 [ - + ]: 24 : Assert(BTreeTupleIsPosting(nposting));
1596 : 24 : origpagepostingoff = OffsetNumberPrev(newitemoff);
1597 : : }
1598 : :
1599 : : /*
1600 : : * The high key for the new left page is a possibly-truncated copy of
1601 : : * firstright on the leaf level (it's "firstright itself" on internal
1602 : : * pages; see !isleaf comments below). This may seem to be contrary to
1603 : : * Lehman & Yao's approach of using a copy of lastleft as the new high key
1604 : : * when splitting on the leaf level. It isn't, though.
1605 : : *
1606 : : * Suffix truncation will leave the left page's high key fully equal to
1607 : : * lastleft when lastleft and firstright are equal prior to heap TID (that
1608 : : * is, the tiebreaker TID value comes from lastleft). It isn't actually
1609 : : * necessary for a new leaf high key to be a copy of lastleft for the L&Y
1610 : : * "subtree" invariant to hold. It's sufficient to make sure that the new
1611 : : * leaf high key is strictly less than firstright, and greater than or
1612 : : * equal to (not necessarily equal to) lastleft. In other words, when
1613 : : * suffix truncation isn't possible during a leaf page split, we take
1614 : : * L&Y's exact approach to generating a new high key for the left page.
1615 : : * (Actually, that is slightly inaccurate. We don't just use a copy of
1616 : : * lastleft. A tuple with all the keys from firstright but the max heap
1617 : : * TID from lastleft is used, to avoid introducing a special case.)
1618 : : */
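 : : 	/*
 : : 	 * For example (hypothetical two-column index): when lastleft is
 : : 	 * ("cat", 7) and firstright is ("dog", 2), _bt_truncate() can return a
 : : 	 * high key of just ("dog"), with the second attribute and heap TID
 : : 	 * truncated away, since the first attribute alone separates the two
 : : 	 * halves.  Only when lastleft and firstright agree on every key
 : : 	 * attribute does the new high key keep lastleft's max heap TID as a
 : : 	 * tiebreaker.
 : : 	 */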
1972 1619 [ + + + + ]: 11436 : if (!newitemonleft && newitemoff == firstrightoff)
1620 : : {
1621 : : /* incoming tuple becomes firstright */
9178 tgl@sss.pgh.pa.us 1622 : 30 : itemsz = newitemsz;
1972 pg@bowt.ie 1623 : 30 : firstright = newitem;
1624 : : }
1625 : : else
1626 : : {
1627 : : /* existing item at firstrightoff becomes firstright */
1628 : 11406 : itemid = PageGetItemId(origpage, firstrightoff);
9178 tgl@sss.pgh.pa.us 1629 : 11406 : itemsz = ItemIdGetLength(itemid);
1972 pg@bowt.ie 1630 : 11406 : firstright = (IndexTuple) PageGetItem(origpage, itemid);
1631 [ - + ]: 11406 : if (firstrightoff == origpagepostingoff)
1972 pg@bowt.ie 1632 :UBC 0 : firstright = nposting;
1633 : : }
1634 : :
1972 pg@bowt.ie 1635 [ + + ]:CBC 11436 : if (isleaf)
1636 : : {
1637 : : IndexTuple lastleft;
1638 : :
1639 : : /* Attempt suffix truncation for leaf page splits */
1640 [ + + + + ]: 11279 : if (newitemonleft && newitemoff == firstrightoff)
1641 : : {
1642 : : /* incoming tuple becomes lastleft */
2362 1643 : 192 : lastleft = newitem;
1644 : : }
1645 : : else
1646 : : {
1647 : : OffsetNumber lastleftoff;
1648 : :
1649 : : /* existing item before firstrightoff becomes lastleft */
1972 1650 : 11087 : lastleftoff = OffsetNumberPrev(firstrightoff);
2362 1651 [ + + - + ]: 11087 : Assert(lastleftoff >= P_FIRSTDATAKEY(oopaque));
1652 : 11087 : itemid = PageGetItemId(origpage, lastleftoff);
1653 : 11087 : lastleft = (IndexTuple) PageGetItem(origpage, itemid);
2019 1654 [ + + ]: 11087 : if (lastleftoff == origpagepostingoff)
1655 : 3 : lastleft = nposting;
1656 : : }
1657 : :
1972 1658 : 11279 : lefthighkey = _bt_truncate(rel, lastleft, firstright, itup_key);
1659 : 11279 : itemsz = IndexTupleSize(lefthighkey);
1660 : : }
1661 : : else
1662 : : {
1663 : : /*
1664 : : * Don't perform suffix truncation on a copy of firstright to make
1665 : : * left page high key for internal page splits. Must use firstright
1666 : : * as new high key directly.
1667 : : *
1668 : : * Each distinct separator key value originates as a leaf level high
1669 : : * key; all other separator keys/pivot tuples are copied from one
1670 : : * level down. A separator key in a grandparent page must be
1671 : : * identical to high key in rightmost parent page of the subtree to
1672 : : * its left, which must itself be identical to high key in rightmost
1673 : : * child page of that same subtree (this even applies to separator
1674 : : * from grandparent's high key). There must always be an unbroken
1675 : : * "seam" of identical separator keys that guide index scans at every
1676 : : * level, starting from the grandparent. That's why suffix truncation
1677 : : * is unsafe here.
1678 : : *
1679 : : * Internal page splits will truncate firstright into a "negative
1680 : : * infinity" data item when it gets inserted on the new right page
1681 : : * below, though. This happens during the call to _bt_pgaddtup() for
1682 : : * the new first data item for right page. Do not confuse this
1683 : : * mechanism with suffix truncation. It is just a convenient way of
1684 : : * implementing page splits that split the internal page "inside"
1685 : : * firstright. The lefthighkey separator key cannot appear a second
1686 : : * time in the right page (only firstright's downlink goes in right
1687 : : * page).
1688 : : */
1689 : 157 : lefthighkey = firstright;
1690 : : }
1691 : :
1692 : : /*
1693 : : * Add new high key to leftpage
1694 : : */
1695 : 11436 : afterleftoff = P_HIKEY;
1696 : :
1697 [ + - - + ]: 11436 : Assert(BTreeTupleGetNAtts(lefthighkey, rel) > 0);
1698 [ + - - + ]: 11436 : Assert(BTreeTupleGetNAtts(lefthighkey, rel) <=
1699 : : IndexRelationGetNumberOfKeyAttributes(rel));
1700 [ - + ]: 11436 : Assert(itemsz == MAXALIGN(IndexTupleSize(lefthighkey)));
1701 [ - + ]: 11436 : if (PageAddItem(leftpage, (Item) lefthighkey, itemsz, afterleftoff, false,
1702 : : false) == InvalidOffsetNumber)
1972 pg@bowt.ie 1703 [ # # ]:UBC 0 : elog(ERROR, "failed to add high key to the left sibling"
1704 : : " while splitting block %u of index \"%s\"",
1705 : : origpagenumber, RelationGetRelationName(rel));
1972 pg@bowt.ie 1706 :CBC 11436 : afterleftoff = OffsetNumberNext(afterleftoff);
1707 : :
1708 : : /*
1709 : : * Acquire a new right page to split into, now that left page has a new
1710 : : * high key. From here on, it's not okay to throw an error without
1711 : : * zeroing rightpage first. This coding rule ensures that we won't
1712 : : * confuse future VACUUM operations, which might otherwise try to re-find
1713 : : * a downlink to a leftover junk page as the page undergoes deletion.
1714 : : *
1715 : : * It would be reasonable to start the critical section just after the new
1716 : : * rightpage buffer is acquired instead; that would allow us to avoid
1717 : : * leftover junk pages without bothering to zero rightpage. We do it this
1718 : : * way because it avoids an unnecessary PANIC when either origpage or its
1719 : : * existing sibling page is corrupt.
1720 : : */
819 1721 : 11436 : rbuf = _bt_allocbuf(rel, heaprel);
2308 1722 : 11436 : rightpage = BufferGetPage(rbuf);
1723 : 11436 : rightpagenumber = BufferGetBlockNumber(rbuf);
1724 : : /* rightpage was initialized by _bt_allocbuf */
1254 michael@paquier.xyz 1725 : 11436 : ropaque = BTPageGetOpaque(rightpage);
1726 : :
1727 : : /*
1728 : : * Finish off remaining leftpage special area fields. They cannot be set
1729 : : * before both origpage (leftpage) and rightpage buffers are acquired and
1730 : : * locked.
1731 : : *
1732 : : * btpo_cycleid is only used with leaf pages, though we set it here in all
1733 : : * cases just to be consistent.
1734 : : */
2308 pg@bowt.ie 1735 : 11436 : lopaque->btpo_next = rightpagenumber;
1736 : 11436 : lopaque->btpo_cycleid = _bt_vacuum_cycleid(rel);
1737 : :
1738 : : /*
1739 : : * rightpage won't be the root when we're done. Also, clear the SPLIT_END
1740 : : * and HAS_GARBAGE flags.
1741 : : */
1742 : 11436 : ropaque->btpo_flags = oopaque->btpo_flags;
1743 : 11436 : ropaque->btpo_flags &= ~(BTP_ROOT | BTP_SPLIT_END | BTP_HAS_GARBAGE);
1744 : 11436 : ropaque->btpo_prev = origpagenumber;
1745 : 11436 : ropaque->btpo_next = oopaque->btpo_next;
1655 1746 : 11436 : ropaque->btpo_level = oopaque->btpo_level;
2308 1747 : 11436 : ropaque->btpo_cycleid = lopaque->btpo_cycleid;
1748 : :
1749 : : /*
1750 : : * Add new high key to rightpage where necessary.
1751 : : *
1752 : : * If the page we're splitting is not the rightmost page at its level in
1753 : : * the tree, then the first entry on the page is the high key from
1754 : : * origpage.
1755 : : */
1972 1756 : 11436 : afterrightoff = P_HIKEY;
1757 : :
1758 [ + + ]: 11436 : if (!isrightmost)
1759 : : {
1760 : : IndexTuple righthighkey;
1761 : :
2308 1762 : 5044 : itemid = PageGetItemId(origpage, P_HIKEY);
1763 : 5044 : itemsz = ItemIdGetLength(itemid);
1972 1764 : 5044 : righthighkey = (IndexTuple) PageGetItem(origpage, itemid);
1765 [ + - - + ]: 5044 : Assert(BTreeTupleGetNAtts(righthighkey, rel) > 0);
1766 [ + - - + ]: 5044 : Assert(BTreeTupleGetNAtts(righthighkey, rel) <=
1767 : : IndexRelationGetNumberOfKeyAttributes(rel));
1768 [ - + ]: 5044 : if (PageAddItem(rightpage, (Item) righthighkey, itemsz, afterrightoff,
1769 : : false, false) == InvalidOffsetNumber)
1770 : : {
2308 pg@bowt.ie 1771 :UBC 0 : memset(rightpage, 0, BufferGetPageSize(rbuf));
1972 1772 [ # # ]: 0 : elog(ERROR, "failed to add high key to the right sibling"
1773 : : " while splitting block %u of index \"%s\"",
1774 : : origpagenumber, RelationGetRelationName(rel));
1775 : : }
1972 pg@bowt.ie 1776 :CBC 5044 : afterrightoff = OffsetNumberNext(afterrightoff);
1777 : : }
1778 : :
1779 : : /*
1780 : : * Internal page splits truncate first data item on right page -- it
1781 : : * becomes "minus infinity" item for the page. Set this up here.
1782 : : */
1783 : 11436 : minusinfoff = InvalidOffsetNumber;
1784 [ + + ]: 11436 : if (!isleaf)
1785 : 157 : minusinfoff = afterrightoff;
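 : : 	/*
 : : 	 * Concretely, each _bt_pgaddtup() call below that passes
 : : 	 * "afterrightoff == minusinfoff" as its newfirstdataitem argument adds
 : : 	 * a key-truncated copy of the tuple (zero key attributes, downlink
 : : 	 * only) rather than the tuple as-is; see _bt_pgaddtup().
 : : 	 */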
1786 : :
1787 : : /*
1788 : : * Now transfer all the data items (non-pivot tuples in isleaf case, or
1789 : : * additional pivot tuples in !isleaf case) to the appropriate page.
1790 : : *
1791 : : * Note: we *must* insert at least the right page's items in item-number
1792 : : * order, for the benefit of _bt_restore_page().
1793 : : */
9178 tgl@sss.pgh.pa.us 1794 [ + + + + ]: 3471510 : for (i = P_FIRSTDATAKEY(oopaque); i <= maxoff; i = OffsetNumberNext(i))
1795 : : {
1796 : : IndexTuple dataitem;
1797 : :
10226 bruce@momjian.us 1798 : 3460074 : itemid = PageGetItemId(origpage, i);
1799 : 3460074 : itemsz = ItemIdGetLength(itemid);
1972 pg@bowt.ie 1800 : 3460074 : dataitem = (IndexTuple) PageGetItem(origpage, itemid);
1801 : :
1802 : : /* replace original item with nposting due to posting split? */
2019 1803 [ + + ]: 3460074 : if (i == origpagepostingoff)
1804 : : {
1972 1805 [ - + ]: 24 : Assert(BTreeTupleIsPosting(dataitem));
2019 1806 [ - + ]: 24 : Assert(itemsz == MAXALIGN(IndexTupleSize(nposting)));
1972 1807 : 24 : dataitem = nposting;
1808 : : }
1809 : :
1810 : : /* does new item belong before this one? */
2019 1811 [ + + ]: 3460050 : else if (i == newitemoff)
1812 : : {
9178 tgl@sss.pgh.pa.us 1813 [ + + ]: 6532 : if (newitemonleft)
1814 : : {
1972 pg@bowt.ie 1815 [ - + ]: 1757 : Assert(newitemoff <= firstrightoff);
1816 [ - + ]: 1757 : if (!_bt_pgaddtup(leftpage, newitemsz, newitem, afterleftoff,
1817 : : false))
1818 : : {
5487 tgl@sss.pgh.pa.us 1819 :UBC 0 : memset(rightpage, 0, BufferGetPageSize(rbuf));
1820 [ # # ]: 0 : elog(ERROR, "failed to add new item to the left sibling"
1821 : : " while splitting block %u of index \"%s\"",
1822 : : origpagenumber, RelationGetRelationName(rel));
1823 : : }
1972 pg@bowt.ie 1824 :CBC 1757 : afterleftoff = OffsetNumberNext(afterleftoff);
1825 : : }
1826 : : else
1827 : : {
1828 [ - + ]: 4775 : Assert(newitemoff >= firstrightoff);
1829 [ - + ]: 4775 : if (!_bt_pgaddtup(rightpage, newitemsz, newitem, afterrightoff,
1830 : : afterrightoff == minusinfoff))
1831 : : {
5487 tgl@sss.pgh.pa.us 1832 :UBC 0 : memset(rightpage, 0, BufferGetPageSize(rbuf));
1833 [ # # ]: 0 : elog(ERROR, "failed to add new item to the right sibling"
1834 : : " while splitting block %u of index \"%s\"",
1835 : : origpagenumber, RelationGetRelationName(rel));
1836 : : }
1972 pg@bowt.ie 1837 :CBC 4775 : afterrightoff = OffsetNumberNext(afterrightoff);
1838 : : }
1839 : : }
1840 : :
1841 : : /* decide which page to put it on */
1842 [ + + ]: 3460074 : if (i < firstrightoff)
1843 : : {
1844 [ - + ]: 2622948 : if (!_bt_pgaddtup(leftpage, itemsz, dataitem, afterleftoff, false))
1845 : : {
5487 tgl@sss.pgh.pa.us 1846 :UBC 0 : memset(rightpage, 0, BufferGetPageSize(rbuf));
1847 [ # # ]: 0 : elog(ERROR, "failed to add old item to the left sibling"
1848 : : " while splitting block %u of index \"%s\"",
1849 : : origpagenumber, RelationGetRelationName(rel));
1850 : : }
1972 pg@bowt.ie 1851 :CBC 2622948 : afterleftoff = OffsetNumberNext(afterleftoff);
1852 : : }
1853 : : else
1854 : : {
1855 [ - + ]: 837126 : if (!_bt_pgaddtup(rightpage, itemsz, dataitem, afterrightoff,
1856 : : afterrightoff == minusinfoff))
1857 : : {
5487 tgl@sss.pgh.pa.us 1858 :UBC 0 : memset(rightpage, 0, BufferGetPageSize(rbuf));
1859 [ # # ]: 0 : elog(ERROR, "failed to add old item to the right sibling"
1860 : : " while splitting block %u of index \"%s\"",
1861 : : origpagenumber, RelationGetRelationName(rel));
1862 : : }
1972 pg@bowt.ie 1863 :CBC 837126 : afterrightoff = OffsetNumberNext(afterrightoff);
1864 : : }
1865 : : }
1866 : :
1867 : : /* Handle case where newitem goes at the end of rightpage */
9178 tgl@sss.pgh.pa.us 1868 [ + + ]: 11436 : if (i <= newitemoff)
1869 : : {
1870 : : /*
1871 : : * Can't have newitemonleft here; that would imply we were told to put
1872 : : * *everything* on the left page, which cannot fit (if it could, we'd
1873 : : * not be splitting the page).
1874 : : */
1972 pg@bowt.ie 1875 [ + - - + ]: 4904 : Assert(!newitemonleft && newitemoff == maxoff + 1);
1876 [ - + ]: 4904 : if (!_bt_pgaddtup(rightpage, newitemsz, newitem, afterrightoff,
1877 : : afterrightoff == minusinfoff))
1878 : : {
5487 tgl@sss.pgh.pa.us 1879 :UBC 0 : memset(rightpage, 0, BufferGetPageSize(rbuf));
1880 [ # # ]: 0 : elog(ERROR, "failed to add new item to the right sibling"
1881 : : " while splitting block %u of index \"%s\"",
1882 : : origpagenumber, RelationGetRelationName(rel));
1883 : : }
1972 pg@bowt.ie 1884 :CBC 4904 : afterrightoff = OffsetNumberNext(afterrightoff);
1885 : : }
1886 : :
1887 : : /*
1888 : : * We have to grab the original right sibling (if any) and update its prev
1889 : : * link. We are guaranteed that this is deadlock-free, since we couple
1890 : : * the locks in the standard order: left to right.
1891 : : */
1892 [ + + ]: 11436 : if (!isrightmost)
1893 : : {
819 1894 : 5044 : sbuf = _bt_getbuf(rel, oopaque->btpo_next, BT_WRITE);
3426 kgrittn@postgresql.o 1895 : 5044 : spage = BufferGetPage(sbuf);
1254 michael@paquier.xyz 1896 : 5044 : sopaque = BTPageGetOpaque(spage);
5487 tgl@sss.pgh.pa.us 1897 [ - + ]: 5044 : if (sopaque->btpo_prev != origpagenumber)
1898 : : {
5487 tgl@sss.pgh.pa.us 1899 :UBC 0 : memset(rightpage, 0, BufferGetPageSize(rbuf));
2228 peter@eisentraut.org 1900 [ # # ]: 0 : ereport(ERROR,
1901 : : (errcode(ERRCODE_INDEX_CORRUPTED),
1902 : : errmsg_internal("right sibling's left-link doesn't match: "
1903 : : "block %u links to %u instead of expected %u in index \"%s\"",
1904 : : oopaque->btpo_next, sopaque->btpo_prev, origpagenumber,
1905 : : RelationGetRelationName(rel))));
1906 : : }
1907 : :
1908 : : /*
1909 : : * Check to see if we can set the SPLIT_END flag in the right-hand
1910 : : * split page; this can save some I/O for vacuum since it need not
1911 : : * proceed to the right sibling. We can set the flag if the right
1912 : : * sibling has a different cycleid: that means it could not be part of
1913 : : * a group of pages that were all split off from the same ancestor
1914 : : * page. If you're confused, imagine that page A splits to A B and
1915 : : * then again, yielding A C B, while vacuum is in progress. Tuples
1916 : : * originally in A could now be in either B or C, hence vacuum must
1917 : : * examine both pages. But if D, our right sibling, has a different
1918 : : * cycleid then it could not contain any tuples that were in A when
1919 : : * the vacuum started.
1920 : : */
7061 tgl@sss.pgh.pa.us 1921 [ + + ]:CBC 5044 : if (sopaque->btpo_cycleid != ropaque->btpo_cycleid)
7061 tgl@sss.pgh.pa.us 1922 :GBC 1 : ropaque->btpo_flags |= BTP_SPLIT_END;
1923 : : }
1924 : :
1925 : : /*
1926 : : * Right sibling is locked, new siblings are prepared, but original page
1927 : : * is not updated yet.
1928 : : *
1929 : : * NO EREPORT(ERROR) till right sibling is updated. We can get away with
1930 : : * not starting the critical section till here because we haven't been
1931 : : * scribbling on the original page yet; see comments above.
1932 : : */
9003 tgl@sss.pgh.pa.us 1933 :CBC 11436 : START_CRIT_SECTION();
1934 : :
1935 : : /*
1936 : : * By here, the original data page has been split into two new halves, and
1937 : : * these are correct. The algorithm requires that the left page never
1938 : : * move during a split, so we copy the new left page back on top of the
1939 : : * original. We need to do this before writing the WAL record, so that
1940 : : * XLogInsert can WAL log an image of the page if necessary.
1941 : : */
6785 bruce@momjian.us 1942 : 11436 : PageRestoreTempPage(leftpage, origpage);
1943 : : /* leftpage, lopaque must not be used below here */
1944 : :
6723 tgl@sss.pgh.pa.us 1945 : 11436 : MarkBufferDirty(buf);
1946 : 11436 : MarkBufferDirty(rbuf);
1947 : :
1972 pg@bowt.ie 1948 [ + + ]: 11436 : if (!isrightmost)
1949 : : {
5487 tgl@sss.pgh.pa.us 1950 : 5044 : sopaque->btpo_prev = rightpagenumber;
6723 1951 : 5044 : MarkBufferDirty(sbuf);
1952 : : }
1953 : :
1954 : : /*
1955 : : * Clear INCOMPLETE_SPLIT flag on child if inserting the new item finishes
1956 : : * a split
1957 : : */
4190 heikki.linnakangas@i 1958 [ + + ]: 11436 : if (!isleaf)
1959 : : {
3426 kgrittn@postgresql.o 1960 : 157 : Page cpage = BufferGetPage(cbuf);
1254 michael@paquier.xyz 1961 : 157 : BTPageOpaque cpageop = BTPageGetOpaque(cpage);
1962 : :
4190 heikki.linnakangas@i 1963 : 157 : cpageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
1964 : 157 : MarkBufferDirty(cbuf);
1965 : : }
1966 : :
1967 : : /* XLOG stuff */
5381 rhaas@postgresql.org 1968 [ + + + + : 11436 : if (RelationNeedsWAL(rel))
+ + + - ]
1969 : : {
1970 : : xl_btree_split xlrec;
1971 : : uint8 xlinfo;
1972 : : XLogRecPtr recptr;
1973 : :
1655 pg@bowt.ie 1974 : 10518 : xlrec.level = ropaque->btpo_level;
1975 : : /* See comments below on newitem, orignewitem, and posting lists */
1972 1976 : 10518 : xlrec.firstrightoff = firstrightoff;
3943 heikki.linnakangas@i 1977 : 10518 : xlrec.newitemoff = newitemoff;
2019 pg@bowt.ie 1978 : 10518 : xlrec.postingoff = 0;
1972 1979 [ + + + + ]: 10518 : if (postingoff != 0 && origpagepostingoff < firstrightoff)
2019 1980 : 13 : xlrec.postingoff = postingoff;
1981 : :
3943 heikki.linnakangas@i 1982 : 10518 : XLogBeginInsert();
207 peter@eisentraut.org 1983 : 10518 : XLogRegisterData(&xlrec, SizeOfBtreeSplit);
1984 : :
3943 heikki.linnakangas@i 1985 : 10518 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
1986 : 10518 : XLogRegisterBuffer(1, rbuf, REGBUF_WILL_INIT);
1987 : : /* Log original right sibling, since we've changed its prev-pointer */
1972 pg@bowt.ie 1988 [ + + ]: 10518 : if (!isrightmost)
3943 heikki.linnakangas@i 1989 : 5038 : XLogRegisterBuffer(2, sbuf, REGBUF_STANDARD);
1972 pg@bowt.ie 1990 [ + + ]: 10518 : if (!isleaf)
3943 heikki.linnakangas@i 1991 : 157 : XLogRegisterBuffer(3, cbuf, REGBUF_STANDARD);
1992 : :
1993 : : /*
1994 : : * Log the new item, if it was inserted on the left page. (If it was
1995 : : * put on the right page, we don't need to explicitly WAL log it
1996 : : * because it's included with all the other items on the right page.)
1997 : : * Show the new item as belonging to the left page buffer, so that it
1998 : : * is not stored if XLogInsert decides it needs a full-page image of
1999 : : * the left page. We always store newitemoff in the record, though.
2000 : : *
2001 : : * The details are sometimes slightly different for page splits that
2002 : : * coincide with a posting list split. If both the replacement
2003 : : * posting list and newitem go on the right page, then we don't need
2004 : : * to log anything extra, just like the simple !newitemonleft
2005 : : * no-posting-split case (postingoff is set to zero in the WAL record,
2006 : : * so recovery doesn't need to process a posting list split at all).
2007 : : * Otherwise, we set postingoff and log orignewitem instead of
2008 : : * newitem, despite having actually inserted newitem. REDO routine
2009 : : * must reconstruct nposting and newitem using _bt_swap_posting().
2010 : : *
2011 : : * Note: It's possible that our page split point is the point that
2012 : : * makes the posting list lastleft and newitem firstright. This is
2013 : : * the only case where we log orignewitem/newitem despite newitem
2014 : : * going on the right page. If XLogInsert decides that it can omit
2015 : : * orignewitem due to logging a full-page image of the left page,
2016 : : * everything still works out, since recovery only needs to log
2017 : : * orignewitem for items on the left page (just like the regular
2018 : : * newitem-logged case).
2019 : : */
2019 pg@bowt.ie 2020 [ + + + + ]: 10518 : if (newitemonleft && xlrec.postingoff == 0)
207 peter@eisentraut.org 2021 : 1744 : XLogRegisterBufData(0, newitem, newitemsz);
2019 pg@bowt.ie 2022 [ + + ]: 8774 : else if (xlrec.postingoff != 0)
2023 : : {
1972 2024 [ - + ]: 13 : Assert(isleaf);
2025 [ + + - + ]: 13 : Assert(newitemonleft || firstrightoff == newitemoff);
2026 [ - + ]: 13 : Assert(newitemsz == IndexTupleSize(orignewitem));
207 peter@eisentraut.org 2027 : 13 : XLogRegisterBufData(0, orignewitem, newitemsz);
2028 : : }
2029 : :
2030 : : /* Log the left page's new high key */
1972 pg@bowt.ie 2031 [ + + ]: 10518 : if (!isleaf)
2032 : : {
2033 : : /* lefthighkey isn't local copy, get current pointer */
2034 : 157 : itemid = PageGetItemId(origpage, P_HIKEY);
2035 : 157 : lefthighkey = (IndexTuple) PageGetItem(origpage, itemid);
2036 : : }
207 peter@eisentraut.org 2037 : 10518 : XLogRegisterBufData(0, lefthighkey,
1972 pg@bowt.ie 2038 : 10518 : MAXALIGN(IndexTupleSize(lefthighkey)));
2039 : :
2040 : : /*
2041 : : * Log the contents of the right page in the format understood by
2042 : : * _bt_restore_page(). The whole right page will be recreated.
2043 : : *
2044 : : * Direct access to the page is not ideal, but it is faster; we should
2045 : : * implement a new function in the page API. Note that we only store the tuples
2046 : : * themselves, knowing that they were inserted in item-number order
2047 : : * and so the line pointers can be reconstructed. See comments for
2048 : : * _bt_restore_page().
2049 : : */
3943 heikki.linnakangas@i 2050 : 10518 : XLogRegisterBufData(1,
2999 tgl@sss.pgh.pa.us 2051 : 10518 : (char *) rightpage + ((PageHeader) rightpage)->pd_upper,
3943 heikki.linnakangas@i 2052 : 10518 : ((PageHeader) rightpage)->pd_special - ((PageHeader) rightpage)->pd_upper);
2053 : :
2362 pg@bowt.ie 2054 [ + + ]: 10518 : xlinfo = newitemonleft ? XLOG_BTREE_SPLIT_L : XLOG_BTREE_SPLIT_R;
3943 heikki.linnakangas@i 2055 : 10518 : recptr = XLogInsert(RM_BTREE_ID, xlinfo);
2056 : :
6785 alvherre@alvh.no-ip. 2057 : 10518 : PageSetLSN(origpage, recptr);
9103 vadim4o@yahoo.com 2058 : 10518 : PageSetLSN(rightpage, recptr);
1972 pg@bowt.ie 2059 [ + + ]: 10518 : if (!isrightmost)
9103 vadim4o@yahoo.com 2060 : 5038 : PageSetLSN(spage, recptr);
4176 heikki.linnakangas@i 2061 [ + + ]: 10518 : if (!isleaf)
3426 kgrittn@postgresql.o 2062 : 157 : PageSetLSN(BufferGetPage(cbuf), recptr);
2063 : : }
2064 : :
8992 tgl@sss.pgh.pa.us 2065 [ - + ]: 11436 : END_CRIT_SECTION();
2066 : :
2067 : : /* release the old right sibling */
1972 pg@bowt.ie 2068 [ + + ]: 11436 : if (!isrightmost)
7099 tgl@sss.pgh.pa.us 2069 : 5044 : _bt_relbuf(rel, sbuf);
2070 : :
2071 : : /* release the child */
4190 heikki.linnakangas@i 2072 [ + + ]: 11436 : if (!isleaf)
2073 : 157 : _bt_relbuf(rel, cbuf);
2074 : :
2075 : : /* be tidy */
1972 pg@bowt.ie 2076 [ + + ]: 11436 : if (isleaf)
2077 : 11279 : pfree(lefthighkey);
2078 : :
2079 : : /* split's done */
9867 bruce@momjian.us 2080 : 11436 : return rbuf;
2081 : : }
2082 : :
2083 : : /*
2084 : : * _bt_insert_parent() -- Insert downlink into parent, completing split.
2085 : : *
2086 : : * On entry, buf and rbuf are the left and right split pages, which we
2087 : : * still hold write locks on. Both locks will be released here. We
2088 : : * release the rbuf lock once we have a write lock on the page that we
2089 : : * intend to insert a downlink to rbuf on (i.e. buf's current parent page).
2090 : : * The lock on buf is released at the same point as the lock on the parent
2091 : : * page, since buf's INCOMPLETE_SPLIT flag must be cleared by the same
2092 : : * atomic operation that completes the split by inserting a new downlink.
2093 : : *
2094 : : * stack - stack showing how we got here. Will be NULL when splitting true
2095 : : * root, or during concurrent root split, where we can be inefficient
2096 : : * isroot - we split the true root
2097 : : * isonly - we split a page alone on its level (might have been fast root)
2098 : : */
2099 : : static void
8233 tgl@sss.pgh.pa.us 2100 : 11436 : _bt_insert_parent(Relation rel,
2101 : : Relation heaprel,
2102 : : Buffer buf,
2103 : : Buffer rbuf,
2104 : : BTStack stack,
2105 : : bool isroot,
2106 : : bool isonly)
2107 : : {
819 pg@bowt.ie 2108 [ - + ]: 11436 : Assert(heaprel != NULL);
2109 : :
2110 : : /*
2111 : : * Here we have to do something Lehman and Yao don't talk about: deal with
2112 : : * a root split and construction of a new root. If our stack is empty
2113 : : * then we have just split a node on what had been the root level when we
2114 : : * descended the tree. If it was still the root then we perform a
2115 : : * new-root construction. If it *wasn't* the root anymore, search to find
2116 : : * the next higher level that someone constructed meanwhile, and find the
2117 : : * right place to insert as for the normal case.
2118 : : *
2119 : : * If we have to search for the parent level, we do so by re-descending
2120 : : * from the root. This is not super-efficient, but it's rare enough not
2121 : : * to matter.
2122 : : */
1754 2123 [ + + ]: 11436 : if (isroot)
2124 : : {
2125 : : Buffer rootbuf;
2126 : :
7913 neilc@samurai.com 2127 [ - + ]: 676 : Assert(stack == NULL);
1754 pg@bowt.ie 2128 [ - + ]: 676 : Assert(isonly);
2129 : : /* create a new root node one level up and update the metapage */
819 2130 : 676 : rootbuf = _bt_newlevel(rel, heaprel, buf, rbuf);
2131 : : /* release the split buffers */
7099 tgl@sss.pgh.pa.us 2132 : 676 : _bt_relbuf(rel, rootbuf);
2133 : 676 : _bt_relbuf(rel, rbuf);
2134 : 676 : _bt_relbuf(rel, buf);
2135 : : }
2136 : : else
2137 : : {
8233 2138 : 10760 : BlockNumber bknum = BufferGetBlockNumber(buf);
2139 : 10760 : BlockNumber rbknum = BufferGetBlockNumber(rbuf);
3426 kgrittn@postgresql.o 2140 : 10760 : Page page = BufferGetPage(buf);
2141 : : IndexTuple new_item;
2142 : : BTStackData fakestack;
2143 : : IndexTuple ritem;
2144 : : Buffer pbuf;
2145 : :
7913 neilc@samurai.com 2146 [ + + ]: 10760 : if (stack == NULL)
2147 : : {
2148 : : BTPageOpaque opaque;
2149 : :
4013 heikki.linnakangas@i 2150 [ - + ]: 12 : elog(DEBUG2, "concurrent ROOT page split");
1254 michael@paquier.xyz 2151 : 12 : opaque = BTPageGetOpaque(page);
2152 : :
2153 : : /*
2154 : : * We should never reach here when a leaf page split takes place
2155 : : * despite the insert of newitem being able to apply the fastpath
2156 : : * optimization. Make sure of that with an assertion.
2157 : : *
2158 : : * This is more of a performance issue than a correctness issue.
2159 : : * The fastpath won't have a descent stack. Using a phony stack
2160 : : * here works, but never rely on that. The fastpath should be
2161 : : * rejected within _bt_search_insert() when the rightmost leaf
2162 : : * page will split, since it's faster to go through _bt_search()
2163 : : * and get a stack in the usual way.
2164 : : */
1754 pg@bowt.ie 2165 [ + - + - : 12 : Assert(!(P_ISLEAF(opaque) &&
- + ]
2166 : : BlockNumberIsValid(RelationGetTargetBlock(rel))));
2167 : :
2168 : : /* Find the leftmost page at the next level up */
729 tmunro@postgresql.or 2169 : 12 : pbuf = _bt_get_endpoint(rel, opaque->btpo_level + 1, false);
2170 : : /* Set up a phony stack entry pointing there */
8233 tgl@sss.pgh.pa.us 2171 : 12 : stack = &fakestack;
2172 : 12 : stack->bts_blkno = BufferGetBlockNumber(pbuf);
2173 : 12 : stack->bts_offset = InvalidOffsetNumber;
2174 : 12 : stack->bts_parent = NULL;
2175 : 12 : _bt_relbuf(rel, pbuf);
2176 : : }
2177 : :
2178 : : /* get high key from left, a strict lower bound for new right page */
7164 2179 : 10760 : ritem = (IndexTuple) PageGetItem(page,
2180 : 10760 : PageGetItemId(page, P_HIKEY));
2181 : :
2182 : : /* form an index tuple that points at the new right page */
2183 : 10760 : new_item = CopyIndexTuple(ritem);
2091 pg@bowt.ie 2184 : 10760 : BTreeTupleSetDownLink(new_item, rbknum);
2185 : :
2186 : : /*
2187 : : * Re-find and write lock the parent of buf.
2188 : : *
2189 : : * It's possible that the location of buf's downlink has changed since
2190 : : * our initial _bt_search() descent. _bt_getstackbuf() will detect
2191 : : * and recover from this, updating the stack, which ensures that the
2192 : : * new downlink will be inserted at the correct offset. Even buf's
2193 : : * parent may have changed.
2194 : : */
889 andres@anarazel.de 2195 : 10760 : pbuf = _bt_getstackbuf(rel, heaprel, stack, bknum);
2196 : :
2197 : : /*
2198 : : * Unlock the right child. The left child will be unlocked in
2199 : : * _bt_insertonpg().
2200 : : *
2201 : : * Unlocking the right child must be delayed until here to ensure that
2202 : : * no concurrent VACUUM operation can become confused. Page deletion
2203 : : * cannot be allowed to fail to re-find a downlink for the rbuf page.
2204 : : * (Actually, this is just a vestige of how things used to work. The
2205 : : * page deletion code is expected to check for the INCOMPLETE_SPLIT
2206 : : * flag on the left child. It won't attempt deletion of the right
2207 : : * child until the split is complete. Despite all this, we opt to
2208 : : * conservatively delay unlocking the right child until here.)
2209 : : */
7099 tgl@sss.pgh.pa.us 2210 : 10760 : _bt_relbuf(rel, rbuf);
2211 : :
8233 2212 [ - + ]: 10760 : if (pbuf == InvalidBuffer)
2228 peter@eisentraut.org 2213 [ # # ]:UBC 0 : ereport(ERROR,
2214 : : (errcode(ERRCODE_INDEX_CORRUPTED),
2215 : : errmsg_internal("failed to re-find parent key in index \"%s\" for split pages %u/%u",
2216 : : RelationGetRelationName(rel), bknum, rbknum)));
2217 : :
2218 : : /* Recursively insert into the parent */
889 andres@anarazel.de 2219 :CBC 21520 : _bt_insertonpg(rel, heaprel, NULL, pbuf, buf, stack->bts_parent,
2000 pg@bowt.ie 2220 : 10760 : new_item, MAXALIGN(IndexTupleSize(new_item)),
1754 2221 : 10760 : stack->bts_offset + 1, 0, isonly);
2222 : :
2223 : : /* be tidy */
8233 tgl@sss.pgh.pa.us 2224 : 10760 : pfree(new_item);
2225 : : }
2226 : 11436 : }
2227 : :
2228 : : /*
2229 : : * _bt_finish_split() -- Finish an incomplete split
2230 : : *
2231 : : * A crash or other failure can leave a split incomplete. The insertion
2232 : : * routines refuse to insert on a page that is incompletely split.
2233 : : * Before inserting on such a page, call _bt_finish_split().
2234 : : *
2235 : : * On entry, 'lbuf' must be locked in write-mode. On exit, it is unlocked
2236 : : * and unpinned.
2237 : : *
2238 : : * Caller must provide a valid heaprel, since finishing a page split requires
2239 : : * allocating a new page if and when the parent page splits in turn.
2240 : : */
2241 : : void
889 andres@anarazel.de 2242 :UBC 0 : _bt_finish_split(Relation rel, Relation heaprel, Buffer lbuf, BTStack stack)
2243 : : {
3426 kgrittn@postgresql.o 2244 : 0 : Page lpage = BufferGetPage(lbuf);
1254 michael@paquier.xyz 2245 : 0 : BTPageOpaque lpageop = BTPageGetOpaque(lpage);
2246 : : Buffer rbuf;
2247 : : Page rpage;
2248 : : BTPageOpaque rpageop;
2249 : : bool wasroot;
2250 : : bool wasonly;
2251 : :
4190 heikki.linnakangas@i 2252 [ # # ]: 0 : Assert(P_INCOMPLETE_SPLIT(lpageop));
819 pg@bowt.ie 2253 [ # # ]: 0 : Assert(heaprel != NULL);
2254 : :
2255 : : /* Lock right sibling, the one missing the downlink */
2256 : 0 : rbuf = _bt_getbuf(rel, lpageop->btpo_next, BT_WRITE);
3426 kgrittn@postgresql.o 2257 : 0 : rpage = BufferGetPage(rbuf);
1254 michael@paquier.xyz 2258 : 0 : rpageop = BTPageGetOpaque(rpage);
2259 : :
2260 : : /* Could this be a root split? */
4190 heikki.linnakangas@i 2261 [ # # ]: 0 : if (!stack)
2262 : : {
2263 : : Buffer metabuf;
2264 : : Page metapg;
2265 : : BTMetaPageData *metad;
2266 : :
2267 : : /* acquire lock on the metapage */
819 pg@bowt.ie 2268 : 0 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
3426 kgrittn@postgresql.o 2269 : 0 : metapg = BufferGetPage(metabuf);
4190 heikki.linnakangas@i 2270 : 0 : metad = BTPageGetMeta(metapg);
2271 : :
1754 pg@bowt.ie 2272 : 0 : wasroot = (metad->btm_root == BufferGetBlockNumber(lbuf));
2273 : :
4190 heikki.linnakangas@i 2274 : 0 : _bt_relbuf(rel, metabuf);
2275 : : }
2276 : : else
1754 pg@bowt.ie 2277 : 0 : wasroot = false;
2278 : :
2279 : : /* Was this the only page on the level before split? */
2280 [ # # # # ]: 0 : wasonly = (P_LEFTMOST(lpageop) && P_RIGHTMOST(rpageop));
2281 : :
4190 heikki.linnakangas@i 2282 [ # # ]: 0 : elog(DEBUG1, "finishing incomplete split of %u/%u",
2283 : : BufferGetBlockNumber(lbuf), BufferGetBlockNumber(rbuf));
2284 : :
889 andres@anarazel.de 2285 : 0 : _bt_insert_parent(rel, heaprel, lbuf, rbuf, stack, wasroot, wasonly);
4190 heikki.linnakangas@i 2286 : 0 : }
2287 : :
2288 : : /*
2289 : : * _bt_getstackbuf() -- Walk back up the tree one step, and find the pivot
2290 : : * tuple whose downlink points to child page.
2291 : : *
2292 : : * Caller passes child's block number, which is used to identify
2293 : : * associated pivot tuple in parent page using a linear search that
2294 : : * matches on pivot's downlink/block number. The expected location of
2295 : : * the pivot tuple is taken from the stack one level above the child
2296 : : * page. This is used as a starting point. Insertions into the
2297 : : * parent level could cause the pivot tuple to move right; deletions
2298 : : * could cause it to move left, but not left of the page we previously
2299 : : * found it on.
2300 : : *
2301 : : * Caller can use its stack to relocate the pivot tuple/downlink for
2302 : : * any same-level page to the right of the page found by its initial
2303 : : * descent. This is necessary because of the possibility that caller
2304 : : * moved right to recover from a concurrent page split. It's also
2305 : : * convenient for certain callers to be able to step right when there
2306 : : * wasn't a concurrent page split, while still using their original
2307 : : * stack. For example, the checkingunique _bt_doinsert() case may
2308 : : * have to step right when there are many physical duplicates, and its
2309 : : * scantid forces an insertion to the right of the "first page the
2310 : : * value could be on". (This is also relied on by all of our callers
2311 : : * when dealing with !heapkeyspace indexes.)
2312 : : *
2313 : : * Returns write-locked parent page buffer, or InvalidBuffer if pivot
2314 : : * tuple not found (should not happen). Adjusts bts_blkno &
2315 : : * bts_offset if changed. Page split caller should insert its new
2316 : : * pivot tuple for its new right sibling page on parent page, at the
2317 : : * offset number bts_offset + 1.
2318 : : */
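 : : /*
 : :  * For example (hypothetical block numbers): if the stack says the downlink
 : :  * to child block 42 was last seen at offset 7 of parent block 10, but a
 : :  * concurrent split of the parent moved that pivot tuple to block 11, the
 : :  * search below fails to find it on block 10, follows btpo_next to block 11,
 : :  * and returns block 11 write-locked, with bts_blkno and bts_offset updated.
 : :  */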
2319 : : Buffer
889 andres@anarazel.de 2320 :CBC 13771 : _bt_getstackbuf(Relation rel, Relation heaprel, BTStack stack, BlockNumber child)
2321 : : {
2322 : : BlockNumber blkno;
2323 : : OffsetNumber start;
2324 : :
9178 tgl@sss.pgh.pa.us 2325 : 13771 : blkno = stack->bts_blkno;
2326 : 13771 : start = stack->bts_offset;
2327 : :
2328 : : for (;;)
2329 : 9 : {
2330 : : Buffer buf;
2331 : : Page page;
2332 : : BTPageOpaque opaque;
2333 : :
819 pg@bowt.ie 2334 : 13780 : buf = _bt_getbuf(rel, blkno, BT_WRITE);
3426 kgrittn@postgresql.o 2335 : 13780 : page = BufferGetPage(buf);
1254 michael@paquier.xyz 2336 : 13780 : opaque = BTPageGetOpaque(page);
2337 : :
819 pg@bowt.ie 2338 [ - + ]: 13780 : Assert(heaprel != NULL);
2385 2339 [ - + ]: 13780 : if (P_INCOMPLETE_SPLIT(opaque))
2340 : : {
889 andres@anarazel.de 2341 :UBC 0 : _bt_finish_split(rel, heaprel, buf, stack->bts_parent);
4190 heikki.linnakangas@i 2342 : 0 : continue;
2343 : : }
2344 : :
8232 tgl@sss.pgh.pa.us 2345 [ + + ]:CBC 13780 : if (!P_IGNORE(opaque))
2346 : : {
2347 : : OffsetNumber offnum,
2348 : : minoff,
2349 : : maxoff;
2350 : : ItemId itemid;
2351 : : IndexTuple item;
2352 : :
2353 [ + + ]: 13771 : minoff = P_FIRSTDATAKEY(opaque);
2354 : 13771 : maxoff = PageGetMaxOffsetNumber(page);
2355 : :
2356 : : /*
2357 : : * start = InvalidOffsetNumber means "search the whole page". We
2358 : : * need this test anyway due to the possibility that the page has a high
2359 : : * key now when it didn't before.
2360 : : */
2361 [ + + ]: 13771 : if (start < minoff)
2362 : 21 : start = minoff;
2363 : :
2364 : : /*
2365 : : * Need this check too, to guard against possibility that page
2366 : : * split since we visited it originally.
2367 : : */
7690 2368 [ - + ]: 13771 : if (start > maxoff)
7690 tgl@sss.pgh.pa.us 2369 :UBC 0 : start = OffsetNumberNext(maxoff);
2370 : :
2371 : : /*
2372 : : * These loops will check every item on the page --- but in an
2373 : : * order that's attuned to the probability of where it actually
2374 : : * is. Scan to the right first, then to the left.
2375 : : */
8232 tgl@sss.pgh.pa.us 2376 :CBC 13771 : for (offnum = start;
2377 [ + - ]: 13811 : offnum <= maxoff;
2378 : 40 : offnum = OffsetNumberNext(offnum))
2379 : : {
2380 : 13811 : itemid = PageGetItemId(page, offnum);
7164 2381 : 13811 : item = (IndexTuple) PageGetItem(page, itemid);
2382 : :
2091 pg@bowt.ie 2383 [ + + ]: 13811 : if (BTreeTupleGetDownLink(item) == child)
2384 : : {
2385 : : /* Return accurate pointer to where link is now */
8232 tgl@sss.pgh.pa.us 2386 : 13771 : stack->bts_blkno = blkno;
2387 : 13771 : stack->bts_offset = offnum;
2388 : 13771 : return buf;
2389 : : }
2390 : : }
2391 : :
8232 tgl@sss.pgh.pa.us 2392 :UBC 0 : for (offnum = OffsetNumberPrev(start);
2393 [ # # ]: 0 : offnum >= minoff;
2394 : 0 : offnum = OffsetNumberPrev(offnum))
2395 : : {
2396 : 0 : itemid = PageGetItemId(page, offnum);
7164 2397 : 0 : item = (IndexTuple) PageGetItem(page, itemid);
2398 : :
2091 pg@bowt.ie 2399 [ # # ]: 0 : if (BTreeTupleGetDownLink(item) == child)
2400 : : {
2401 : : /* Return accurate pointer to where link is now */
8232 tgl@sss.pgh.pa.us 2402 : 0 : stack->bts_blkno = blkno;
2403 : 0 : stack->bts_offset = offnum;
2404 : 0 : return buf;
2405 : : }
2406 : : }
2407 : : }
2408 : :
2409 : : /*
2410 : : * The item we're looking for moved right at least one page.
2411 : : *
2412 : : * Lehman and Yao couple/chain locks when moving right here, which we
2413 : : * can avoid. See nbtree/README.
2414 : : */
9178 tgl@sss.pgh.pa.us 2415 [ - + ]:CBC 9 : if (P_RIGHTMOST(opaque))
2416 : : {
8819 tgl@sss.pgh.pa.us 2417 :UBC 0 : _bt_relbuf(rel, buf);
7808 2418 : 0 : return InvalidBuffer;
2419 : : }
9178 tgl@sss.pgh.pa.us 2420 :CBC 9 : blkno = opaque->btpo_next;
8233 2421 : 9 : start = InvalidOffsetNumber;
8819 2422 : 9 : _bt_relbuf(rel, buf);
2423 : : }
2424 : : }
2425 : :
2426 : : /*
2427 : : * _bt_newlevel() -- Create a new level above root page.
2428 : : *
2429 : : * We've just split the old root page and need to create a new one.
2430 : : * In order to do this, we add a new root page to the file, then lock
2431 : : * the metadata page and update it. This is guaranteed to be deadlock-
2432 : : * free, because all readers release their locks on the metadata page
2433 : : * before trying to lock the root, and all writers lock the root before
2434 : : * trying to lock the metadata page. We have a write lock on the old
2435 : : * root page, so we have not introduced any cycles into the waits-for
2436 : : * graph.
2437 : : *
2438 : : * On entry, lbuf (the old root) and rbuf (its new peer) are write-
2439 : : * locked. On exit, a new root page exists with entries for the
2440 : : * two new children, metapage is updated and unlocked/unpinned.
2441 : : * The new root buffer is returned to caller which has to unlock/unpin
2442 : : * lbuf, rbuf & rootbuf.
2443 : : */
2444 : : static Buffer
819 pg@bowt.ie 2445 : 676 : _bt_newlevel(Relation rel, Relation heaprel, Buffer lbuf, Buffer rbuf)
2446 : : {
2447 : : Buffer rootbuf;
2448 : : Page lpage,
2449 : : rootpage;
2450 : : BlockNumber lbkno,
2451 : : rbkno;
2452 : : BlockNumber rootblknum;
2453 : : BTPageOpaque rootopaque;
2454 : : BTPageOpaque lopaque;
2455 : : ItemId itemid;
2456 : : IndexTuple item;
2457 : : IndexTuple left_item;
2458 : : Size left_item_sz;
2459 : : IndexTuple right_item;
2460 : : Size right_item_sz;
2461 : : Buffer metabuf;
2462 : : Page metapg;
2463 : : BTMetaPageData *metad;
2464 : :
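	/*
	 * Remember the just-split pages.  The left page (old root) is also
	 * needed for its high key, and to clear its incomplete-split flag below.
	 */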
8233 tgl@sss.pgh.pa.us 2465 : 676 : lbkno = BufferGetBlockNumber(lbuf);
2466 : 676 : rbkno = BufferGetBlockNumber(rbuf);
3426 kgrittn@postgresql.o 2467 : 676 : lpage = BufferGetPage(lbuf);
1254 michael@paquier.xyz 2468 : 676 : lopaque = BTPageGetOpaque(lpage);
2469 : :
2470 : : /* get a new root page */
819 pg@bowt.ie 2471 : 676 : rootbuf = _bt_allocbuf(rel, heaprel);
3426 kgrittn@postgresql.o 2472 : 676 : rootpage = BufferGetPage(rootbuf);
9103 vadim4o@yahoo.com 2473 : 676 : rootblknum = BufferGetBlockNumber(rootbuf);
2474 : :
2475 : : /* acquire lock on the metapage */
819 pg@bowt.ie 2476 : 676 : metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
3426 kgrittn@postgresql.o 2477 : 676 : metapg = BufferGetPage(metabuf);
9018 vadim4o@yahoo.com 2478 : 676 : metad = BTPageGetMeta(metapg);
2479 : :
2480 : : /*
2481 : : * Create downlink item for left page (old root). The key value used is
2482 : : * "minus infinity", a sentinel value that's reliably less than any real
2483 : : * key value that could appear in the left page.
2484 : : */
4173 heikki.linnakangas@i 2485 : 676 : left_item_sz = sizeof(IndexTupleData);
2486 : 676 : left_item = (IndexTuple) palloc(left_item_sz);
2487 : 676 : left_item->t_info = left_item_sz;
2091 pg@bowt.ie 2488 : 676 : BTreeTupleSetDownLink(left_item, lbkno);
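	/*
	 * A pivot tuple with zero key attributes is the on-disk representation
	 * of the "minus infinity" sentinel described above.
	 */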
1978 2489 : 676 : BTreeTupleSetNAtts(left_item, 0, false);
2490 : :
2491 : : /*
2492 : : * Create downlink item for right page. The key for it is obtained from
2493 : : * the "high key" position in the left page.
2494 : : */
4173 heikki.linnakangas@i 2495 : 676 : itemid = PageGetItemId(lpage, P_HIKEY);
2496 : 676 : right_item_sz = ItemIdGetLength(itemid);
2497 : 676 : item = (IndexTuple) PageGetItem(lpage, itemid);
2498 : 676 : right_item = CopyIndexTuple(item);
2091 pg@bowt.ie 2499 : 676 : BTreeTupleSetDownLink(right_item, rbkno);
2500 : :
2501 : : /* NO EREPORT(ERROR) from here till newroot op is logged */
9003 tgl@sss.pgh.pa.us 2502 : 676 : START_CRIT_SECTION();
2503 : :
2504 : : /* upgrade metapage if needed */
2362 pg@bowt.ie 2505 [ - + ]: 676 : if (metad->btm_version < BTREE_NOVAC_VERSION)
2656 teodor@sigaev.ru 2506 :UBC 0 : _bt_upgrademetapage(metapg);
2507 : :
2508 : : /* set btree special data */
1254 michael@paquier.xyz 2509 :CBC 676 : rootopaque = BTPageGetOpaque(rootpage);
10226 bruce@momjian.us 2510 : 676 : rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
8233 tgl@sss.pgh.pa.us 2511 : 676 : rootopaque->btpo_flags = BTP_ROOT;
1655 pg@bowt.ie 2512 : 676 : rootopaque->btpo_level =
1254 michael@paquier.xyz 2513 : 676 : (BTPageGetOpaque(lpage))->btpo_level + 1;
7061 tgl@sss.pgh.pa.us 2514 : 676 : rootopaque->btpo_cycleid = 0;
2515 : :
2516 : : /* update metapage data */
8233 2517 : 676 : metad->btm_root = rootblknum;
1655 pg@bowt.ie 2518 : 676 : metad->btm_level = rootopaque->btpo_level;
8233 tgl@sss.pgh.pa.us 2519 : 676 : metad->btm_fastroot = rootblknum;
1655 pg@bowt.ie 2520 : 676 : metad->btm_fastlevel = rootopaque->btpo_level;
2521 : :
2522 : : /*
2523 : : * Insert the left page pointer into the new root page. The root page is
2524 : : * the rightmost page on its level so there is no "high key" in it; the
2525 : : * two items will go into positions P_HIKEY and P_FIRSTKEY.
2526 : : *
2527 : : * Note: we *must* insert the two items in item-number order, for the
2528 : : * benefit of _bt_restore_page().
2529 : : */
2697 teodor@sigaev.ru 2530 [ + - - + ]: 676 : Assert(BTreeTupleGetNAtts(left_item, rel) == 0);
4173 heikki.linnakangas@i 2531 [ - + ]: 676 : if (PageAddItem(rootpage, (Item) left_item, left_item_sz, P_HIKEY,
2532 : : false, false) == InvalidOffsetNumber)
6459 tgl@sss.pgh.pa.us 2533 [ # # ]:UBC 0 : elog(PANIC, "failed to add leftkey to new root page"
2534 : : " while splitting block %u of index \"%s\"",
2535 : : BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
2536 : :
2537 : : /*
2538 : : * insert the right page pointer into the new root page.
2539 : : */
2362 pg@bowt.ie 2540 [ + - - + ]:CBC 676 : Assert(BTreeTupleGetNAtts(right_item, rel) > 0);
2541 [ + - - + ]: 676 : Assert(BTreeTupleGetNAtts(right_item, rel) <=
2542 : : IndexRelationGetNumberOfKeyAttributes(rel));
4173 heikki.linnakangas@i 2543 [ - + ]: 676 : if (PageAddItem(rootpage, (Item) right_item, right_item_sz, P_FIRSTKEY,
2544 : : false, false) == InvalidOffsetNumber)
6459 tgl@sss.pgh.pa.us 2545 [ # # ]:UBC 0 : elog(PANIC, "failed to add rightkey to new root page"
2546 : : " while splitting block %u of index \"%s\"",
2547 : : BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
2548 : :
2549 : : /* Clear the incomplete-split flag in the left child */
4190 heikki.linnakangas@i 2550 [ - + ]:CBC 676 : Assert(P_INCOMPLETE_SPLIT(lopaque));
2551 : 676 : lopaque->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
2552 : 676 : MarkBufferDirty(lbuf);
2553 : :
7099 tgl@sss.pgh.pa.us 2554 : 676 : MarkBufferDirty(rootbuf);
2555 : 676 : MarkBufferDirty(metabuf);
2556 : :
2557 : : /* XLOG stuff */
5381 rhaas@postgresql.org 2558 [ + + + + : 676 : if (RelationNeedsWAL(rel))
+ + + - ]
2559 : : {
2560 : : xl_btree_newroot xlrec;
2561 : : XLogRecPtr recptr;
2562 : : xl_btree_metadata md;
2563 : :
8233 tgl@sss.pgh.pa.us 2564 : 657 : xlrec.rootblk = rootblknum;
9018 vadim4o@yahoo.com 2565 : 657 : xlrec.level = metad->btm_level;
2566 : :
3943 heikki.linnakangas@i 2567 : 657 : XLogBeginInsert();
207 peter@eisentraut.org 2568 : 657 : XLogRegisterData(&xlrec, SizeOfBtreeNewroot);
2569 : :
3943 heikki.linnakangas@i 2570 : 657 : XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
2571 : 657 : XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
2864 tgl@sss.pgh.pa.us 2572 : 657 : XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
2573 : :
2362 pg@bowt.ie 2574 [ - + ]: 657 : Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
2575 : 657 : md.version = metad->btm_version;
3943 heikki.linnakangas@i 2576 : 657 : md.root = rootblknum;
2577 : 657 : md.level = metad->btm_level;
2578 : 657 : md.fastroot = rootblknum;
2579 : 657 : md.fastlevel = metad->btm_level;
1655 pg@bowt.ie 2580 : 657 : md.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
2019 2581 : 657 : md.allequalimage = metad->btm_allequalimage;
2582 : :
207 peter@eisentraut.org 2583 : 657 : XLogRegisterBufData(2, &md, sizeof(xl_btree_metadata));
2584 : :
2585 : : /*
2586 : : * Direct access to the page is not ideal, but it is faster; the page
2587 : : * API should eventually provide a function for this.
2588 : : */
3943 heikki.linnakangas@i 2589 : 657 : XLogRegisterBufData(0,
2999 tgl@sss.pgh.pa.us 2590 : 657 : (char *) rootpage + ((PageHeader) rootpage)->pd_upper,
3943 heikki.linnakangas@i 2591 : 657 : ((PageHeader) rootpage)->pd_special -
2592 : 657 : ((PageHeader) rootpage)->pd_upper);
2593 : :
2594 : 657 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);
2595 : :
4155 2596 : 657 : PageSetLSN(lpage, recptr);
9103 vadim4o@yahoo.com 2597 : 657 : PageSetLSN(rootpage, recptr);
9094 2598 : 657 : PageSetLSN(metapg, recptr);
2599 : : }
2600 : :
9003 tgl@sss.pgh.pa.us 2601 [ - + ]: 676 : END_CRIT_SECTION();
2602 : :
2603 : : /* done with metapage */
7099 2604 : 676 : _bt_relbuf(rel, metabuf);
2605 : :
4173 heikki.linnakangas@i 2606 : 676 : pfree(left_item);
2607 : 676 : pfree(right_item);
2608 : :
7178 neilc@samurai.com 2609 : 676 : return rootbuf;
2610 : : }
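
/*
 * For illustration (a reading aid, not code from nbtinsert.c): after
 * _bt_newlevel() returns, the new root page holds exactly two downlinks,
 * added in item-number order:
 *
 *		offset 1 (P_HIKEY slot):	"minus infinity" key -> old root (lbkno)
 *		offset 2 (P_FIRSTKEY):		copy of old root's high key -> rbkno
 *
 * Because the root is the rightmost page on its level, the item in the
 * P_HIKEY slot is a data item rather than a high key.
 */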
2611 : :
2612 : : /*
2613 : : * _bt_pgaddtup() -- add a data item to a particular page during split.
2614 : : *
2615 : : * The difference between this routine and a bare PageAddItem call is
2616 : : * that this code can deal with the first data item on an internal btree
2617 : : * page in passing. This data item (which is called "firstright" within
2618 : : * _bt_split()) has a key that must be treated as minus infinity after
2619 : : * the split. Therefore, we truncate away all attributes when caller
2620 : : * specifies it's the first data item on page (downlink is not changed,
2621 : : * though). This extra step is only needed for the right page of an
2622 : : * internal page split. There is no need to do this for the first data
2623 : : * item on the existing/left page, since that will already have been
2624 : : * truncated during an earlier page split.
2625 : : *
2626 : : * See _bt_split() for a high level explanation of why we truncate here.
2627 : : * Note that this routine has nothing to do with suffix truncation,
2628 : : * despite using some of the same infrastructure.
2629 : : */
2630 : : static inline bool
5487 tgl@sss.pgh.pa.us 2631 : 3471510 : _bt_pgaddtup(Page page,
2632 : : Size itemsize,
2633 : : IndexTuple itup,
2634 : : OffsetNumber itup_off,
2635 : : bool newfirstdataitem)
2636 : : {
2637 : : IndexTupleData trunctuple;
2638 : :
1972 pg@bowt.ie 2639 [ + + ]: 3471510 : if (newfirstdataitem)
2640 : : {
7164 tgl@sss.pgh.pa.us 2641 : 157 : trunctuple = *itup;
2642 : 157 : trunctuple.t_info = sizeof(IndexTupleData);
1978 pg@bowt.ie 2643 : 157 : BTreeTupleSetNAtts(&trunctuple, 0, false);
7164 tgl@sss.pgh.pa.us 2644 : 157 : itup = &trunctuple;
2645 : 157 : itemsize = sizeof(IndexTupleData);
2646 : : }
2647 : :
1972 pg@bowt.ie 2648 [ - + ]: 3471510 : if (unlikely(PageAddItem(page, (Item) itup, itemsize, itup_off, false,
2649 : : false) == InvalidOffsetNumber))
5487 tgl@sss.pgh.pa.us 2650 :UBC 0 : return false;
2651 : :
5487 tgl@sss.pgh.pa.us 2652 :CBC 3471510 : return true;
2653 : : }
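
/*
 * Illustrative sketch (hypothetical helper, not taken from _bt_split()): a
 * caller populating the right half of an internal page during a split passes
 * newfirstdataitem=true for the first data item only, so that just that item
 * is truncated to "minus infinity" as described above.
 */
static void
sketch_fill_right_internal_page(Page rightpage, IndexTuple *items,
								Size *itemsizes, int nitems,
								OffsetNumber firstoff)
{
	for (int i = 0; i < nitems; i++)
	{
		/* only the very first data item becomes the new "minus infinity" */
		if (!_bt_pgaddtup(rightpage, itemsizes[i], items[i],
						  (OffsetNumber) (firstoff + i), i == 0))
			elog(ERROR, "sketch: failed to add item to right page");
	}
}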
2654 : :
2655 : : /*
2656 : : * _bt_delete_or_dedup_one_page - Try to avoid a leaf page split.
2657 : : *
2658 : : * There are three operations performed here: simple index deletion, bottom-up
2659 : : * index deletion, and deduplication. If all three operations fail to free
2660 : : * enough space for the incoming item then caller will go on to split the
2661 : : * page. We always consider simple deletion first. If that doesn't work out
2662 : : * we consider alternatives. Callers that only want us to consider simple
2663 : : * deletion (without any fallback) ask for that using the 'simpleonly'
2664 : : * argument.
2665 : : *
2666 : : * We usually pick only one alternative "complex" operation when simple
2667 : : * deletion alone won't prevent a page split. The 'checkingunique',
2668 : : * 'uniquedup', and 'indexUnchanged' arguments are used for that.
2669 : : *
2670 : : * Note: We used to only delete LP_DEAD items when the BTP_HAS_GARBAGE page
2671 : : * level flag was found set. The flag was useful back when there wasn't
2672 : : * necessarily one single page for a duplicate tuple to go on (before heap TID
2673 : : * became a part of the key space in version 4 indexes). But we don't
2674 : : * actually look at the flag anymore (it's not a gating condition for our
2675 : : * caller). That would cause us to miss tuples that are safe to delete,
2676 : : * without getting any benefit in return. We know that the alternative is to
2677 : : * split the page; scanning the line pointer array in passing won't have
2678 : : * noticeable overhead. (We still maintain the BTP_HAS_GARBAGE flag despite
2679 : : * all this because !heapkeyspace indexes must still do a "getting tired"
2680 : : * linear search, and so are likely to get some benefit from using it as a
2681 : : * gating condition.)
2682 : : */
2683 : : static void
1754 pg@bowt.ie 2684 : 24968 : _bt_delete_or_dedup_one_page(Relation rel, Relation heapRel,
2685 : : BTInsertState insertstate,
2686 : : bool simpleonly, bool checkingunique,
2687 : : bool uniquedup, bool indexUnchanged)
2688 : : {
2689 : : OffsetNumber deletable[MaxIndexTuplesPerPage];
6912 bruce@momjian.us 2690 : 24968 : int ndeletable = 0;
2691 : : OffsetNumber offnum,
2692 : : minoff,
2693 : : maxoff;
1754 pg@bowt.ie 2694 : 24968 : Buffer buffer = insertstate->buf;
2695 : 24968 : BTScanInsert itup_key = insertstate->itup_key;
3426 kgrittn@postgresql.o 2696 : 24968 : Page page = BufferGetPage(buffer);
1254 michael@paquier.xyz 2697 : 24968 : BTPageOpaque opaque = BTPageGetOpaque(page);
2698 : :
2362 pg@bowt.ie 2699 [ - + ]: 24968 : Assert(P_ISLEAF(opaque));
1697 2700 [ + + - + ]: 24968 : Assert(simpleonly || itup_key->heapkeyspace);
2701 [ + + + - : 24968 : Assert(!simpleonly || (!checkingunique && !uniquedup && !indexUnchanged));
+ - - + ]
2702 : :
2703 : : /*
2704 : : * Scan over all items to see which ones need to be deleted according to
2705 : : * LP_DEAD flags. We'll usually manage to delete a few extra items that
2706 : : * are not marked LP_DEAD in passing. Often the extra items that actually
2707 : : * end up getting deleted are items that would have had their LP_DEAD bit
2708 : : * set before long anyway (if we opted not to include them as extras).
2709 : : */
2710 [ + + ]: 24968 : minoff = P_FIRSTDATAKEY(opaque);
6983 tgl@sss.pgh.pa.us 2711 : 24968 : maxoff = PageGetMaxOffsetNumber(page);
1697 pg@bowt.ie 2712 : 24968 : for (offnum = minoff;
6983 tgl@sss.pgh.pa.us 2713 [ + + ]: 6750010 : offnum <= maxoff;
2714 : 6725042 : offnum = OffsetNumberNext(offnum))
2715 : : {
6912 bruce@momjian.us 2716 : 6725042 : ItemId itemId = PageGetItemId(page, offnum);
2717 : :
6569 tgl@sss.pgh.pa.us 2718 [ + + ]: 6725042 : if (ItemIdIsDead(itemId))
6983 2719 : 127856 : deletable[ndeletable++] = offnum;
2720 : : }
2721 : :
2722 [ + + ]: 24968 : if (ndeletable > 0)
2723 : : {
1697 pg@bowt.ie 2724 : 3548 : _bt_simpledel_pass(rel, buffer, heapRel, deletable, ndeletable,
2725 : : insertstate->itup, minoff, maxoff);
1754 2726 : 3548 : insertstate->bounds_valid = false;
2727 : :
2728 : : /* Return when a page split has already been avoided */
2729 [ + + ]: 3548 : if (PageGetFreeSpace(page) >= insertstate->itemsz)
2730 : 11676 : return;
2731 : :
2732 : : /* Might as well assume duplicates (if checkingunique) */
2733 : 52 : uniquedup = true;
2734 : : }
2735 : :
2736 : : /*
2737 : : * We're done with simple deletion. Return early for callers that only
2738 : : * call here so that simple deletion can be considered. This includes
2739 : : * callers that explicitly ask for this and checkingunique callers that
2740 : : * probably don't have any version churn duplicates on the page.
2741 : : *
2742 : : * Note: The page's BTP_HAS_GARBAGE hint flag may still be set when we
2743 : : * return at this point (or when we go on to try either or both of our
2744 : : * other strategies and they also fail). We do not bother expending a
2745 : : * separate write to clear it, however. Caller will definitely clear it
2746 : : * when it goes on to split the page (note also that the deduplication
2747 : : * process will clear the flag in passing, just to keep things tidy).
2748 : : */
1697 2749 [ + - + + : 21472 : if (simpleonly || (checkingunique && !uniquedup))
+ + ]
2750 : : {
2751 [ - + ]: 7981 : Assert(!indexUnchanged);
1754 2752 : 7981 : return;
2753 : : }
2754 : :
2755 : : /* Assume bounds about to be invalidated (this is almost certain now) */
2756 : 13491 : insertstate->bounds_valid = false;
2757 : :
2758 : : /*
2759 : : * Perform bottom-up index deletion pass when executor hint indicated that
2760 : : * incoming item is logically unchanged, or for a unique index that is
2761 : : * known to have physical duplicates for some other reason. (There is a
2762 : : * large overlap between these two cases for a unique index. It's worth
2763 : : * having both triggering conditions in order to apply the optimization in
2764 : : * the event of successive related INSERT and DELETE statements.)
2765 : : *
2766 : : * We'll go on to do a deduplication pass when a bottom-up pass fails to
2767 : : * delete an acceptable amount of free space (a significant fraction of
2768 : : * the page, or space for the new item, whichever is greater).
2769 : : *
2770 : : * Note: Bottom-up index deletion uses the same equality/equivalence
2771 : : * routines as deduplication internally. However, it does not merge
2772 : : * together index tuples, so the same correctness considerations do not
2773 : : * apply. We deliberately omit an index-is-allequalimage test here.
2774 : : */
1697 2775 [ + + + + : 15421 : if ((indexUnchanged || uniquedup) &&
+ + ]
2776 : 1930 : _bt_bottomupdel_pass(rel, buffer, heapRel, insertstate->itemsz))
2777 : 199 : return;
2778 : :
2779 : : /* Perform deduplication pass (when enabled and index-is-allequalimage) */
1754 2780 [ + - - + : 13292 : if (BTGetDeduplicateItems(rel) && itup_key->allequalimage)
+ + + + +
+ + - ]
872 2781 : 13283 : _bt_dedup_pass(rel, buffer, insertstate->itup, insertstate->itemsz,
2782 [ + + + + ]: 13283 : (indexUnchanged || uniquedup));
2783 : : }
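
/*
 * For illustration (summary only, not code from nbtinsert.c): the order in
 * which the function above tries its space-saving strategies is roughly
 *
 *		1. simple deletion of LP_DEAD items (always considered first);
 *		2. bottom-up index deletion, when indexUnchanged or uniquedup;
 *		3. a deduplication pass, when the index supports deduplication.
 *
 * Each later step is reached only when the earlier ones did not free enough
 * space for the incoming tuple (or were skipped entirely).
 */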
2784 : :
2785 : : /*
2786 : : * _bt_simpledel_pass - Simple index tuple deletion pass.
2787 : : *
2788 : : * We delete all LP_DEAD-set index tuples on a leaf page. The offset numbers
2789 : : * of all such tuples are determined by caller (caller passes these to us as
2790 : : * its 'deletable' argument).
2791 : : *
2792 : : * We might also delete extra index tuples that turn out to be safe to delete
2793 : : * in passing (though they must be cheap to check in passing to begin with).
2794 : : * There is no certainty that any extra tuples will be deleted, though. The
2795 : : * high level goal of the approach we take is to get the most out of each call
2796 : : * here (without noticeably increasing the per-call overhead compared to what
2797 : : * we need to do just to be able to delete the page's LP_DEAD-marked index
2798 : : * tuples).
2799 : : *
2800 : : * The number of extra index tuples that turn out to be deletable might
2801 : : * greatly exceed the number of LP_DEAD-marked index tuples due to various
2802 : : * locality related effects. For example, it's possible that the total number
2803 : : * of table blocks (pointed to by all TIDs on the leaf page) is naturally
2804 : : * quite low, in which case we might end up checking if it's possible to
2805 : : * delete _most_ index tuples on the page (without the tableam needing to
2806 : : * access additional table blocks). The tableam will sometimes stumble upon
2807 : : * _many_ extra deletable index tuples in indexes where this pattern is
2808 : : * common.
2809 : : *
2810 : : * See nbtree/README for further details on simple index tuple deletion.
2811 : : */
2812 : : static void
1697 2813 : 3548 : _bt_simpledel_pass(Relation rel, Buffer buffer, Relation heapRel,
2814 : : OffsetNumber *deletable, int ndeletable, IndexTuple newitem,
2815 : : OffsetNumber minoff, OffsetNumber maxoff)
2816 : : {
2817 : 3548 : Page page = BufferGetPage(buffer);
2818 : : BlockNumber *deadblocks;
2819 : : int ndeadblocks;
2820 : : TM_IndexDeleteOp delstate;
2821 : : OffsetNumber offnum;
2822 : :
2823 : : /* Get array of table blocks pointed to by LP_DEAD-set tuples */
2824 : 3548 : deadblocks = _bt_deadblocks(page, deletable, ndeletable, newitem,
2825 : : &ndeadblocks);
2826 : :
2827 : : /* Initialize tableam state that describes index deletion operation */
1402 2828 : 3548 : delstate.irel = rel;
2829 : 3548 : delstate.iblknum = BufferGetBlockNumber(buffer);
1697 2830 : 3548 : delstate.bottomup = false;
2831 : 3548 : delstate.bottomupfreespace = 0;
2832 : 3548 : delstate.ndeltids = 0;
2833 : 3548 : delstate.deltids = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexDelete));
2834 : 3548 : delstate.status = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexStatus));
2835 : :
2836 : 3548 : for (offnum = minoff;
2837 [ + + ]: 1031722 : offnum <= maxoff;
2838 : 1028174 : offnum = OffsetNumberNext(offnum))
2839 : : {
2840 : 1028174 : ItemId itemid = PageGetItemId(page, offnum);
2841 : 1028174 : IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
2842 : 1028174 : TM_IndexDelete *odeltid = &delstate.deltids[delstate.ndeltids];
2843 : 1028174 : TM_IndexStatus *ostatus = &delstate.status[delstate.ndeltids];
2844 : : BlockNumber tidblock;
2845 : : void *match;
2846 : :
2847 [ + + ]: 1028174 : if (!BTreeTupleIsPosting(itup))
2848 : : {
2849 : 982160 : tidblock = ItemPointerGetBlockNumber(&itup->t_tid);
2850 : 982160 : match = bsearch(&tidblock, deadblocks, ndeadblocks,
2851 : : sizeof(BlockNumber), _bt_blk_cmp);
2852 : :
2853 [ + + ]: 982160 : if (!match)
2854 : : {
2855 [ - + ]: 604913 : Assert(!ItemIdIsDead(itemid));
2856 : 604913 : continue;
2857 : : }
2858 : :
2859 : : /*
2860 : : * TID's table block is among those pointed to by the TIDs from
2861 : : * LP_DEAD-bit set tuples on page -- add TID to deltids
2862 : : */
2863 : 377247 : odeltid->tid = itup->t_tid;
2864 : 377247 : odeltid->id = delstate.ndeltids;
2865 : 377247 : ostatus->idxoffnum = offnum;
2866 : 377247 : ostatus->knowndeletable = ItemIdIsDead(itemid);
2867 : 377247 : ostatus->promising = false; /* unused */
2868 : 377247 : ostatus->freespace = 0; /* unused */
2869 : :
2870 : 377247 : delstate.ndeltids++;
2871 : : }
2872 : : else
2873 : : {
2874 : 46014 : int nitem = BTreeTupleGetNPosting(itup);
2875 : :
2876 [ + + ]: 227849 : for (int p = 0; p < nitem; p++)
2877 : : {
2878 : 181835 : ItemPointer tid = BTreeTupleGetPostingN(itup, p);
2879 : :
2880 : 181835 : tidblock = ItemPointerGetBlockNumber(tid);
2881 : 181835 : match = bsearch(&tidblock, deadblocks, ndeadblocks,
2882 : : sizeof(BlockNumber), _bt_blk_cmp);
2883 : :
2884 [ + + ]: 181835 : if (!match)
2885 : : {
2886 [ - + ]: 158807 : Assert(!ItemIdIsDead(itemid));
2887 : 158807 : continue;
2888 : : }
2889 : :
2890 : : /*
2891 : : * TID's table block is among those pointed to by the TIDs
2892 : : * from LP_DEAD-bit set tuples on page -- add TID to deltids
2893 : : */
2894 : 23028 : odeltid->tid = *tid;
2895 : 23028 : odeltid->id = delstate.ndeltids;
2896 : 23028 : ostatus->idxoffnum = offnum;
2897 : 23028 : ostatus->knowndeletable = ItemIdIsDead(itemid);
2898 : 23028 : ostatus->promising = false; /* unused */
2899 : 23028 : ostatus->freespace = 0; /* unused */
2900 : :
2901 : 23028 : odeltid++;
2902 : 23028 : ostatus++;
2903 : 23028 : delstate.ndeltids++;
2904 : : }
2905 : : }
2906 : : }
2907 : :
2908 : 3548 : pfree(deadblocks);
2909 : :
2910 [ - + ]: 3548 : Assert(delstate.ndeltids >= ndeletable);
2911 : :
2912 : : /* Physically delete LP_DEAD tuples (plus any delete-safe extra TIDs) */
2913 : 3548 : _bt_delitems_delete_check(rel, buffer, heapRel, &delstate);
2914 : :
2915 : 3548 : pfree(delstate.deltids);
2916 : 3548 : pfree(delstate.status);
2917 : 3548 : }
2918 : :
2919 : : /*
2920 : : * _bt_deadblocks() -- Get LP_DEAD related table blocks.
2921 : : *
2922 : : * Builds sorted and unique-ified array of table block numbers from index
2923 : : * tuple TIDs whose line pointers are marked LP_DEAD. Also adds the table
2924 : : * block from incoming newitem just in case it isn't among the LP_DEAD-related
2925 : : * table blocks.
2926 : : *
2927 : : * Always counting the newitem's table block as an LP_DEAD related block makes
2928 : : * sense because the cost is consistently low; it is practically certain that
2929 : : * the table block will not incur a buffer miss in tableam. On the other hand
2930 : : * the benefit is often quite high. There is a decent chance that there will
2931 : : * be some deletable items from this block, since in general most garbage
2932 : : * tuples became garbage in the recent past (in many cases this won't be the
2933 : : * first logical row that core code added to/modified in table block
2934 : : * recently).
2935 : : *
2936 : : * Returns final array, and sets *nblocks to its final size for caller.
2937 : : */
2938 : : static BlockNumber *
2939 : 3548 : _bt_deadblocks(Page page, OffsetNumber *deletable, int ndeletable,
2940 : : IndexTuple newitem, int *nblocks)
2941 : : {
2942 : : int spacentids,
2943 : : ntids;
2944 : : BlockNumber *tidblocks;
2945 : :
2946 : : /*
2947 : : * Accumulate each TID's block in array whose initial size has space for
2948 : : * one table block per LP_DEAD-set tuple (plus space for the newitem table
2949 : : * block). Array will only need to grow when there are LP_DEAD-marked
2950 : : * posting list tuples (which is not that common).
2951 : : */
2952 : 3548 : spacentids = ndeletable + 1;
2953 : 3548 : ntids = 0;
2954 : 3548 : tidblocks = (BlockNumber *) palloc(sizeof(BlockNumber) * spacentids);
2955 : :
2956 : : /*
2957 : : * First add the table block for the incoming newitem. This is the one
2958 : : * case where simple deletion can visit a table block that doesn't have
2959 : : * any known deletable items.
2960 : : */
2961 [ + - - + ]: 3548 : Assert(!BTreeTupleIsPosting(newitem) && !BTreeTupleIsPivot(newitem));
2962 : 3548 : tidblocks[ntids++] = ItemPointerGetBlockNumber(&newitem->t_tid);
2963 : :
2964 [ + + ]: 131404 : for (int i = 0; i < ndeletable; i++)
2965 : : {
2966 : 127856 : ItemId itemid = PageGetItemId(page, deletable[i]);
2967 : 127856 : IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
2968 : :
2969 [ - + ]: 127856 : Assert(ItemIdIsDead(itemid));
2970 : :
2971 [ + + ]: 127856 : if (!BTreeTupleIsPosting(itup))
2972 : : {
2973 [ + + ]: 123590 : if (ntids + 1 > spacentids)
2974 : : {
2975 : 109 : spacentids *= 2;
2976 : : tidblocks = (BlockNumber *)
2977 : 109 : repalloc(tidblocks, sizeof(BlockNumber) * spacentids);
2978 : : }
2979 : :
2980 : 123590 : tidblocks[ntids++] = ItemPointerGetBlockNumber(&itup->t_tid);
2981 : : }
2982 : : else
2983 : : {
2984 : 4266 : int nposting = BTreeTupleGetNPosting(itup);
2985 : :
2986 [ + + ]: 4266 : if (ntids + nposting > spacentids)
2987 : : {
2988 : 94 : spacentids = Max(spacentids * 2, ntids + nposting);
2989 : : tidblocks = (BlockNumber *)
2990 : 94 : repalloc(tidblocks, sizeof(BlockNumber) * spacentids);
2991 : : }
2992 : :
2993 [ + + ]: 14183 : for (int j = 0; j < nposting; j++)
2994 : : {
2995 : 9917 : ItemPointer tid = BTreeTupleGetPostingN(itup, j);
2996 : :
2997 : 9917 : tidblocks[ntids++] = ItemPointerGetBlockNumber(tid);
2998 : : }
2999 : : }
3000 : : }
3001 : :
3002 : 3548 : qsort(tidblocks, ntids, sizeof(BlockNumber), _bt_blk_cmp);
3003 : 3548 : *nblocks = qunique(tidblocks, ntids, sizeof(BlockNumber), _bt_blk_cmp);
3004 : :
3005 : 3548 : return tidblocks;
3006 : : }
3007 : :
3008 : : /*
3009 : : * _bt_blk_cmp() -- qsort comparison function for _bt_simpledel_pass
3010 : : */
3011 : : static inline int
3012 : 2630382 : _bt_blk_cmp(const void *arg1, const void *arg2)
3013 : : {
3014 : 2630382 : BlockNumber b1 = *((BlockNumber *) arg1);
3015 : 2630382 : BlockNumber b2 = *((BlockNumber *) arg2);
3016 : :
568 nathan@postgresql.or 3017 : 2630382 : return pg_cmp_u32(b1, b2);
3018 : : }
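
/*
 * Illustrative sketch (not part of nbtinsert.c): the sort/unique/probe
 * pattern that _bt_deadblocks() and _bt_simpledel_pass() use on table block
 * numbers, with _bt_blk_cmp() as the shared comparator.
 */
static bool
sketch_block_is_dead_related(BlockNumber *blocks, int nblocks,
							 BlockNumber target)
{
	size_t		nunique;

	qsort(blocks, nblocks, sizeof(BlockNumber), _bt_blk_cmp);
	nunique = qunique(blocks, nblocks, sizeof(BlockNumber), _bt_blk_cmp);

	return bsearch(&target, blocks, nunique, sizeof(BlockNumber),
				   _bt_blk_cmp) != NULL;
}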