Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nbtutils.c
4 : : * Utility code for Postgres btree implementation.
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/access/nbtree/nbtutils.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : :
16 : : #include "postgres.h"
17 : :
18 : : #include <time.h>
19 : :
20 : : #include "access/nbtree.h"
21 : : #include "access/reloptions.h"
22 : : #include "access/relscan.h"
23 : : #include "commands/progress.h"
24 : : #include "miscadmin.h"
25 : : #include "utils/datum.h"
26 : : #include "utils/lsyscache.h"
27 : : #include "utils/rel.h"
28 : :
29 : :
30 : : #define LOOK_AHEAD_REQUIRED_RECHECKS 3
31 : : #define LOOK_AHEAD_DEFAULT_DISTANCE 5
32 : : #define NSKIPADVANCES_THRESHOLD 3
33 : :
34 : : static inline int32 _bt_compare_array_skey(FmgrInfo *orderproc,
35 : : Datum tupdatum, bool tupnull,
36 : : Datum arrdatum, ScanKey cur);
37 : : static void _bt_binsrch_skiparray_skey(bool cur_elem_trig, ScanDirection dir,
38 : : Datum tupdatum, bool tupnull,
39 : : BTArrayKeyInfo *array, ScanKey cur,
40 : : int32 *set_elem_result);
41 : : static void _bt_skiparray_set_element(Relation rel, ScanKey skey, BTArrayKeyInfo *array,
42 : : int32 set_elem_result, Datum tupdatum, bool tupnull);
43 : : static void _bt_skiparray_set_isnull(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
44 : : static void _bt_array_set_low_or_high(Relation rel, ScanKey skey,
45 : : BTArrayKeyInfo *array, bool low_not_high);
46 : : static bool _bt_array_decrement(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
47 : : static bool _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
48 : : static bool _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir,
49 : : bool *skip_array_set);
50 : : static bool _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
51 : : IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
52 : : bool readpagetup, int sktrig, bool *scanBehind);
53 : : static bool _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
54 : : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
55 : : int sktrig, bool sktrig_required);
56 : : #ifdef USE_ASSERT_CHECKING
57 : : static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan);
58 : : #endif
59 : : static bool _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
60 : : IndexTuple finaltup);
61 : : static bool _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
62 : : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
63 : : bool advancenonrequired, bool forcenonrequired,
64 : : bool *continuescan, int *ikey);
65 : : static bool _bt_rowcompare_cmpresult(ScanKey subkey, int cmpresult);
66 : : static bool _bt_check_rowcompare(ScanKey skey,
67 : : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
68 : : ScanDirection dir, bool forcenonrequired, bool *continuescan);
69 : : static void _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
70 : : int tupnatts, TupleDesc tupdesc);
71 : : static int _bt_keep_natts(Relation rel, IndexTuple lastleft,
72 : : IndexTuple firstright, BTScanInsert itup_key);
73 : :
74 : :
75 : : /*
76 : : * _bt_mkscankey
77 : : * Build an insertion scan key that contains comparison data from itup
78 : : * as well as comparator routines appropriate to the key datatypes.
79 : : *
80 : : * The result is intended for use with _bt_compare() and _bt_truncate().
81 : : * Callers that don't need to fill out the insertion scankey arguments
82 : : * (e.g. they use an ad-hoc comparison routine, or only need a scankey
83 : : * for _bt_truncate()) can pass a NULL index tuple. The scankey will
84 : : * be initialized as if an "all truncated" pivot tuple was passed
85 : : * instead.
86 : : *
87 : : * Note that we may occasionally have to share lock the metapage to
88 : : * determine whether or not the keys in the index are expected to be
89 : : * unique (i.e. if this is a "heapkeyspace" index). We assume a
90 : : * heapkeyspace index when caller passes a NULL tuple, allowing index
91 : : * build callers to avoid accessing the non-existent metapage. We
92 : : * also assume that the index is _not_ allequalimage when a NULL tuple
93 : : * is passed; CREATE INDEX callers call _bt_allequalimage() to set the
94 : : * field themselves.
95 : : */
96 : : BTScanInsert
871 pg@bowt.ie 97 :CBC 6004683 : _bt_mkscankey(Relation rel, IndexTuple itup)
98 : : {
99 : : BTScanInsert key;
100 : : ScanKey skey;
101 : : TupleDesc itupdesc;
102 : : int indnkeyatts;
103 : : int16 *indoption;
104 : : int tupnatts;
105 : : int i;
106 : :
9919 bruce@momjian.us 107 : 6004683 : itupdesc = RelationGetDescr(rel);
2761 teodor@sigaev.ru 108 : 6004683 : indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
6867 tgl@sss.pgh.pa.us 109 : 6004683 : indoption = rel->rd_indoption;
2414 pg@bowt.ie 110 [ + + + + ]: 6004683 : tupnatts = itup ? BTreeTupleGetNAtts(itup, rel) : 0;
111 : :
112 [ - + ]: 6004683 : Assert(tupnatts <= IndexRelationGetNumberOfAttributes(rel));
113 : :
114 : : /*
115 : : * We'll execute the search using a scan key constructed on the key columns.
116 : : * Truncated attributes and non-key attributes are omitted from the final
117 : : * scan key.
118 : : */
119 : 6004683 : key = palloc(offsetof(BTScanInsertData, scankeys) +
120 : 6004683 : sizeof(ScanKeyData) * indnkeyatts);
2071 121 [ + + ]: 6004683 : if (itup)
871 122 : 5933266 : _bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
123 : : else
124 : : {
125 : : /* Utility statement callers can set these fields themselves */
2071 126 : 71417 : key->heapkeyspace = true;
127 : 71417 : key->allequalimage = false;
128 : : }
2351 tgl@sss.pgh.pa.us 129 : 6004683 : key->anynullkeys = false; /* initial assumption */
690 pg@bowt.ie 130 : 6004683 : key->nextkey = false; /* usual case, required by btinsert */
131 : 6004683 : key->backward = false; /* usual case, required by btinsert */
2414 132 : 6004683 : key->keysz = Min(indnkeyatts, tupnatts);
133 [ + + ]: 6004683 : key->scantid = key->heapkeyspace && itup ?
134 [ + - ]: 12009366 : BTreeTupleGetHeapTID(itup) : NULL;
135 : 6004683 : skey = key->scankeys;
2761 teodor@sigaev.ru 136 [ + + ]: 16263195 : for (i = 0; i < indnkeyatts; i++)
137 : : {
138 : : FmgrInfo *procinfo;
139 : : Datum arg;
140 : : bool null;
141 : : int flags;
142 : :
143 : : /*
144 : : * We can use the cached (default) support procs since no cross-type
145 : : * comparison can be needed.
146 : : */
8788 tgl@sss.pgh.pa.us 147 : 10258512 : procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
148 : :
149 : : /*
150 : : * Key arguments built from truncated attributes (or when caller
151 : : * provides no tuple) are defensively represented as NULL values. They
152 : : * should never be used.
153 : : */
2414 pg@bowt.ie 154 [ + + ]: 10258512 : if (i < tupnatts)
155 : 10130111 : arg = index_getattr(itup, i + 1, itupdesc, &null);
156 : : else
157 : : {
158 : 128401 : arg = (Datum) 0;
159 : 128401 : null = true;
160 : : }
161 : 10258512 : flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT);
8788 tgl@sss.pgh.pa.us 162 : 10258512 : ScanKeyEntryInitializeWithInfo(&skey[i],
163 : : flags,
164 : 10258512 : (AttrNumber) (i + 1),
165 : : InvalidStrategy,
166 : : InvalidOid,
5313 167 : 10258512 : rel->rd_indcollation[i],
168 : : procinfo,
169 : : arg);
170 : : /* Record if any key attribute is NULL (or truncated) */
2380 pg@bowt.ie 171 [ + + ]: 10258512 : if (null)
172 : 138725 : key->anynullkeys = true;
173 : : }
174 : :
175 : : /*
176 : : * In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
177 : : * that the full uniqueness check is done.
178 : : */
1363 peter@eisentraut.org 179 [ + + ]: 6004683 : if (rel->rd_index->indnullsnotdistinct)
180 : 93 : key->anynullkeys = false;
181 : :
2414 pg@bowt.ie 182 : 6004683 : return key;
183 : : }
184 : :
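A hedged sketch (not part of nbtutils.c) of the calling convention documented above: build the key from an open index relation and a tuple formed for it, use it, then free the single palloc'd chunk. The helper name is hypothetical, and the tree descent is only hinted at in comments.

static void
example_mkscankey_usage(Relation rel, IndexTuple itup)
{
	BTScanInsert key = _bt_mkscankey(rel, itup);

	/*
	 * key->scankeys[0 .. key->keysz - 1] now hold the usable comparison data
	 * (one entry per untruncated key column), and key->scantid is the heap
	 * TID tiebreaker (or NULL).  The key is ready for use with
	 * _bt_search()/_bt_compare().
	 */

	/* ... descend the tree and locate the target leaf position here ... */

	/* The scankey array is embedded in the same allocation as the header */
	pfree(key);
}
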
185 : : /*
186 : : * _bt_freestack() -- free a retracement stack made by _bt_search.
187 : : */
188 : : void
10703 scrappy@hub.org 189 : 10467231 : _bt_freestack(BTStack stack)
190 : : {
191 : : BTStack ostack;
192 : :
7965 neilc@samurai.com 193 [ + + ]: 19326191 : while (stack != NULL)
194 : : {
10278 bruce@momjian.us 195 : 8858960 : ostack = stack;
196 : 8858960 : stack = stack->bts_parent;
197 : 8858960 : pfree(ostack);
198 : : }
10703 scrappy@hub.org 199 : 10467231 : }
200 : :
201 : : /*
202 : : * _bt_compare_array_skey() -- apply array comparison function
203 : : *
204 : : * Compares caller's tuple attribute value to a scan key/array element.
205 : : * Helper function used during binary searches of SK_SEARCHARRAY arrays.
206 : : *
207 : : * This routine returns:
208 : : * <0 if tupdatum < arrdatum;
209 : : * 0 if tupdatum == arrdatum;
210 : : * >0 if tupdatum > arrdatum.
211 : : *
212 : : * This is essentially the same interface as _bt_compare: both functions
213 : : * compare the value that they're searching for to a binary search pivot.
214 : : * However, unlike _bt_compare, this function's "tuple argument" comes first,
215 : : * while its "array/scankey argument" comes second.
216 : : */
217 : : static inline int32
288 pg@bowt.ie 218 : 449824 : _bt_compare_array_skey(FmgrInfo *orderproc,
219 : : Datum tupdatum, bool tupnull,
220 : : Datum arrdatum, ScanKey cur)
221 : : {
222 : 449824 : int32 result = 0;
223 : :
224 [ - + ]: 449824 : Assert(cur->sk_strategy == BTEqualStrategyNumber);
207 225 [ - + ]: 449824 : Assert(!(cur->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL)));
226 : :
288 227 [ + + ]: 449824 : if (tupnull) /* NULL tupdatum */
228 : : {
229 [ + + ]: 231 : if (cur->sk_flags & SK_ISNULL)
230 : 126 : result = 0; /* NULL "=" NULL */
231 [ + + ]: 105 : else if (cur->sk_flags & SK_BT_NULLS_FIRST)
232 : 18 : result = -1; /* NULL "<" NOT_NULL */
233 : : else
234 : 87 : result = 1; /* NULL ">" NOT_NULL */
235 : : }
236 [ + + ]: 449593 : else if (cur->sk_flags & SK_ISNULL) /* NOT_NULL tupdatum, NULL arrdatum */
237 : : {
238 [ + + ]: 30510 : if (cur->sk_flags & SK_BT_NULLS_FIRST)
239 : 54 : result = 1; /* NOT_NULL ">" NULL */
240 : : else
241 : 30456 : result = -1; /* NOT_NULL "<" NULL */
242 : : }
243 : : else
244 : : {
245 : : /*
246 : : * Like _bt_compare, we need to be careful of cross-type comparisons,
247 : : * so the left value has to be the value that came from an index tuple
248 : : */
249 : 419083 : result = DatumGetInt32(FunctionCall2Coll(orderproc, cur->sk_collation,
250 : : tupdatum, arrdatum));
251 : :
252 : : /*
253 : : * We flip the sign by following the obvious rule: flip whenever the
254 : : * column is a DESC column.
255 : : *
256 : : * _bt_compare does it the wrong way around (flip when *ASC*) in order
257 : : * to compensate for passing its orderproc arguments backwards. We
258 : : * don't need to play these games because we find it natural to pass
259 : : * tupdatum as the left value (and arrdatum as the right value).
260 : : */
261 [ + + ]: 419083 : if (cur->sk_flags & SK_BT_DESC)
262 [ + + ]: 52029 : INVERT_COMPARE_RESULT(result);
263 : : }
264 : :
265 : 449824 : return result;
266 : : }
267 : :
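A hedged, concrete illustration of the DESC sign flip described above (not from nbtutils.c; the int4 values and the "cur" scan key are assumed purely for the example):

	/*
	 * Suppose an int4 DESC column with tupdatum = 10 and arrdatum = 5.  The
	 * ORDER proc compares the raw values first:
	 */
	int32		result = 1;		/* e.g. btint4cmp(10, 5): 10 sorts after 5 */

	if (cur->sk_flags & SK_BT_DESC)
		INVERT_COMPARE_RESULT(result);	/* now -1: 10 sorts before 5 on DESC */

	/* The returned result therefore follows the index's physical sort order */
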
268 : : /*
269 : : * _bt_binsrch_array_skey() -- Binary search for next matching array key
270 : : *
271 : : * Returns an index to the first array element >= caller's tupdatum argument.
272 : : * This convention is more natural for forwards scan callers, but that can't
273 : : * really matter to backwards scan callers. Both callers require handling for
274 : : * the case where the match we return is < tupdatum, and symmetric handling
275 : : * for the case where our best match is > tupdatum.
276 : : *
277 : : * Also sets *set_elem_result to the result _bt_compare_array_skey returned
278 : : * when we used it to compare the matching array element to tupdatum/tupnull.
279 : : *
280 : : * cur_elem_trig indicates if array advancement was triggered by this array's
281 : : * scan key, and that the array is for a required scan key. We can apply this
282 : : * information to find the next matching array element in the current scan
283 : : * direction using far fewer comparisons (fewer on average, compared to naive
284 : : * binary search). This scheme takes advantage of an important property of
285 : : * required arrays: required arrays always advance in lockstep with the index
286 : : * scan's progress through the index's key space.
287 : : */
288 : : int
289 : 15886 : _bt_binsrch_array_skey(FmgrInfo *orderproc,
290 : : bool cur_elem_trig, ScanDirection dir,
291 : : Datum tupdatum, bool tupnull,
292 : : BTArrayKeyInfo *array, ScanKey cur,
293 : : int32 *set_elem_result)
294 : : {
295 : 15886 : int low_elem = 0,
296 : 15886 : mid_elem = -1,
297 : 15886 : high_elem = array->num_elems - 1,
298 : 15886 : result = 0;
299 : : Datum arrdatum;
300 : :
301 [ - + ]: 15886 : Assert(cur->sk_flags & SK_SEARCHARRAY);
207 302 [ - + ]: 15886 : Assert(!(cur->sk_flags & SK_BT_SKIP));
303 [ - + ]: 15886 : Assert(!(cur->sk_flags & SK_ISNULL)); /* SAOP arrays never have NULLs */
288 304 [ - + ]: 15886 : Assert(cur->sk_strategy == BTEqualStrategyNumber);
305 : :
306 [ + + ]: 15886 : if (cur_elem_trig)
307 : : {
308 [ - + ]: 15727 : Assert(!ScanDirectionIsNoMovement(dir));
309 [ - + ]: 15727 : Assert(cur->sk_flags & SK_BT_REQFWD);
310 : :
311 : : /*
312 : : * When the scan key that triggered array advancement is a required
313 : : * array scan key, it is now certain that the current array element
314 : : * (plus all prior elements relative to the current scan direction)
315 : : * cannot possibly be at or ahead of the corresponding tuple value.
316 : : * (_bt_checkkeys must have called _bt_tuple_before_array_skeys, which
317 : : * makes sure this is true as a condition of advancing the arrays.)
318 : : *
319 : : * This makes it safe to exclude array elements up to and including
320 : : * the former-current array element from our search.
321 : : *
322 : : * Separately, when array advancement was triggered by a required scan
323 : : * key, the array element immediately after the former-current element
324 : : * is often either an exact tupdatum match, or a "close by" near-match
325 : : * (a near-match tupdatum is one whose key space falls _between_ the
326 : : * former-current and new-current array elements). We'll detect both
327 : : * cases via an optimistic comparison of the new search lower bound
328 : : * (or new search upper bound in the case of backwards scans).
329 : : */
330 [ + + ]: 15727 : if (ScanDirectionIsForward(dir))
331 : : {
332 : 15697 : low_elem = array->cur_elem + 1; /* old cur_elem exhausted */
333 : :
334 : : /* Compare prospective new cur_elem (also the new lower bound) */
335 [ + + ]: 15697 : if (high_elem >= low_elem)
336 : : {
337 : 11660 : arrdatum = array->elem_values[low_elem];
338 : 11660 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
339 : : arrdatum, cur);
340 : :
341 [ + + ]: 11660 : if (result <= 0)
342 : : {
343 : : /* Optimistic comparison optimization worked out */
344 : 11617 : *set_elem_result = result;
345 : 11617 : return low_elem;
346 : : }
347 : 43 : mid_elem = low_elem;
348 : 43 : low_elem++; /* this cur_elem exhausted, too */
349 : : }
350 : :
351 [ + + ]: 4080 : if (high_elem < low_elem)
352 : : {
353 : : /* Caller needs to perform "beyond end" array advancement */
354 : 4040 : *set_elem_result = 1;
355 : 4040 : return high_elem;
356 : : }
357 : : }
358 : : else
359 : : {
360 : 30 : high_elem = array->cur_elem - 1; /* old cur_elem exhausted */
361 : :
362 : : /* Compare prospective new cur_elem (also the new upper bound) */
363 [ + + ]: 30 : if (high_elem >= low_elem)
364 : : {
365 : 21 : arrdatum = array->elem_values[high_elem];
366 : 21 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
367 : : arrdatum, cur);
368 : :
369 [ + + ]: 21 : if (result >= 0)
370 : : {
371 : : /* Optimistic comparison optimization worked out */
372 : 15 : *set_elem_result = result;
373 : 15 : return high_elem;
374 : : }
375 : 6 : mid_elem = high_elem;
376 : 6 : high_elem--; /* this cur_elem exhausted, too */
377 : : }
378 : :
379 [ + - ]: 15 : if (high_elem < low_elem)
380 : : {
381 : : /* Caller needs to perform "beyond end" array advancement */
382 : 15 : *set_elem_result = -1;
383 : 15 : return low_elem;
384 : : }
385 : : }
386 : : }
387 : :
388 [ + + ]: 349 : while (high_elem > low_elem)
389 : : {
390 : 219 : mid_elem = low_elem + ((high_elem - low_elem) / 2);
391 : 219 : arrdatum = array->elem_values[mid_elem];
392 : :
393 : 219 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
394 : : arrdatum, cur);
395 : :
396 [ + + ]: 219 : if (result == 0)
397 : : {
398 : : /*
399 : : * It's safe to quit as soon as we see an equal array element.
400 : : * This often saves an extra comparison or two...
401 : : */
402 : 69 : low_elem = mid_elem;
403 : 69 : break;
404 : : }
405 : :
406 [ + + ]: 150 : if (result > 0)
407 : 135 : low_elem = mid_elem + 1;
408 : : else
409 : 15 : high_elem = mid_elem;
410 : : }
411 : :
412 : : /*
413 : : * ...but our caller also cares about how its searched-for tuple datum
414 : : * compares to the low_elem datum. Must always set *set_elem_result with
415 : : * the result of that comparison specifically.
416 : : */
417 [ + + ]: 199 : if (low_elem != mid_elem)
418 : 121 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
419 : 121 : array->elem_values[low_elem], cur);
420 : :
421 : 199 : *set_elem_result = result;
422 : :
423 : 199 : return low_elem;
424 : : }
425 : :
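A hedged, simplified sketch of how the return value and *set_elem_result are consumed together (the real consumer is _bt_advance_array_keys further below; so, ikey, cur_elem_trig, dir, tupdatum/tupnull, array and cur are assumed from that context):

	int32		result;
	int			set_elem;

	set_elem = _bt_binsrch_array_skey(&so->orderProcs[ikey],
									  cur_elem_trig, dir,
									  tupdatum, tupnull,
									  array, cur, &result);
	if (result == 0)
	{
		/* array->elem_values[set_elem] exactly matches the tuple's value */
		array->cur_elem = set_elem;
		cur->sk_argument = array->elem_values[set_elem];
	}
	else
	{
		/*
		 * No exact match: result says whether the tuple's value sorts before
		 * (< 0) or after (> 0) elem_values[set_elem].  The real caller uses
		 * this to round to the next element for the current scan direction,
		 * or to treat the array as exhausted ("beyond end" advancement).
		 */
	}
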
426 : : /*
427 : : * _bt_binsrch_skiparray_skey() -- "Binary search" within a skip array
428 : : *
429 : : * Does not return an index into the array, since skip arrays don't really
430 : : * contain elements (they generate their array elements procedurally instead).
431 : : * Our interface matches that of _bt_binsrch_array_skey in every other way.
432 : : *
433 : : * Sets *set_elem_result just like _bt_binsrch_array_skey would with a true
434 : : * array. The value 0 indicates that tupdatum/tupnull is within the range of
435 : : * the skip array. We return -1 when tupdatum/tupnull is lower than any value
436 : : * within the range of the array, and 1 when it is higher than every value.
437 : : * Caller should pass *set_elem_result to _bt_skiparray_set_element to advance
438 : : * the array.
439 : : *
440 : : * cur_elem_trig indicates if array advancement was triggered by this array's
441 : : * scan key. We use this to optimize-away comparisons that are known by our
442 : : * caller to be unnecessary from context, just like _bt_binsrch_array_skey.
443 : : */
444 : : static void
207 445 : 85242 : _bt_binsrch_skiparray_skey(bool cur_elem_trig, ScanDirection dir,
446 : : Datum tupdatum, bool tupnull,
447 : : BTArrayKeyInfo *array, ScanKey cur,
448 : : int32 *set_elem_result)
449 : : {
450 [ - + ]: 85242 : Assert(cur->sk_flags & SK_BT_SKIP);
451 [ - + ]: 85242 : Assert(cur->sk_flags & SK_SEARCHARRAY);
452 [ - + ]: 85242 : Assert(cur->sk_flags & SK_BT_REQFWD);
453 [ - + ]: 85242 : Assert(array->num_elems == -1);
454 [ - + ]: 85242 : Assert(!ScanDirectionIsNoMovement(dir));
455 : :
456 [ + + ]: 85242 : if (array->null_elem)
457 : : {
458 [ + - - + ]: 72804 : Assert(!array->low_compare && !array->high_compare);
459 : :
460 : 72804 : *set_elem_result = 0;
461 : 72804 : return;
462 : : }
463 : :
464 [ + + ]: 12438 : if (tupnull) /* NULL tupdatum */
465 : : {
466 [ - + ]: 12 : if (cur->sk_flags & SK_BT_NULLS_FIRST)
207 pg@bowt.ie 467 :UBC 0 : *set_elem_result = -1; /* NULL "<" NOT_NULL */
468 : : else
207 pg@bowt.ie 469 :CBC 12 : *set_elem_result = 1; /* NULL ">" NOT_NULL */
470 : 12 : return;
471 : : }
472 : :
473 : : /*
474 : : * Array inequalities determine whether tupdatum is within the range of
475 : : * caller's skip array
476 : : */
477 : 12426 : *set_elem_result = 0;
478 [ + + ]: 12426 : if (ScanDirectionIsForward(dir))
479 : : {
480 : : /*
481 : : * Evaluate low_compare first (unless cur_elem_trig tells us that it
482 : : * cannot possibly fail to be satisfied), then evaluate high_compare
483 : : */
484 [ + + + + ]: 12393 : if (!cur_elem_trig && array->low_compare &&
485 [ - + ]: 444 : !DatumGetBool(FunctionCall2Coll(&array->low_compare->sk_func,
486 : 444 : array->low_compare->sk_collation,
487 : : tupdatum,
488 : 444 : array->low_compare->sk_argument)))
207 pg@bowt.ie 489 :UBC 0 : *set_elem_result = -1;
207 pg@bowt.ie 490 [ + + ]:CBC 12393 : else if (array->high_compare &&
491 [ + + ]: 4236 : !DatumGetBool(FunctionCall2Coll(&array->high_compare->sk_func,
492 : 4236 : array->high_compare->sk_collation,
493 : : tupdatum,
494 : 4236 : array->high_compare->sk_argument)))
495 : 3204 : *set_elem_result = 1;
496 : : }
497 : : else
498 : : {
499 : : /*
500 : : * Evaluate high_compare first (unless cur_elem_trig tells us that it
501 : : * cannot possibly fail to be satisfied), then evaluate low_compare
502 : : */
503 [ + + + + ]: 33 : if (!cur_elem_trig && array->high_compare &&
504 [ - + ]: 6 : !DatumGetBool(FunctionCall2Coll(&array->high_compare->sk_func,
505 : 6 : array->high_compare->sk_collation,
506 : : tupdatum,
507 : 6 : array->high_compare->sk_argument)))
207 pg@bowt.ie 508 :UBC 0 : *set_elem_result = 1;
207 pg@bowt.ie 509 [ + + ]:CBC 33 : else if (array->low_compare &&
510 [ - + ]: 15 : !DatumGetBool(FunctionCall2Coll(&array->low_compare->sk_func,
511 : 15 : array->low_compare->sk_collation,
512 : : tupdatum,
513 : 15 : array->low_compare->sk_argument)))
207 pg@bowt.ie 514 :UBC 0 : *set_elem_result = -1;
515 : : }
516 : :
517 : : /*
518 : : * Assert that any keys that were assumed to be satisfied already (due to
519 : : * caller passing cur_elem_trig=true) really are satisfied as expected
520 : : */
521 : : #ifdef USE_ASSERT_CHECKING
207 pg@bowt.ie 522 [ + + ]:CBC 12426 : if (cur_elem_trig)
523 : : {
524 [ + + + + ]: 8364 : if (ScanDirectionIsForward(dir) && array->low_compare)
525 [ - + ]: 693 : Assert(DatumGetBool(FunctionCall2Coll(&array->low_compare->sk_func,
526 : : array->low_compare->sk_collation,
527 : : tupdatum,
528 : : array->low_compare->sk_argument)));
529 : :
530 [ + + + + ]: 8364 : if (ScanDirectionIsBackward(dir) && array->high_compare)
531 [ - + ]: 3 : Assert(DatumGetBool(FunctionCall2Coll(&array->high_compare->sk_func,
532 : : array->high_compare->sk_collation,
533 : : tupdatum,
534 : : array->high_compare->sk_argument)));
535 : : }
536 : : #endif
537 : : }
538 : :
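A hedged sketch of the pairing described in the header comment above (rel, cur_elem_trig, dir, tupdatum/tupnull, array and cur are assumed from the surrounding _bt_advance_array_keys context):

	int32		result;

	_bt_binsrch_skiparray_skey(cur_elem_trig, dir, tupdatum, tupnull,
							   array, cur, &result);

	/*
	 * result == 0 means tupdatum/tupnull lies inside the skip array's range,
	 * so the scan key's sk_argument simply becomes a copy of tupdatum.
	 * Otherwise the key is clamped to the array's lowest (result < 0) or
	 * highest (result > 0) value that's still in range.
	 */
	_bt_skiparray_set_element(rel, cur, array, result, tupdatum, tupnull);
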
539 : : /*
540 : : * _bt_skiparray_set_element() -- Set skip array scan key's sk_argument
541 : : *
542 : : * Caller passes set_elem_result returned by _bt_binsrch_skiparray_skey for
543 : : * caller's tupdatum/tupnull.
544 : : *
545 : : * We copy tupdatum/tupnull into skey's sk_argument iff set_elem_result == 0.
546 : : * Otherwise, we set skey to either the lowest or highest value that's within
547 : : * the range of caller's skip array (whichever is the best available match to
548 : : * tupdatum/tupnull that is still within the range of the skip array according
549 : : * to _bt_binsrch_skiparray_skey/set_elem_result).
550 : : */
551 : : static void
552 : 76705 : _bt_skiparray_set_element(Relation rel, ScanKey skey, BTArrayKeyInfo *array,
553 : : int32 set_elem_result, Datum tupdatum, bool tupnull)
554 : : {
555 [ - + ]: 76705 : Assert(skey->sk_flags & SK_BT_SKIP);
556 [ - + ]: 76705 : Assert(skey->sk_flags & SK_SEARCHARRAY);
557 : :
558 [ + + ]: 76705 : if (set_elem_result)
559 : : {
560 : : /* tupdatum/tupnull is out of the range of the skip array */
561 [ - + ]: 321 : Assert(!array->null_elem);
562 : :
563 : 321 : _bt_array_set_low_or_high(rel, skey, array, set_elem_result < 0);
564 : 321 : return;
565 : : }
566 : :
567 : : /* Advance skip array to tupdatum (or tupnull) value */
568 [ + + ]: 76384 : if (unlikely(tupnull))
569 : : {
570 : 18 : _bt_skiparray_set_isnull(rel, skey, array);
571 : 18 : return;
572 : : }
573 : :
574 : : /* Free memory previously allocated for sk_argument if needed */
575 [ + + + + ]: 76366 : if (!array->attbyval && skey->sk_argument)
576 : 38367 : pfree(DatumGetPointer(skey->sk_argument));
577 : :
578 : : /* tupdatum becomes new sk_argument/new current element */
579 : 76366 : skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL |
580 : : SK_BT_MINVAL | SK_BT_MAXVAL |
581 : : SK_BT_NEXT | SK_BT_PRIOR);
582 : 76366 : skey->sk_argument = datumCopy(tupdatum, array->attbyval, array->attlen);
583 : : }
584 : :
585 : : /*
586 : : * _bt_skiparray_set_isnull() -- set skip array scan key to NULL
587 : : */
588 : : static void
589 : 24 : _bt_skiparray_set_isnull(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
590 : : {
591 [ - + ]: 24 : Assert(skey->sk_flags & SK_BT_SKIP);
592 [ - + ]: 24 : Assert(skey->sk_flags & SK_SEARCHARRAY);
593 [ + - + - : 24 : Assert(array->null_elem && !array->low_compare && !array->high_compare);
- + ]
594 : :
595 : : /* Free memory previously allocated for sk_argument if needed */
596 [ + + + - ]: 24 : if (!array->attbyval && skey->sk_argument)
597 : 3 : pfree(DatumGetPointer(skey->sk_argument));
598 : :
599 : : /* NULL becomes new sk_argument/new current element */
600 : 24 : skey->sk_argument = (Datum) 0;
601 : 24 : skey->sk_flags &= ~(SK_BT_MINVAL | SK_BT_MAXVAL |
602 : : SK_BT_NEXT | SK_BT_PRIOR);
603 : 24 : skey->sk_flags |= (SK_SEARCHNULL | SK_ISNULL);
604 : 24 : }
605 : :
606 : : /*
607 : : * _bt_start_array_keys() -- Initialize array keys at start of a scan
608 : : *
609 : : * Set up the cur_elem counters and fill in the first sk_argument value for
610 : : * each array scankey.
611 : : */
612 : : void
288 613 : 40826 : _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
614 : : {
207 615 : 40826 : Relation rel = scan->indexRelation;
570 616 : 40826 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
617 : :
288 618 [ - + ]: 40826 : Assert(so->numArrayKeys);
570 619 [ - + ]: 40826 : Assert(so->qual_ok);
620 : :
207 621 [ + + ]: 81968 : for (int i = 0; i < so->numArrayKeys; i++)
622 : : {
623 : 41142 : BTArrayKeyInfo *array = &so->arrayKeys[i];
624 : 41142 : ScanKey skey = &so->keyData[array->scan_key];
625 : :
288 626 [ - + ]: 41142 : Assert(skey->sk_flags & SK_SEARCHARRAY);
627 : :
207 628 : 41142 : _bt_array_set_low_or_high(rel, skey, array,
629 : : ScanDirectionIsForward(dir));
630 : : }
377 631 : 40826 : so->scanBehind = so->oppositeDirCheck = false; /* reset */
4779 tgl@sss.pgh.pa.us 632 : 40826 : }
633 : :
634 : : /*
635 : : * _bt_array_set_low_or_high() -- Set array scan key to lowest/highest element
636 : : *
637 : : * Caller also passes associated scan key, which will have its argument set to
638 : : * the lowest/highest array value in passing.
639 : : */
640 : : static void
207 pg@bowt.ie 641 : 46888 : _bt_array_set_low_or_high(Relation rel, ScanKey skey, BTArrayKeyInfo *array,
642 : : bool low_not_high)
643 : : {
644 [ - + ]: 46888 : Assert(skey->sk_flags & SK_SEARCHARRAY);
645 : :
646 [ + + ]: 46888 : if (array->num_elems != -1)
647 : : {
648 : : /* set low or high element for SAOP array */
649 : 42359 : int set_elem = 0;
650 : :
651 [ - + ]: 42359 : Assert(!(skey->sk_flags & SK_BT_SKIP));
652 : :
653 [ + + ]: 42359 : if (!low_not_high)
654 : 4159 : set_elem = array->num_elems - 1;
655 : :
656 : : /*
657 : : * Just copy over array datum (only skip arrays require freeing and
658 : : * allocating memory for sk_argument)
659 : : */
660 : 42359 : array->cur_elem = set_elem;
661 : 42359 : skey->sk_argument = array->elem_values[set_elem];
662 : :
663 : 42359 : return;
664 : : }
665 : :
666 : : /* set low or high element for skip array */
667 [ - + ]: 4529 : Assert(skey->sk_flags & SK_BT_SKIP);
668 [ - + ]: 4529 : Assert(array->num_elems == -1);
669 : :
670 : : /* Free memory previously allocated for sk_argument if needed */
671 [ + + + + ]: 4529 : if (!array->attbyval && skey->sk_argument)
672 : 960 : pfree(DatumGetPointer(skey->sk_argument));
673 : :
674 : : /* Reset flags */
675 : 4529 : skey->sk_argument = (Datum) 0;
676 : 4529 : skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL |
677 : : SK_BT_MINVAL | SK_BT_MAXVAL |
678 : : SK_BT_NEXT | SK_BT_PRIOR);
679 : :
680 [ + + ]: 4529 : if (array->null_elem &&
681 [ + + ]: 3650 : (low_not_high == ((skey->sk_flags & SK_BT_NULLS_FIRST) != 0)))
682 : : {
683 : : /* Requested element (either lowest or highest) has the value NULL */
684 : 487 : skey->sk_flags |= (SK_SEARCHNULL | SK_ISNULL);
685 : : }
686 [ + + ]: 4042 : else if (low_not_high)
687 : : {
688 : : /* Setting array to lowest element (according to low_compare) */
689 : 3682 : skey->sk_flags |= SK_BT_MINVAL;
690 : : }
691 : : else
692 : : {
693 : : /* Setting array to highest element (according to high_compare) */
694 : 360 : skey->sk_flags |= SK_BT_MAXVAL;
695 : : }
696 : : }
697 : :
698 : : /*
699 : : * _bt_array_decrement() -- decrement array scan key's sk_argument
700 : : *
701 : : * Return value indicates whether caller's array was successfully decremented.
702 : : * Cannot decrement an array whose current element is already the first one.
703 : : */
704 : : static bool
705 : 456 : _bt_array_decrement(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
706 : : {
707 : 456 : bool uflow = false;
708 : : Datum dec_sk_argument;
709 : :
710 [ - + ]: 456 : Assert(skey->sk_flags & SK_SEARCHARRAY);
711 [ - + ]: 456 : Assert(!(skey->sk_flags & (SK_BT_MAXVAL | SK_BT_NEXT | SK_BT_PRIOR)));
712 : :
713 : : /* SAOP array? */
714 [ + + ]: 456 : if (array->num_elems != -1)
715 : : {
716 [ - + ]: 18 : Assert(!(skey->sk_flags & (SK_BT_SKIP | SK_BT_MINVAL | SK_BT_MAXVAL)));
717 [ + + ]: 18 : if (array->cur_elem > 0)
718 : : {
719 : : /*
720 : : * Just decrement current element, and assign its datum to skey
721 : : * (only skip arrays need us to free existing sk_argument memory)
722 : : */
723 : 3 : array->cur_elem--;
724 : 3 : skey->sk_argument = array->elem_values[array->cur_elem];
725 : :
726 : : /* Successfully decremented array */
727 : 3 : return true;
728 : : }
729 : :
730 : : /* Cannot decrement to before first array element */
731 : 15 : return false;
732 : : }
733 : :
734 : : /* Nope, this is a skip array */
735 [ - + ]: 438 : Assert(skey->sk_flags & SK_BT_SKIP);
736 : :
737 : : /*
738 : : * The sentinel value that represents the minimum value within the range
739 : : * of a skip array (often just -inf) is never decrementable
740 : : */
741 [ - + ]: 438 : if (skey->sk_flags & SK_BT_MINVAL)
207 pg@bowt.ie 742 :UBC 0 : return false;
743 : :
744 : : /*
745 : : * When the current array element is NULL, and the lowest sorting value in
746 : : * the index is also NULL, we cannot decrement before first array element
747 : : */
207 pg@bowt.ie 748 [ + + - + ]:CBC 438 : if ((skey->sk_flags & SK_ISNULL) && (skey->sk_flags & SK_BT_NULLS_FIRST))
207 pg@bowt.ie 749 :UBC 0 : return false;
750 : :
751 : : /*
752 : : * Opclasses without skip support "decrement" the scan key's current
753 : : * element by setting the PRIOR flag. The true prior value is determined
754 : : * by repositioning to the last index tuple < existing sk_argument/current
755 : : * array element. Note that this works in the usual way when the scan key
756 : : * is already marked ISNULL (i.e. when the current element is NULL).
757 : : */
207 pg@bowt.ie 758 [ + + ]:CBC 438 : if (!array->sksup)
759 : : {
760 : : /* Successfully "decremented" array */
761 : 6 : skey->sk_flags |= SK_BT_PRIOR;
762 : 6 : return true;
763 : : }
764 : :
765 : : /*
766 : : * Opclasses with skip support directly decrement sk_argument
767 : : */
768 [ + + ]: 432 : if (skey->sk_flags & SK_ISNULL)
769 : : {
770 [ - + ]: 3 : Assert(!(skey->sk_flags & SK_BT_NULLS_FIRST));
771 : :
772 : : /*
773 : : * Existing sk_argument/array element is NULL (for an IS NULL qual).
774 : : *
775 : : * "Decrement" from NULL to the high_elem value provided by opclass
776 : : * skip support routine.
777 : : */
778 : 3 : skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL);
779 : 6 : skey->sk_argument = datumCopy(array->sksup->high_elem,
780 : 3 : array->attbyval, array->attlen);
781 : 3 : return true;
782 : : }
783 : :
784 : : /*
785 : : * Ask opclass support routine to provide decremented copy of existing
786 : : * non-NULL sk_argument
787 : : */
788 : 429 : dec_sk_argument = array->sksup->decrement(rel, skey->sk_argument, &uflow);
789 [ - + ]: 429 : if (unlikely(uflow))
790 : : {
791 : : /* dec_sk_argument has undefined value (so no pfree) */
207 pg@bowt.ie 792 [ # # # # ]:UBC 0 : if (array->null_elem && (skey->sk_flags & SK_BT_NULLS_FIRST))
793 : : {
794 : 0 : _bt_skiparray_set_isnull(rel, skey, array);
795 : :
796 : : /* Successfully "decremented" array to NULL */
797 : 0 : return true;
798 : : }
799 : :
800 : : /* Cannot decrement to before first array element */
801 : 0 : return false;
802 : : }
803 : :
804 : : /*
805 : : * Successfully decremented sk_argument to a non-NULL value. Make sure
806 : : * that the decremented value is still within the range of the array.
807 : : */
207 pg@bowt.ie 808 [ + + ]:CBC 429 : if (array->low_compare &&
809 [ + + ]: 6 : !DatumGetBool(FunctionCall2Coll(&array->low_compare->sk_func,
810 : 6 : array->low_compare->sk_collation,
811 : : dec_sk_argument,
812 : 6 : array->low_compare->sk_argument)))
813 : : {
814 : : /* Keep existing sk_argument after all */
815 [ - + ]: 3 : if (!array->attbyval)
207 pg@bowt.ie 816 :UBC 0 : pfree(DatumGetPointer(dec_sk_argument));
817 : :
818 : : /* Cannot decrement to before first array element */
207 pg@bowt.ie 819 :CBC 3 : return false;
820 : : }
821 : :
822 : : /* Accept value returned by opclass decrement callback */
823 [ - + - - ]: 426 : if (!array->attbyval && skey->sk_argument)
207 pg@bowt.ie 824 :UBC 0 : pfree(DatumGetPointer(skey->sk_argument));
207 pg@bowt.ie 825 :CBC 426 : skey->sk_argument = dec_sk_argument;
826 : :
827 : : /* Successfully decremented array */
828 : 426 : return true;
829 : : }
830 : :
831 : : /*
832 : : * _bt_array_increment() -- increment array scan key's sk_argument
833 : : *
834 : : * Return value indicates whether caller's array was successfully incremented.
835 : : * Cannot increment an array whose current element is already the final one.
836 : : */
837 : : static bool
838 : 15411 : _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
839 : : {
840 : 15411 : bool oflow = false;
841 : : Datum inc_sk_argument;
842 : :
843 [ - + ]: 15411 : Assert(skey->sk_flags & SK_SEARCHARRAY);
844 [ - + ]: 15411 : Assert(!(skey->sk_flags & (SK_BT_MINVAL | SK_BT_NEXT | SK_BT_PRIOR)));
845 : :
846 : : /* SAOP array? */
847 [ + + ]: 15411 : if (array->num_elems != -1)
848 : : {
849 [ - + ]: 4180 : Assert(!(skey->sk_flags & (SK_BT_SKIP | SK_BT_MINVAL | SK_BT_MAXVAL)));
850 [ + + ]: 4180 : if (array->cur_elem < array->num_elems - 1)
851 : : {
852 : : /*
853 : : * Just increment current element, and assign its datum to skey
854 : : * (only skip arrays need us to free existing sk_argument memory)
855 : : */
856 : 28 : array->cur_elem++;
857 : 28 : skey->sk_argument = array->elem_values[array->cur_elem];
858 : :
859 : : /* Successfully incremented array */
860 : 28 : return true;
861 : : }
862 : :
863 : : /* Cannot increment past final array element */
864 : 4152 : return false;
865 : : }
866 : :
867 : : /* Nope, this is a skip array */
868 [ - + ]: 11231 : Assert(skey->sk_flags & SK_BT_SKIP);
869 : :
870 : : /*
871 : : * The sentinel value that represents the maximum value within the range
872 : : * of a skip array (often just +inf) is never incrementable
873 : : */
874 [ + + ]: 11231 : if (skey->sk_flags & SK_BT_MAXVAL)
875 : 321 : return false;
876 : :
877 : : /*
878 : : * When the current array element is NULL, and the highest sorting value
879 : : * in the index is also NULL, we cannot increment past the final element
880 : : */
881 [ + + + + ]: 10910 : if ((skey->sk_flags & SK_ISNULL) && !(skey->sk_flags & SK_BT_NULLS_FIRST))
882 : 221 : return false;
883 : :
884 : : /*
885 : : * Opclasses without skip support "increment" the scan key's current
886 : : * element by setting the NEXT flag. The true next value is determined by
887 : : * repositioning to the first index tuple > existing sk_argument/current
888 : : * array element. Note that this works in the usual way when the scan key
889 : : * is already marked ISNULL (i.e. when the current element is NULL).
890 : : */
891 [ + + ]: 10689 : if (!array->sksup)
892 : : {
893 : : /* Successfully "incremented" array */
894 : 7044 : skey->sk_flags |= SK_BT_NEXT;
895 : 7044 : return true;
896 : : }
897 : :
898 : : /*
899 : : * Opclasses with skip support directly increment sk_argument
900 : : */
901 [ + + ]: 3645 : if (skey->sk_flags & SK_ISNULL)
902 : : {
903 [ - + ]: 18 : Assert(skey->sk_flags & SK_BT_NULLS_FIRST);
904 : :
905 : : /*
906 : : * Existing sk_argument/array element is NULL (for an IS NULL qual).
907 : : *
908 : : * "Increment" from NULL to the low_elem value provided by opclass
909 : : * skip support routine.
910 : : */
911 : 18 : skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL);
912 : 36 : skey->sk_argument = datumCopy(array->sksup->low_elem,
913 : 18 : array->attbyval, array->attlen);
914 : 18 : return true;
915 : : }
916 : :
917 : : /*
918 : : * Ask opclass support routine to provide incremented copy of existing
919 : : * non-NULL sk_argument
920 : : */
921 : 3627 : inc_sk_argument = array->sksup->increment(rel, skey->sk_argument, &oflow);
922 [ + + ]: 3627 : if (unlikely(oflow))
923 : : {
924 : : /* inc_sk_argument has undefined value (so no pfree) */
925 [ + - + + ]: 15 : if (array->null_elem && !(skey->sk_flags & SK_BT_NULLS_FIRST))
926 : : {
927 : 6 : _bt_skiparray_set_isnull(rel, skey, array);
928 : :
929 : : /* Successfully "incremented" array to NULL */
930 : 6 : return true;
931 : : }
932 : :
933 : : /* Cannot increment past final array element */
934 : 9 : return false;
935 : : }
936 : :
937 : : /*
938 : : * Successfully incremented sk_argument to a non-NULL value. Make sure
939 : : * that the incremented value is still within the range of the array.
940 : : */
941 [ + + ]: 3612 : if (array->high_compare &&
942 [ + + ]: 21 : !DatumGetBool(FunctionCall2Coll(&array->high_compare->sk_func,
943 : 21 : array->high_compare->sk_collation,
944 : : inc_sk_argument,
945 : 21 : array->high_compare->sk_argument)))
946 : : {
947 : : /* Keep existing sk_argument after all */
948 [ - + ]: 6 : if (!array->attbyval)
207 pg@bowt.ie 949 :UBC 0 : pfree(DatumGetPointer(inc_sk_argument));
950 : :
951 : : /* Cannot increment past final array element */
207 pg@bowt.ie 952 :CBC 6 : return false;
953 : : }
954 : :
955 : : /* Accept value returned by opclass increment callback */
956 [ - + - - ]: 3606 : if (!array->attbyval && skey->sk_argument)
207 pg@bowt.ie 957 :UBC 0 : pfree(DatumGetPointer(skey->sk_argument));
207 pg@bowt.ie 958 :CBC 3606 : skey->sk_argument = inc_sk_argument;
959 : :
960 : : /* Successfully incremented array */
961 : 3606 : return true;
962 : : }
963 : :
964 : : /*
965 : : * _bt_advance_array_keys_increment() -- Advance to next set of array elements
966 : : *
967 : : * Advances the array keys by a single increment in the current scan
968 : : * direction. When there are multiple array keys this can roll over from the
969 : : * lowest order array to higher order arrays.
970 : : *
971 : : * Returns true if there is another set of values to consider, false if not.
972 : : * On true result, the scankeys are initialized with the next set of values.
973 : : * On false result, the scankeys stay the same, and the array keys are not
974 : : * advanced (every array remains at its final element for scan direction).
975 : : */
976 : : static bool
977 : 15295 : _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir,
978 : : bool *skip_array_set)
979 : : {
980 : 15295 : Relation rel = scan->indexRelation;
4779 tgl@sss.pgh.pa.us 981 : 15295 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
982 : :
983 : : /*
984 : : * We must advance the last array key most quickly, since it will
985 : : * correspond to the lowest-order index column among the available
986 : : * qualifications
987 : : */
570 pg@bowt.ie 988 [ + + ]: 20022 : for (int i = so->numArrayKeys - 1; i >= 0; i--)
989 : : {
207 990 : 15867 : BTArrayKeyInfo *array = &so->arrayKeys[i];
991 : 15867 : ScanKey skey = &so->keyData[array->scan_key];
992 : :
993 [ + + ]: 15867 : if (array->num_elems == -1)
994 : 11669 : *skip_array_set = true;
995 : :
996 [ + + ]: 15867 : if (ScanDirectionIsForward(dir))
997 : : {
998 [ + + ]: 15411 : if (_bt_array_increment(rel, skey, array))
999 : 10702 : return true;
1000 : : }
1001 : : else
1002 : : {
1003 [ + + ]: 456 : if (_bt_array_decrement(rel, skey, array))
1004 : 438 : return true;
1005 : : }
1006 : :
1007 : : /*
1008 : : * Couldn't increment (or decrement) array. Handle array roll over.
1009 : : *
1010 : : * Start over at the array's lowest sorting value (or its highest
1011 : : * value, for backward scans)...
1012 : : */
1013 : 4727 : _bt_array_set_low_or_high(rel, skey, array,
1014 : : ScanDirectionIsForward(dir));
1015 : :
1016 : : /* ...then increment (or decrement) next most significant array */
1017 : : }
1018 : :
1019 : : /*
1020 : : * The array keys are now exhausted.
1021 : : *
1022 : : * Restore the array keys to the state they were in immediately before we
1023 : : * were called. This ensures that the arrays only ever ratchet in the
1024 : : * current scan direction.
1025 : : *
1026 : : * Without this, scans could overlook matching tuples when the scan
1027 : : * direction gets reversed just before btgettuple runs out of items to
1028 : : * return, but just after _bt_readpage prepares all the items from the
1029 : : * scan's final page in so->currPos. When we're on the final page it is
1030 : : * typical for so->currPos to get invalidated once btgettuple finally
1031 : : * returns false, which'll effectively invalidate the scan's array keys.
1032 : : * That hasn't happened yet, though -- and in general it may never happen.
1033 : : */
570 1034 : 4155 : _bt_start_array_keys(scan, -dir);
1035 : :
1036 : 4155 : return false;
1037 : : }
1038 : :
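A hedged, worked example of the rollover behaviour above, for a forward scan with two SAOP arrays such as "a IN (1, 2)" and "b IN (10, 20)" (column names and values are purely illustrative):

	/*
	 * Current elements: a = 2 (its final element), b = 10.
	 *
	 * First call:  b, the lowest-order array, advances first: b becomes 20.
	 *              New element set is (a, b) = (2, 20); return true.
	 *
	 * Second call: b cannot advance past 20, so it rolls over to 10 and the
	 *              next most significant array is tried; a cannot advance
	 *              past 2 either, so every array is exhausted.  The arrays
	 *              are restored to (2, 20) via _bt_start_array_keys(scan,
	 *              -dir), and false is returned.
	 */
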
1039 : : /*
1040 : : * _bt_tuple_before_array_skeys() -- too early to advance required arrays?
1041 : : *
1042 : : * We always compare the tuple using the current array keys (which we assume
1043 : : * are already set in so->keyData[]). readpagetup indicates if tuple is the
1044 : : * scan's current _bt_readpage-wise tuple.
1045 : : *
1046 : : * readpagetup callers must only call here when _bt_check_compare already set
1047 : : * continuescan=false. We help these callers deal with _bt_check_compare's
1048 : : * inability to distinguish between the < and > cases (it uses equality
1049 : : * operator scan keys, whereas we use 3-way ORDER procs). These callers pass
1050 : : * a _bt_check_compare-set sktrig value that indicates which scan key
1051 : : * triggered the call (!readpagetup callers just pass us sktrig=0 instead).
1052 : : * This information allows us to avoid wastefully checking earlier scan keys
1053 : : * that were already deemed to have been satisfied inside _bt_check_compare.
1054 : : *
1055 : : * Returns false when caller's tuple is >= the current required equality scan
1056 : : * keys (or <=, in the case of backwards scans). This happens to readpagetup
1057 : : * callers when the scan has reached the point of needing its array keys
1058 : : * advanced; caller will need to advance required and non-required arrays at
1059 : : * scan key offsets >= sktrig, plus scan keys < sktrig iff sktrig rolls over.
1060 : : * (When we return false to readpagetup callers, tuple can only be == current
1061 : : * required equality scan keys when caller's sktrig indicates that the arrays
1062 : : * need to be advanced due to an unsatisfied required inequality key trigger.)
1063 : : *
1064 : : * Returns true when caller passes a tuple that is < the current set of
1065 : : * equality keys for the most significant non-equal required scan key/column
1066 : : * (or > the keys, during backwards scans). This happens to readpagetup
1067 : : * callers when tuple is still before the start of matches for the scan's
1068 : : * required equality strategy scan keys. (sktrig can't have indicated that an
1069 : : * inequality strategy scan key wasn't satisfied in _bt_check_compare when we
1070 : : * return true. In fact, we automatically return false when passed such an
1071 : : * inequality sktrig by readpagetup callers -- _bt_check_compare's initial
1072 : : * continuescan=false doesn't really need to be confirmed here by us.)
1073 : : *
1074 : : * !readpagetup callers optionally pass us *scanBehind, which tracks whether
1075 : : * any missing truncated attributes might have affected array advancement
1076 : : * (compared to what would happen if it was shown the first non-pivot tuple on
1077 : : * the page to the right of caller's finaltup/high key tuple instead). It's
1078 : : * only possible that we'll set *scanBehind to true when caller passes us a
1079 : : * pivot tuple (with truncated -inf attributes) that we return false for.
1080 : : */
1081 : : static bool
1082 : 330740 : _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
1083 : : IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
1084 : : bool readpagetup, int sktrig, bool *scanBehind)
1085 : : {
1086 : 330740 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1087 : :
1088 [ - + ]: 330740 : Assert(so->numArrayKeys);
1089 [ - + ]: 330740 : Assert(so->numberOfKeys);
1090 [ + + - + ]: 330740 : Assert(sktrig == 0 || readpagetup);
1091 [ + + - + ]: 330740 : Assert(!readpagetup || scanBehind == NULL);
1092 : :
1093 [ + + ]: 330740 : if (scanBehind)
1094 : 42336 : *scanBehind = false;
1095 : :
1096 [ + + ]: 397142 : for (int ikey = sktrig; ikey < so->numberOfKeys; ikey++)
1097 : : {
1098 : 392788 : ScanKey cur = so->keyData + ikey;
1099 : : Datum tupdatum;
1100 : : bool tupnull;
1101 : : int32 result;
1102 : :
1103 : : /* readpagetup calls require one ORDER proc comparison (at most) */
1104 [ + + - + ]: 392788 : Assert(!readpagetup || ikey == sktrig);
1105 : :
1106 : : /*
1107 : : * Once we reach a non-required scan key, we're completely done.
1108 : : *
1109 : : * Note: we deliberately don't consider the scan direction here.
1110 : : * _bt_advance_array_keys caller requires that we track *scanBehind
1111 : : * without concern for scan direction.
1112 : : */
1113 [ - + ]: 392788 : if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) == 0)
1114 : : {
570 pg@bowt.ie 1115 [ # # ]:UBC 0 : Assert(!readpagetup);
1116 [ # # # # ]: 0 : Assert(ikey > sktrig || ikey == 0);
570 pg@bowt.ie 1117 :CBC 326386 : return false;
1118 : : }
1119 : :
1120 [ + + ]: 392788 : if (cur->sk_attno > tupnatts)
1121 : : {
1122 [ - + ]: 1126 : Assert(!readpagetup);
1123 : :
1124 : : /*
1125 : : * When we reach a high key's truncated attribute, assume that the
1126 : : * tuple attribute's value is >= the scan's equality constraint
1127 : : * scan keys (but set *scanBehind to let interested callers know
1128 : : * that a truncated attribute might have affected our answer).
1129 : : */
1130 [ + + ]: 1126 : if (scanBehind)
1131 : 16 : *scanBehind = true;
1132 : :
1133 : 1126 : return false;
1134 : : }
1135 : :
1136 : : /*
1137 : : * Deal with inequality strategy scan keys that _bt_check_compare set
1138 : : * continuescan=false for
1139 : : */
1140 [ + + ]: 391662 : if (cur->sk_strategy != BTEqualStrategyNumber)
1141 : : {
1142 : : /*
1143 : : * When _bt_check_compare indicated that a required inequality
1144 : : * scan key wasn't satisfied, there's no need to verify anything;
1145 : : * caller always calls _bt_advance_array_keys with this sktrig.
1146 : : */
1147 [ + + ]: 8041 : if (readpagetup)
1148 : 174 : return false;
1149 : :
1150 : : /*
1151 : : * Otherwise we can't give up, since we must check all required
1152 : : * scan keys (required in either direction) in order to correctly
1153 : : * track *scanBehind for caller
1154 : : */
1155 : 7867 : continue;
1156 : : }
1157 : :
1158 : 383621 : tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
1159 : :
207 1160 [ + + ]: 383621 : if (likely(!(cur->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL))))
1161 : : {
1162 : : /* Scankey has a valid/comparable sk_argument value */
1163 : 378225 : result = _bt_compare_array_skey(&so->orderProcs[ikey],
1164 : : tupdatum, tupnull,
1165 : : cur->sk_argument, cur);
1166 : :
1167 [ + + ]: 378225 : if (result == 0)
1168 : : {
1169 : : /*
1170 : : * Interpret result in a way that takes NEXT/PRIOR into
1171 : : * account
1172 : : */
1173 [ + + ]: 72700 : if (cur->sk_flags & SK_BT_NEXT)
1174 : 14144 : result = -1;
1175 [ + + ]: 58556 : else if (cur->sk_flags & SK_BT_PRIOR)
1176 : 21 : result = 1;
1177 : :
1178 [ + + - + ]: 72700 : Assert(result == 0 || (cur->sk_flags & SK_BT_SKIP));
1179 : : }
1180 : : }
1181 : : else
1182 : : {
1183 : 5396 : BTArrayKeyInfo *array = NULL;
1184 : :
1185 : : /*
1186 : : * Current array element/array = scan key value is a sentinel
1187 : : * value that represents the lowest (or highest) possible value
1188 : : * that's still within the range of the array.
1189 : : *
1190 : : * Like _bt_first, we only see MINVAL keys during forwards scans
1191 : : * (and similarly only see MAXVAL keys during backwards scans).
1192 : : * Even if the scan's direction changes, we'll stop at some higher
1193 : : * order key before we can ever reach any MAXVAL (or MINVAL) keys.
1194 : : * (However, unlike _bt_first we _can_ get to keys marked either
1195 : : * NEXT or PRIOR, regardless of the scan's current direction.)
1196 : : */
1197 [ + + - + ]: 5396 : Assert(ScanDirectionIsForward(dir) ?
1198 : : !(cur->sk_flags & SK_BT_MAXVAL) :
1199 : : !(cur->sk_flags & SK_BT_MINVAL));
1200 : :
1201 : : /*
1202 : : * There are no valid sk_argument values in MINVAL/MAXVAL keys.
1203 : : * Check if tupdatum is within the range of skip array instead.
1204 : : */
1205 [ + - ]: 5932 : for (int arrayidx = 0; arrayidx < so->numArrayKeys; arrayidx++)
1206 : : {
1207 : 5932 : array = &so->arrayKeys[arrayidx];
1208 [ + + ]: 5932 : if (array->scan_key == ikey)
1209 : 5396 : break;
1210 : : }
1211 : :
1212 : 5396 : _bt_binsrch_skiparray_skey(false, dir, tupdatum, tupnull,
1213 : : array, cur, &result);
1214 : :
1215 [ + + ]: 5396 : if (result == 0)
1216 : : {
1217 : : /*
1218 : : * tupdatum satisfies both low_compare and high_compare, so
1219 : : * it's time to advance the array keys.
1220 : : *
1221 : : * Note: It's possible that the skip array will "advance" from
1222 : : * its MINVAL (or MAXVAL) representation to an alternative,
1223 : : * logically equivalent representation of the same value: a
1224 : : * representation where the = key gets a valid datum in its
1225 : : * sk_argument. This is only possible when low_compare uses
1226 : : * the >= strategy (or high_compare uses the <= strategy).
1227 : : */
1228 : 5390 : return false;
1229 : : }
1230 : : }
1231 : :
1232 : : /*
1233 : : * Does this comparison indicate that caller must _not_ advance the
1234 : : * scan's arrays just yet?
1235 : : */
570 1236 [ + + + + : 378231 : if ((ScanDirectionIsForward(dir) && result < 0) ||
+ + ]
1237 [ + + ]: 3312 : (ScanDirectionIsBackward(dir) && result > 0))
1238 : 98728 : return true;
1239 : :
1240 : : /*
1241 : : * Does this comparison indicate that caller should now advance the
1242 : : * scan's arrays? (Must be if we get here during a readpagetup call.)
1243 : : */
1244 [ + + + + ]: 279503 : if (readpagetup || result != 0)
1245 : : {
1246 [ - + ]: 220968 : Assert(result != 0);
1247 : 220968 : return false;
1248 : : }
1249 : :
1250 : : /*
1251 : : * Inconclusive -- need to check later scan keys, too.
1252 : : *
1253 : : * This must be a finaltup precheck, or a call made from an assertion.
1254 : : */
1255 [ - + ]: 58535 : Assert(result == 0);
1256 : : }
1257 : :
1258 [ - + ]: 4354 : Assert(!readpagetup);
1259 : :
1260 : 4354 : return false;
1261 : : }
1262 : :
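A hedged, heavily simplified sketch of the readpagetup-style calling pattern described above (the real logic lives in _bt_checkkeys; scan, dir, tuple, tupdesc, tupnatts, ikey, pstate and continuescan are assumed from that context):

	if (!continuescan &&
		_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts,
									 true, ikey, NULL))
	{
		/*
		 * Tuple still sorts before the scan's current array keys: too early
		 * to advance them, so just move on to the next tuple on the page
		 */
	}
	else if (!continuescan)
	{
		/* Tuple is at/ahead of the array keys: advance them using the tuple */
		_bt_advance_array_keys(scan, pstate, tuple, tupnatts, tupdesc,
							   ikey, true);
	}
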
1263 : : /*
1264 : : * _bt_start_prim_scan() -- start scheduled primitive index scan?
1265 : : *
1266 : : * Returns true if _bt_checkkeys scheduled another primitive index scan, just
1267 : : * as the last one ended. Otherwise returns false, indicating that the array
1268 : : * keys are now fully exhausted.
1269 : : *
1270 : : * Only call here during scans with one or more equality type array scan keys,
1271 : : * after _bt_first or _bt_next return false.
1272 : : */
1273 : : bool
1274 : 44344 : _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
1275 : : {
1276 : 44344 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1277 : :
1278 [ - + ]: 44344 : Assert(so->numArrayKeys);
1279 : :
377 1280 : 44344 : so->scanBehind = so->oppositeDirCheck = false; /* reset */
1281 : :
1282 : : /*
1283 : : * Array keys are advanced within _bt_checkkeys when the scan reaches the
1284 : : * leaf level (more precisely, they're advanced when the scan reaches the
1285 : : * end of each distinct set of array elements). This process avoids
1286 : : * repeat access to leaf pages (across multiple primitive index scans) by
1287 : : * advancing the scan's array keys when it allows the primitive index scan
1288 : : * to find nearby matching tuples (or when it eliminates ranges of array
1289 : : * key space that can't possibly be satisfied by any index tuple).
1290 : : *
1291 : : * _bt_checkkeys sets a simple flag variable to schedule another primitive
1292 : : * index scan. The flag tells us what to do.
1293 : : *
1294 : : * We cannot rely on _bt_first always reaching _bt_checkkeys. There are
1295 : : * various cases where that won't happen. For example, if the index is
1296 : : * completely empty, then _bt_first won't call _bt_readpage/_bt_checkkeys.
1297 : : * We also don't expect a call to _bt_checkkeys during searches for a
1298 : : * non-existent value that happens to be lower/higher than any existing
1299 : : * value in the index.
1300 : : *
1301 : : * We don't require special handling for these cases -- we don't need to
1302 : : * be explicitly instructed to _not_ perform another primitive index scan.
1303 : : * It's up to code under the control of _bt_first to always set the flag
1304 : : * when another primitive index scan will be required.
1305 : : *
1306 : : * This works correctly, even with the tricky cases listed above, which
1307 : : * all involve access to leaf pages "near the boundaries of the key space"
1308 : : * (whether it's from a leftmost/rightmost page, or an imaginary empty
1309 : : * leaf root page). If _bt_checkkeys cannot be reached by a primitive
1310 : : * index scan for one set of array keys, then it also won't be reached for
1311 : : * any later set ("later" in terms of the direction that we scan the index
1312 : : * and advance the arrays). The array keys won't have advanced in these
1313 : : * cases, but that's the correct behavior (even _bt_advance_array_keys
1314 : : * won't always advance the arrays at the point they become "exhausted").
1315 : : */
570 1316 [ + + ]: 44344 : if (so->needPrimScan)
1317 : : {
1318 : : /*
1319 : : * Flag was set -- must call _bt_first again, which will reset the
1320 : : * scan's needPrimScan flag
1321 : : */
1322 : 8767 : return true;
1323 : : }
1324 : :
1325 : : /* The top-level index scan ran out of tuples in this scan direction */
1326 [ + + ]: 35577 : if (scan->parallel_scan != NULL)
1327 : 15 : _bt_parallel_done(scan);
1328 : :
1329 : 35577 : return false;
1330 : : }
1331 : :
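A hedged, simplified sketch of the caller-side loop implied by the header comment above (the real loop is in nbtree.c; res, scan, dir and so are assumed from that btgettuple-like context):

	res = _bt_first(scan, dir);
	while (!res && so->numArrayKeys && _bt_start_prim_scan(scan, dir))
	{
		/* _bt_checkkeys scheduled another primitive index scan; run it */
		res = _bt_first(scan, dir);
	}
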
1332 : : /*
1333 : : * _bt_advance_array_keys() -- Advance array elements using a tuple
1334 : : *
1335 : : * The scan always gets a new qual as a consequence of calling here (except
1336 : : * when we determine that the top-level scan has run out of matching tuples).
1337 : : * All later _bt_check_compare calls also use the same new qual that was first
1338 : : * used here (at least until the next call here advances the keys once again).
1339 : : * It's convenient to structure _bt_check_compare rechecks of caller's tuple
1340 : : * (using the new qual) as one of the steps of advancing the scan's array keys,
1341 : : * so this function works as a wrapper around _bt_check_compare.
1342 : : *
1343 : : * Like _bt_check_compare, we'll set pstate.continuescan on behalf of the
1344 : : * caller, and return a boolean indicating if caller's tuple satisfies the
1345 : : * scan's new qual. But unlike _bt_check_compare, we set so->needPrimScan
1346 : : * when we set continuescan=false, indicating if a new primitive index scan
1347 : : * has been scheduled (otherwise, the top-level scan has run out of tuples in
1348 : : * the current scan direction).
1349 : : *
1350 : : * Caller must use _bt_tuple_before_array_skeys to determine if the current
1351 : : * place in the scan is >= the current array keys _before_ calling here.
1352 : : * We're responsible for ensuring that caller's tuple is <= the newly advanced
1353 : : * required array keys once we return. We try to find an exact match, but
1354 : : * failing that we'll advance the array keys to whatever set of array elements
1355 : : * comes next in the key space for the current scan direction. Required array
1356 : : * keys "ratchet forwards" (or backwards). They can only advance as the scan
1357 : : * itself advances through the index/key space.
1358 : : *
1359 : : * (The rules are the same for backwards scans, except that the operators are
1360 : : * flipped: just replace the precondition's >= operator with a <=, and the
1361 : : * postcondition's <= operator with a >=. In other words, just swap the
1362 : : * precondition with the postcondition.)
1363 : : *
1364 : : * We also deal with "advancing" non-required arrays here (or arrays that are
1365 : : * treated as non-required for the duration of a _bt_readpage call). Callers
1366 : : * whose sktrig scan key is non-required specify sktrig_required=false. These
1367 : : * calls are the only exception to the general rule about always advancing the
1368 : : * required array keys (the scan may not even have a required array). These
1369 : : * callers should just pass a NULL pstate (since there is never any question
1370 : : * of stopping the scan). No call to _bt_tuple_before_array_skeys is required
1371 : : * ahead of these calls (it's already clear that any required scan keys must
1372 : : * be satisfied by caller's tuple).
1373 : : *
1374 : : * Note that we deal with non-array required equality strategy scan keys as
1375 : : * degenerate single element arrays here. Obviously, they can never really
1376 : : * advance in the way that real arrays can, but they must still affect how we
1377 : : * advance real array scan keys (exactly like true array equality scan keys).
1378 : : * We have to keep around a 3-way ORDER proc for these (using the "=" operator
1379 : : * won't do), since in general whether the tuple is < or > _any_ unsatisfied
1380 : : * required equality key influences how the scan's real arrays must advance.
1381 : : *
1382 : : * Note also that we may sometimes need to advance the array keys when the
1383 : : * existing required array keys (and other required equality keys) are already
1384 : : * an exact match for every corresponding value from caller's tuple. We must
1385 : : * do this for inequalities that _bt_check_compare set continuescan=false for.
1386 : : * They'll advance the array keys here, just like any other scan key that
1387 : : * _bt_check_compare stops on. (This can even happen _after_ we advance the
1388 : : * array keys, in which case we'll advance the array keys a second time. That
1389 : : * way _bt_checkkeys caller always has its required arrays advance to the
1390 : : * maximum possible extent that its tuple will allow.)
1391 : : */
1392 : : static bool
1393 : 99395 : _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
1394 : : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
1395 : : int sktrig, bool sktrig_required)
1396 : : {
1397 : 99395 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1398 : 99395 : Relation rel = scan->indexRelation;
363 1399 : 99395 : ScanDirection dir = so->currPos.dir;
570 1400 : 99395 : int arrayidx = 0;
1401 : 99395 : bool beyond_end_advance = false,
207 1402 : 99395 : skip_array_advanced = false,
570 1403 : 99395 : has_required_opposite_direction_only = false,
1404 : 99395 : all_required_satisfied = true,
1405 : 99395 : all_satisfied = true;
1406 : :
220 1407 [ + - + - : 99395 : Assert(!so->needPrimScan && !so->scanBehind && !so->oppositeDirCheck);
- + ]
207 1408 [ - + ]: 99395 : Assert(_bt_verify_keys_with_arraykeys(scan));
1409 : :
570 1410 [ + + ]: 99395 : if (sktrig_required)
1411 : : {
1412 : : /*
1413 : : * Precondition array state assertion
1414 : : */
1415 [ - + ]: 95132 : Assert(!_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc,
1416 : : tupnatts, false, 0, NULL));
1417 : :
1418 : : /*
1419 : : * Once we return we'll have a new set of required array keys, so
1420 : : * reset state used by "look ahead" optimization
1421 : : */
1422 : 95132 : pstate->rechecks = 0;
1423 : 95132 : pstate->targetdistance = 0;
1424 : : }
207 1425 [ + - ]: 4263 : else if (sktrig < so->numberOfKeys - 1 &&
1426 [ + - ]: 4263 : !(so->keyData[so->numberOfKeys - 1].sk_flags & SK_SEARCHARRAY))
1427 : : {
1428 : 4263 : int least_sign_ikey = so->numberOfKeys - 1;
1429 : : bool continuescan;
1430 : :
1431 : : /*
1432 : : * Optimization: perform a precheck of the least significant key
1433 : : * during !sktrig_required calls when it isn't already our sktrig
1434 : : * (provided the precheck key is not itself an array).
1435 : : *
1436 : : * When the precheck works out we'll avoid an expensive binary search
1437 : : * of sktrig's array (plus any other arrays before least_sign_ikey).
1438 : : */
1439 [ - + ]: 4263 : Assert(so->keyData[sktrig].sk_flags & SK_SEARCHARRAY);
1440 [ + + ]: 4263 : if (!_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
1441 : : false, &continuescan,
1442 : : &least_sign_ikey))
1443 : 1119 : return false;
1444 : : }
1445 : :
570 1446 [ + + ]: 288542 : for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
1447 : : {
1448 : 193149 : ScanKey cur = so->keyData + ikey;
1449 : 193149 : BTArrayKeyInfo *array = NULL;
1450 : : Datum tupdatum;
1451 : 193149 : bool required = false,
1452 : : tupnull;
1453 : : int32 result;
1454 : 193149 : int set_elem = 0;
1455 : :
1456 [ + + ]: 193149 : if (cur->sk_strategy == BTEqualStrategyNumber)
1457 : : {
1458 : : /* Manage array state */
1459 [ + + ]: 169087 : if (cur->sk_flags & SK_SEARCHARRAY)
1460 : : {
1461 : 103463 : array = &so->arrayKeys[arrayidx++];
1462 [ - + ]: 103463 : Assert(array->scan_key == ikey);
1463 : : }
1464 : : }
1465 : : else
1466 : : {
1467 : : /*
1468 : : * Are any inequalities required in the opposite direction only
1469 : : * present here?
1470 : : */
1471 [ + - ]: 24062 : if (((ScanDirectionIsForward(dir) &&
1472 [ + + - + ]: 24062 : (cur->sk_flags & (SK_BT_REQBKWD))) ||
570 pg@bowt.ie 1473 :UBC 0 : (ScanDirectionIsBackward(dir) &&
1474 [ # # ]: 0 : (cur->sk_flags & (SK_BT_REQFWD)))))
16 pg@bowt.ie 1475 :CBC 7790 : has_required_opposite_direction_only = true;
1476 : : }
1477 : :
1478 : : /* Optimization: skip over known-satisfied scan keys */
570 1479 [ + + ]: 193149 : if (ikey < sktrig)
1480 : 38112 : continue;
1481 : :
1482 [ + - ]: 184954 : if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
1483 : : {
1484 : 184954 : required = true;
1485 : :
1486 [ + + ]: 184954 : if (cur->sk_attno > tupnatts)
1487 : : {
1488 : : /* Set this just like _bt_tuple_before_array_skeys */
1489 [ - + ]: 1160 : Assert(sktrig < ikey);
1490 : 1160 : so->scanBehind = true;
1491 : : }
1492 : : }
1493 : :
1494 : : /*
1495 : : * Handle a required non-array scan key that the initial call to
1496 : : * _bt_check_compare indicated triggered array advancement, if any.
1497 : : *
1498 : : * The non-array scan key's strategy will be <, <=, or = during a
1499 : : * forwards scan (or any one of =, >=, or > during a backwards scan).
1500 : : * It follows that the corresponding tuple attribute's value must now
1501 : : * be either > or >= the scan key value (for backwards scans it must
1502 : : * be either < or <= that value).
1503 : : *
1504 : : * If this is a required equality strategy scan key, this is just an
1505 : : * optimization; _bt_tuple_before_array_skeys already confirmed that
1506 : : * this scan key places us ahead of caller's tuple. There's no need
1507 : : * to repeat that work now. (The same underlying principle also gets
1508 : : * applied by the cur_elem_trig optimization used to speed up searches
1509 : : * for the next array element.)
1510 : : *
1511 : : * If this is a required inequality strategy scan key, we _must_ rely
1512 : : * on _bt_check_compare like this; we aren't capable of directly
1513 : : * evaluating required inequality strategy scan keys here, on our own.
1514 : : */
1515 [ + + + + ]: 184954 : if (ikey == sktrig && !array)
1516 : : {
1517 [ + - + - : 3727 : Assert(sktrig_required && required && all_required_satisfied);
- + ]
1518 : :
1519 : : /* Use "beyond end" advancement. See below for an explanation. */
1520 : 3727 : beyond_end_advance = true;
1521 : 3727 : all_satisfied = all_required_satisfied = false;
1522 : :
1523 : 3727 : continue;
1524 : : }
1525 : :
1526 : : /*
1527 : : * Nothing more for us to do with an inequality strategy scan key that
1528 : : * wasn't the one that _bt_check_compare stopped on, though.
1529 : : *
1530 : : * Note: if our later call to _bt_check_compare (to recheck caller's
1531 : : * tuple) sets continuescan=false due to finding this same inequality
1532 : : * unsatisfied (possible when it's required in the scan direction),
1533 : : * we'll deal with it via a recursive "second pass" call.
1534 : : */
1535 [ + + ]: 181227 : else if (cur->sk_strategy != BTEqualStrategyNumber)
1536 : 23777 : continue;
1537 : :
1538 : : /*
1539 : : * Nothing for us to do with an equality strategy scan key that isn't
1540 : : * marked required, either -- unless it's a non-required array
1541 : : */
1542 [ - + - - ]: 157450 : else if (!required && !array)
570 pg@bowt.ie 1543 :UBC 0 : continue;
1544 : :
1545 : : /*
1546 : : * Here we perform steps for all array scan keys after a required
1547 : : * array scan key whose binary search triggered "beyond end of array
1548 : : * element" array advancement due to encountering a tuple attribute
1549 : : * value > the closest matching array key (or < for backwards scans).
1550 : : */
570 pg@bowt.ie 1551 [ + + ]:CBC 157450 : if (beyond_end_advance)
1552 : : {
207 1553 [ + + ]: 716 : if (array)
1554 : 300 : _bt_array_set_low_or_high(rel, cur, array,
1555 : : ScanDirectionIsBackward(dir));
1556 : :
570 1557 : 716 : continue;
1558 : : }
1559 : :
1560 : : /*
1561 : : * Here we perform steps for all array scan keys after a required
1562 : : * array scan key whose tuple attribute was < the closest matching
1563 : : * array key when we dealt with it (or > for backwards scans).
1564 : : *
1565 : : * This earlier required array key already puts us ahead of caller's
1566 : : * tuple in the key space (for the current scan direction). We must
1567 : : * make sure that subsequent lower-order array keys do not put us too
1568 : : * far ahead (ahead of tuples that have yet to be seen by our caller).
1569 : : * For example, when a tuple "(a, b) = (42, 5)" advances the array
1570 : : * keys on "a" from 40 to 45, we must also set "b" to whatever the
1571 : : * first array element for "b" is. It would be wrong to allow "b" to
1572 : : * be set based on the tuple value.
1573 : : *
1574 : : * Perform the same steps with truncated high key attributes. You can
1575 : : * think of this as a "binary search" for the element closest to the
1576 : : * value -inf. Again, the arrays must never get ahead of the scan.
1577 : : */
1578 [ + + + + ]: 156734 : if (!all_required_satisfied || cur->sk_attno > tupnatts)
1579 : : {
207 1580 [ + + ]: 1697 : if (array)
1581 : 398 : _bt_array_set_low_or_high(rel, cur, array,
1582 : : ScanDirectionIsForward(dir));
1583 : :
570 1584 : 1697 : continue;
1585 : : }
1586 : :
1587 : : /*
1588 : : * Search in scankey's array for the corresponding tuple attribute
1589 : : * value from caller's tuple
1590 : : */
1591 : 155037 : tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
1592 : :
1593 [ + + ]: 155037 : if (array)
1594 : : {
1595 [ + + + + ]: 95459 : bool cur_elem_trig = (sktrig_required && ikey == sktrig);
1596 : :
1597 : : /*
1598 : : * "Binary search" by checking if tupdatum/tupnull are within the
1599 : : * range of the skip array
1600 : : */
207 1601 [ + + ]: 95459 : if (array->num_elems == -1)
1602 : 79588 : _bt_binsrch_skiparray_skey(cur_elem_trig, dir,
1603 : : tupdatum, tupnull, array, cur,
1604 : : &result);
1605 : :
1606 : : /*
1607 : : * Binary search for the closest match from the SAOP array
1608 : : */
1609 : : else
1610 : 15871 : set_elem = _bt_binsrch_array_skey(&so->orderProcs[ikey],
1611 : : cur_elem_trig, dir,
1612 : : tupdatum, tupnull, array, cur,
1613 : : &result);
1614 : : }
1615 : : else
1616 : : {
1617 [ - + ]: 59578 : Assert(required);
1618 : :
1619 : : /*
1620 : : * This is a required non-array equality strategy scan key, which
1621 : : * we'll treat as a degenerate single element array.
1622 : : *
1623 : : * This scan key's imaginary "array" can't really advance, but it
1624 : : * can still roll over like any other array. (Actually, this is
1625 : : * no different to real single value arrays, which never advance
1626 : : * without rolling over -- they can never truly advance, either.)
1627 : : */
570 1628 : 59578 : result = _bt_compare_array_skey(&so->orderProcs[ikey],
1629 : : tupdatum, tupnull,
1630 : : cur->sk_argument, cur);
1631 : : }
1632 : :
1633 : : /*
1634 : : * Consider "beyond end of array element" array advancement.
1635 : : *
1636 : : * When the tuple attribute value is > the closest matching array key
1637 : : * (or < in the backwards scan case), we need to ratchet this array
1638 : : * forward (backward) by one increment, so that caller's tuple ends up
1639 : : * being < final array value instead (or > final array value instead).
1640 : : * This process has to work for all of the arrays, not just this one:
1641 : : * it must "carry" to higher-order arrays when the set_elem that we
1642 : : * just found happens to be the final one for the scan's direction.
1643 : : * Incrementing (decrementing) set_elem itself isn't good enough.
1644 : : *
1645 : : * Our approach is to provisionally use set_elem as if it was an exact
1646 : : * match now, then set each later/less significant array to whatever
1647 : : * its final element is. Once outside the loop we'll then "increment
1648 : : * this array's set_elem" by calling _bt_advance_array_keys_increment.
1649 : : * That way the process rolls over to higher order arrays as needed.
1650 : : *
1651 : : * Under this scheme any required arrays only ever ratchet forwards
1652 : : * (or backwards), and always do so to the maximum possible extent
1653 : : * that we can know will be safe without seeing the scan's next tuple.
1654 : : * We don't need any special handling for required scan keys that lack
1655 : : * a real array to advance, nor for redundant scan keys that couldn't
1656 : : * be eliminated by _bt_preprocess_keys. It won't matter if some of
1657 : : * our "true" array scan keys (or even all of them) are non-required.
1658 : : */
207 1659 [ + + + - : 155037 : if (sktrig_required && required &&
+ + ]
570 1660 [ + + + + ]: 151893 : ((ScanDirectionIsForward(dir) && result > 0) ||
1661 [ + + ]: 858 : (ScanDirectionIsBackward(dir) && result < 0)))
1662 : 11568 : beyond_end_advance = true;
1663 : :
1664 [ + - - + ]: 155037 : Assert(all_required_satisfied && all_satisfied);
1665 [ + + ]: 155037 : if (result != 0)
1666 : : {
1667 : : /*
1668 : : * Track whether caller's tuple satisfies our new post-advancement
1669 : : * qual, for required scan keys, as well as for the entire set of
1670 : : * interesting scan keys (all required scan keys plus non-required
1671 : : * array scan keys are considered interesting.)
1672 : : */
1673 : 71030 : all_satisfied = false;
207 1674 [ + + + - ]: 71030 : if (sktrig_required && required)
570 1675 : 68147 : all_required_satisfied = false;
1676 : : else
1677 : : {
1678 : : /*
1679 : : * There's no need to advance the arrays using the best
1680 : : * available match for a non-required array. Give up now.
1681 : : * (Though note that sktrig_required calls still have to do
1682 : : * all the usual post-advancement steps, including the recheck
1683 : : * call to _bt_check_compare.)
1684 : : */
1685 : : break;
1686 : : }
1687 : : }
1688 : :
1689 : : /* Advance array keys, even when we don't have an exact match */
207 1690 [ + + ]: 152154 : if (array)
1691 : : {
1692 [ + + ]: 92576 : if (array->num_elems == -1)
1693 : : {
1694 : : /* Skip array's new element is tupdatum (or MINVAL/MAXVAL) */
1695 : 76705 : _bt_skiparray_set_element(rel, cur, array, result,
1696 : : tupdatum, tupnull);
1697 : 76705 : skip_array_advanced = true;
1698 : : }
1699 [ + + ]: 15871 : else if (array->cur_elem != set_elem)
1700 : : {
1701 : : /* SAOP array's new element is set_elem datum */
1702 : 11795 : array->cur_elem = set_elem;
1703 : 11795 : cur->sk_argument = array->elem_values[set_elem];
1704 : : }
1705 : : }
1706 : : }
1707 : :
1708 : : /*
1709 : : * Advance the array keys incrementally whenever "beyond end of array
1710 : : * element" array advancement happens, so that advancement will carry to
1711 : : * higher-order arrays (might exhaust all the scan's arrays instead, which
1712 : : * ends the top-level scan).
1713 : : */
1714 [ + + ]: 98276 : if (beyond_end_advance &&
1715 [ + + ]: 15295 : !_bt_advance_array_keys_increment(scan, dir, &skip_array_advanced))
570 1716 : 4155 : goto end_toplevel_scan;
1717 : :
1718 [ - + ]: 94121 : Assert(_bt_verify_keys_with_arraykeys(scan));
1719 : :
1720 : : /*
1721 : : * Maintain a page-level count of the number of times the scan's array
1722 : : * keys advanced in a way that affected at least one skip array
1723 : : */
207 1724 [ + + + + ]: 94121 : if (sktrig_required && skip_array_advanced)
1725 : 79713 : pstate->nskipadvances++;
1726 : :
1727 : : /*
1728 : : * Does tuple now satisfy our new qual? Recheck with _bt_check_compare.
1729 : : *
1730 : : * Calls triggered by an unsatisfied required scan key, whose tuple now
1731 : : * satisfies all required scan keys, but not all nonrequired array keys,
1732 : : * will still require a recheck call to _bt_check_compare. They'll still
1733 : : * need its "second pass" handling of required inequality scan keys.
1734 : : * (Might have missed a still-unsatisfied required inequality scan key
1735 : : * that caller didn't detect as the sktrig scan key during its initial
1736 : : * _bt_check_compare call that used the old/original qual.)
1737 : : *
1738 : : * Calls triggered by an unsatisfied nonrequired array scan key never need
1739 : : * "second pass" handling of required inequalities (nor any other handling
1740 : : * of any required scan key). All that matters is whether caller's tuple
1741 : : * satisfies the new qual, so it's safe to just skip the _bt_check_compare
1742 : : * recheck when we've already determined that it can only return 'false'.
1743 : : *
1744 : : * Note: In practice most scan keys are marked required by preprocessing,
1745 : : * if necessary by generating a preceding skip array. We nevertheless
1746 : : * often handle array keys marked required as if they were nonrequired.
1747 : : * This behavior is requested by our _bt_check_compare caller, though only
1748 : : * when it is passed "forcenonrequired=true" by _bt_checkkeys.
1749 : : */
570 1750 [ + + + + ]: 94121 : if ((sktrig_required && all_required_satisfied) ||
1751 [ + + + + ]: 70863 : (!sktrig_required && all_satisfied))
1752 : : {
1753 : 23519 : int nsktrig = sktrig + 1;
1754 : : bool continuescan;
1755 : :
1756 [ - + ]: 23519 : Assert(all_required_satisfied);
1757 : :
1758 : : /* Recheck _bt_check_compare on behalf of caller */
207 1759 [ + + ]: 23519 : if (_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
179 1760 : 23519 : !sktrig_required, &continuescan,
207 1761 : 23519 : &nsktrig) &&
570 1762 [ + + ]: 19687 : !so->scanBehind)
1763 : : {
1764 : : /* This tuple satisfies the new qual */
1765 [ + - - + ]: 18577 : Assert(all_satisfied && continuescan);
1766 : :
1767 [ + + ]: 18577 : if (pstate)
1768 : 18316 : pstate->continuescan = true;
1769 : :
1770 : 18688 : return true;
1771 : : }
1772 : :
1773 : : /*
1774 : : * Consider "second pass" handling of required inequalities.
1775 : : *
1776 : : * It's possible that our _bt_check_compare call indicated that the
1777 : : * scan should end due to some unsatisfied inequality that wasn't
1778 : : * initially recognized as such by us. Handle this by calling
1779 : : * ourselves recursively, this time indicating that the trigger is the
1780 : : * inequality that we missed first time around (and using a set of
1781 : : * required array/equality keys that are now exact matches for tuple).
1782 : : *
1783 : : * We make a strong, general guarantee that every _bt_checkkeys call
1784 : : * here will advance the array keys to the maximum possible extent
1785 : : * that we can know to be safe based on caller's tuple alone. If we
1786 : : * didn't perform this step, then that guarantee wouldn't quite hold.
1787 : : */
1788 [ + + ]: 4942 : if (unlikely(!continuescan))
1789 : : {
1790 : : bool satisfied PG_USED_FOR_ASSERTS_ONLY;
1791 : :
1792 [ - + ]: 111 : Assert(sktrig_required);
1793 [ - + ]: 111 : Assert(so->keyData[nsktrig].sk_strategy != BTEqualStrategyNumber);
1794 : :
1795 : : /*
1796 : : * The tuple must use "beyond end" advancement during the
1797 : : * recursive call, so we cannot possibly end up back here when
1798 : : * recursing. We'll consume a small, fixed amount of stack space.
1799 : : */
1800 [ - + ]: 111 : Assert(!beyond_end_advance);
1801 : :
1802 : : /* Advance the array keys a second time using same tuple */
1803 : 111 : satisfied = _bt_advance_array_keys(scan, pstate, tuple, tupnatts,
1804 : : tupdesc, nsktrig, true);
1805 : :
1806 : : /* This tuple doesn't satisfy the inequality */
1807 [ - + ]: 111 : Assert(!satisfied);
1808 : 111 : return false;
1809 : : }
1810 : :
1811 : : /*
1812 : : * Some non-required scan key (from new qual) still not satisfied.
1813 : : *
1814 : : * All scan keys required in the current scan direction must still be
1815 : : * satisfied, though, so we can trust all_required_satisfied below.
1816 : : */
1817 : : }
1818 : :
1819 : : /*
1820 : : * When we were called just to deal with "advancing" non-required arrays,
1821 : : * this is as far as we can go (cannot stop the scan for these callers)
1822 : : */
1823 [ + + ]: 75433 : if (!sktrig_required)
1824 : : {
1825 : : /* Caller's tuple doesn't match any qual */
1826 : 2883 : return false;
1827 : : }
1828 : :
1829 : : /*
1830 : : * Postcondition array state assertion (for still-unsatisfied tuples).
1831 : : *
1832 : : * By here we have established that the scan's required arrays (scan must
1833 : : * have at least one required array) advanced, without becoming exhausted.
1834 : : *
1835 : : * Caller's tuple is now < the newly advanced array keys (or > when this
1836 : : * is a backwards scan), except in the case where we only got this far due
1837 : : * to an unsatisfied non-required scan key. Verify that with an assert.
1838 : : *
1839 : : * Note: we don't just quit at this point when all required scan keys were
1840 : : * found to be satisfied because we need to consider edge-cases involving
1841 : : * scan keys required in the opposite direction only; those aren't tracked
1842 : : * by all_required_satisfied.
1843 : : */
1844 [ - + ]: 72550 : Assert(_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts,
1845 : : false, 0, NULL) ==
1846 : : !all_required_satisfied);
1847 : :
1848 : : /*
1849 : : * We generally permit primitive index scans to continue onto the next
1850 : : * sibling page when the page's finaltup satisfies all required scan keys
1851 : : * at the point where we're between pages.
1852 : : *
1853 : : * If caller's tuple is also the page's finaltup, and we see that required
1854 : : * scan keys still aren't satisfied, start a new primitive index scan.
1855 : : */
1856 [ + + + + ]: 72550 : if (!all_required_satisfied && pstate->finaltup == tuple)
1857 : 264 : goto new_prim_scan;
1858 : :
1859 : : /*
1860 : : * Proactively check finaltup (don't wait until finaltup is reached by the
1861 : : * scan) when it might well turn out to not be satisfied later on.
1862 : : *
1863 : : * Note: if so->scanBehind hasn't already been set for finaltup by us,
1864 : : * it'll be set during this call to _bt_tuple_before_array_skeys. Either
1865 : : * way, it'll be set correctly (for the whole page) after this point.
1866 : : */
1867 [ + + + + : 113322 : if (!all_required_satisfied && pstate->finaltup &&
+ + ]
1868 [ + + ]: 82072 : _bt_tuple_before_array_skeys(scan, dir, pstate->finaltup, tupdesc,
1869 : 82072 : BTreeTupleGetNAtts(pstate->finaltup, rel),
1870 : : false, 0, &so->scanBehind))
1871 : 8727 : goto new_prim_scan;
1872 : :
1873 : : /*
1874 : : * When we encounter a truncated finaltup high key attribute, we're
1875 : : * optimistic about the chances of its corresponding required scan key
1876 : : * being satisfied when we go on to recheck it against tuples from this
1877 : : * page's right sibling leaf page. We consider truncated attributes to be
1878 : : * satisfied by required scan keys, which allows the primitive index scan
1879 : : * to continue to the next leaf page. We must set so->scanBehind to true
1880 : : * to remember that the last page's finaltup had "satisfied" required scan
1881 : : * keys for one or more truncated attribute values (scan keys required in
1882 : : * _either_ scan direction).
1883 : : *
1884 : : * There is a chance that _bt_readpage (which checks so->scanBehind) will
1885 : : * find that even the sibling leaf page's finaltup is < the new array
1886 : : * keys. When that happens, our optimistic policy will have incurred a
1887 : : * single extra leaf page access that could have been avoided.
1888 : : *
1889 : : * A pessimistic policy would give backward scans a gratuitous advantage
1890 : : * over forward scans. We'd punish forward scans for applying more
1891 : : * accurate information from the high key, rather than just using the
1892 : : * final non-pivot tuple as finaltup, in the style of backward scans.
1893 : : * Being pessimistic would also give some scans with non-required arrays a
1894 : : * perverse advantage over similar scans that use required arrays instead.
1895 : : *
1896 : : * This is similar to our scan-level heuristics, below. They also set
1897 : : * scanBehind to speculatively continue the primscan onto the next page.
1898 : : */
220 1899 [ + + ]: 63559 : if (so->scanBehind)
1900 : : {
1901 : : /* Truncated high key -- _bt_scanbehind_checkkeys recheck scheduled */
1902 : : }
1903 : :
1904 : : /*
1905 : : * Handle inequalities marked required in the opposite scan direction.
1906 : : * They can also signal that we should start a new primitive index scan.
1907 : : *
1908 : : * It's possible that the scan is now positioned where "matching" tuples
1909 : : * begin, and that caller's tuple satisfies all scan keys required in the
1910 : : * current scan direction. But if caller's tuple still doesn't satisfy
1911 : : * other scan keys that are required in the opposite scan direction only
1912 : : * (e.g., a required >= strategy scan key when scan direction is forward),
1913 : : * it's still possible that there are many leaf pages before the page that
1914 : : * _bt_first could skip straight to. Groveling through all those pages
1915 : : * will always give correct answers, but it can be very inefficient. We
1916 : : * must avoid needlessly scanning extra pages.
1917 : : *
1918 : : * Separately, it's possible that _bt_check_compare set continuescan=false
1919 : : * for a scan key that's required in the opposite direction only. This is
1920 : : * a special case, that happens only when _bt_check_compare sees that the
1921 : : * inequality encountered a NULL value. This signals the end of non-NULL
1922 : : * values in the current scan direction, which is reason enough to end the
1923 : : * (primitive) scan. If this happens at the start of a large group of
1924 : : * NULL values, then we shouldn't expect to be called again until after
1925 : : * the scan has already read indefinitely-many leaf pages full of tuples
1926 : : * with NULL suffix values. (_bt_first is expected to skip over the group
1927 : : * of NULLs by applying a similar "deduce NOT NULL" rule of its own, which
1928 : : * involves consing up an explicit SK_SEARCHNOTNULL key.)
1929 : : *
1930 : : * Apply a test against finaltup to detect and recover from the problem:
1931 : : * if even finaltup doesn't satisfy such an inequality, we just skip by
1932 : : * starting a new primitive index scan. When we skip, we know for sure
1933 : : * that all of the tuples on the current page following caller's tuple are
1934 : : * also before the _bt_first-wise start of tuples for our new qual. That
1935 : : * at least suggests many more skippable pages beyond the current page.
1936 : : * (when so->scanBehind and so->oppositeDirCheck are set, this'll happen
1937 : : * when we test the next page's finaltup/high key instead.)
1938 : : */
288 1939 [ + + + + ]: 62433 : else if (has_required_opposite_direction_only && pstate->finaltup &&
1940 [ + + ]: 2146 : unlikely(!_bt_oppodir_checkkeys(scan, dir, pstate->finaltup)))
288 pg@bowt.ie 1941 :GBC 1 : goto new_prim_scan;
1942 : :
220 pg@bowt.ie 1943 :CBC 62432 : continue_scan:
1944 : :
1945 : : /*
1946 : : * Stick with the ongoing primitive index scan for now.
1947 : : *
1948 : : * It's possible that later tuples will also turn out to have values that
1949 : : * are still < the now-current array keys (or > the current array keys).
1950 : : * Our caller will handle this by performing what amounts to a linear
1951 : : * search of the page, implemented by calling _bt_check_compare and then
1952 : : * _bt_tuple_before_array_skeys for each tuple.
1953 : : *
1954 : : * This approach has various advantages over a binary search of the page.
1955 : : * Repeated binary searches of the page (one binary search for every array
1956 : : * advancement) won't outperform a continuous linear search. While there
1957 : : * are workloads that a naive linear search won't handle well, our caller
1958 : : * has a "look ahead" fallback mechanism to deal with that problem.
1959 : : */
288 1960 : 63988 : pstate->continuescan = true; /* Override _bt_check_compare */
1961 : 63988 : so->needPrimScan = false; /* _bt_readpage has more tuples to check */
1962 : :
1963 [ + + ]: 63988 : if (so->scanBehind)
1964 : : {
1965 : : /*
1966 : : * Remember if recheck needs to call _bt_oppodir_checkkeys for next
1967 : : * page's finaltup (see above comments about "Handle inequalities
1968 : : * marked required in the opposite scan direction" for why).
1969 : : */
207 1970 : 1556 : so->oppositeDirCheck = has_required_opposite_direction_only;
1971 : :
1972 : : /*
1973 : : * skip by setting "look ahead" mechanism's offnum for forwards scans
1974 : : * (backwards scans check scanBehind flag directly instead)
1975 : : */
220 1976 [ + + ]: 1556 : if (ScanDirectionIsForward(dir))
1977 : 1547 : pstate->skip = pstate->maxoff + 1;
1978 : : }
1979 : :
1980 : : /* Caller's tuple doesn't match the new qual */
288 1981 : 63988 : return false;
1982 : :
1983 : 8992 : new_prim_scan:
1984 : :
1985 [ - + ]: 8992 : Assert(pstate->finaltup); /* not on rightmost/leftmost page */
1986 : :
1987 : : /*
1988 : : * Looks like another primitive index scan is required. But consider
1989 : : * continuing the current primscan based on scan-level heuristics.
1990 : : *
1991 : : * Continue the ongoing primitive scan (and schedule a recheck for when
1992 : : * the scan arrives on the next sibling leaf page) when it has already
1993 : : * read at least one leaf page before the one we're reading now. This
1994 : : * makes primscan scheduling more efficient when scanning subsets of an
1995 : : * index with many distinct attribute values matching many array elements.
1996 : : * It encourages fewer, larger primitive scans where that makes sense.
1997 : : * This will in turn encourage _bt_readpage to apply the pstate.startikey
1998 : : * optimization more often.
1999 : : *
2000 : : * Also continue the ongoing primitive index scan when it is still on the
2001 : : * first page if there have been more than NSKIPADVANCES_THRESHOLD calls
2002 : : * here that each advanced at least one of the scan's skip arrays
2003 : : * (deliberately ignore advancements that only affected SAOP arrays here).
2004 : : * A page that cycles through this many skip array elements is quite
2005 : : * likely to neighbor similar pages, that we'll also need to read.
2006 : : *
2007 : : * Note: These heuristics aren't as aggressive as you might think. We're
2008 : : * conservative about allowing a primitive scan to step from the first
2009 : : * leaf page it reads to the page's sibling page (we only allow it on
2010 : : * first pages whose finaltup strongly suggests that it'll work out, as
2011 : : * well as first pages that have a large number of skip array advances).
2012 : : * Clearing this first page finaltup hurdle is a strong signal in itself.
2013 : : *
2014 : : * Note: The NSKIPADVANCES_THRESHOLD heuristic exists only to avoid
2015 : : * pathological cases. Specifically, cases where a skip scan should just
2016 : : * behave like a traditional full index scan, but ends up "skipping" again
2017 : : * and again, descending to the prior leaf page's direct sibling leaf page
2018 : : * each time. This misbehavior would otherwise be possible during scans
2019 : : * that never quite manage to "clear the first page finaltup hurdle".
2020 : : */
207 2021 [ + + + + ]: 8992 : if (!pstate->firstpage || pstate->nskipadvances > NSKIPADVANCES_THRESHOLD)
2022 : : {
2023 : : /* Schedule a recheck once on the next (or previous) page */
220 2024 : 430 : so->scanBehind = true;
2025 : :
2026 : : /* Continue the current primitive scan after all */
2027 : 430 : goto continue_scan;
2028 : : }
2029 : :
2030 : : /*
2031 : : * End this primitive index scan, but schedule another.
2032 : : *
2033 : : * Note: We make a soft assumption that the current scan direction will
2034 : : * also be used within _bt_next, when it is asked to step off this page.
2035 : : * It is up to _bt_next to cancel this scheduled primitive index scan
2036 : : * whenever it steps to a page in the direction opposite currPos.dir.
2037 : : */
288 2038 : 8562 : pstate->continuescan = false; /* Tell _bt_readpage we're done... */
2039 : 8562 : so->needPrimScan = true; /* ...but call _bt_first again */
2040 : :
2041 [ + + ]: 8562 : if (scan->parallel_scan)
2042 : 18 : _bt_parallel_primscan_schedule(scan, so->currPos.currPage);
2043 : :
2044 : : /* Caller's tuple doesn't match the new qual */
2045 : 8562 : return false;
2046 : :
2047 : 4155 : end_toplevel_scan:
2048 : :
2049 : : /*
2050 : : * End the current primitive index scan, but don't schedule another.
2051 : : *
2052 : : * This ends the entire top-level scan in the current scan direction.
2053 : : *
2054 : : * Note: The scan's arrays (including any non-required arrays) are now in
2055 : : * their final positions for the current scan direction. If the scan
2056 : : * direction happens to change, then the arrays will already be in their
2057 : : * first positions for what will then be the current scan direction.
2058 : : */
2059 : 4155 : pstate->continuescan = false; /* Tell _bt_readpage we're done... */
220 2060 : 4155 : so->needPrimScan = false; /* ...and don't call _bt_first again */
2061 : :
2062 : : /* Caller's tuple doesn't match any qual */
288 2063 : 4155 : return false;
2064 : : }
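The pre/postcondition and the "beyond end of array element" carry described in _bt_advance_array_keys' header comment can be shown in isolation. The following standalone sketch is a simplification, not PostgreSQL code (all names are invented; it handles two required equality arrays of ints, forward scans only, and recomputes element positions from scratch rather than ratcheting the existing cursors in place): given a tuple (t0, t1), it leaves the arrays on the first element combination that is >= the tuple in index order, or reports that the arrays are exhausted.

#include <stdbool.h>
#include <stdio.h>

typedef struct SketchArray
{
	const int  *elems;			/* sorted ascending, like a SAOP array */
	int			nelems;
	int			cur;			/* current element, like cur_elem */
} SketchArray;

/* Find the smallest element >= val; return nelems when val is beyond the end */
static int
lower_bound(const SketchArray *a, int val)
{
	int			lo = 0,
				hi = a->nelems;

	while (lo < hi)
	{
		int			mid = (lo + hi) / 2;

		if (a->elems[mid] < val)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

/*
 * Advance the arrays for tuple (t0, t1).  Returns false when no remaining
 * element combination is >= the tuple, mirroring the "end_toplevel_scan"
 * outcome above.
 */
static bool
advance(SketchArray *a0, SketchArray *a1, int t0, int t1)
{
	int			pos0 = lower_bound(a0, t0);

	if (pos0 == a0->nelems)
		return false;			/* beyond end of most significant array */

	a0->cur = pos0;
	if (a0->elems[pos0] > t0)
	{
		a1->cur = 0;			/* higher-order key advanced: reset to low */
		return true;
	}

	/* exact match on the first attribute: now consider the second */
	{
		int			pos1 = lower_bound(a1, t1);

		if (pos1 < a1->nelems)
		{
			a1->cur = pos1;
			return true;
		}

		/* "beyond end" advancement: carry into the higher-order array */
		a1->cur = 0;
		if (++a0->cur == a0->nelems)
			return false;		/* carry overflowed: scan is exhausted */
		return true;
	}
}

int
main(void)
{
	static const int e0[] = {10, 20, 30};
	static const int e1[] = {1, 5, 9};
	SketchArray a0 = {e0, 3, 0},
				a1 = {e1, 3, 0};

	/* tuple (20, 7): keys ratchet to (20, 9) */
	if (advance(&a0, &a1, 20, 7))
		printf("new keys: (%d, %d)\n", a0.elems[a0.cur], a1.elems[a1.cur]);

	/* tuple (20, 12): beyond the end of the second array, carries to (30, 1) */
	if (advance(&a0, &a1, 20, 12))
		printf("new keys: (%d, %d)\n", a0.elems[a0.cur], a1.elems[a1.cur]);
	return 0;
}

The second call in main() shows the carry: (20, 12) is past the end of the second array while the first array sits on 20, so the higher-order array advances to 30 and the lower-order array rolls over to its first element.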
2065 : :
2066 : : #ifdef USE_ASSERT_CHECKING
2067 : : /*
2068 : : * Verify that the scan's "so->keyData[]" scan keys are in agreement with
2069 : : * its array key state
2070 : : */
2071 : : static bool
570 2072 : 193516 : _bt_verify_keys_with_arraykeys(IndexScanDesc scan)
2073 : : {
2074 : 193516 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2075 : 193516 : int last_sk_attno = InvalidAttrNumber,
2076 : 193516 : arrayidx = 0;
118 2077 : 193516 : bool nonrequiredseen = false;
2078 : :
570 2079 [ - + ]: 193516 : if (!so->qual_ok)
570 pg@bowt.ie 2080 :UBC 0 : return false;
2081 : :
570 pg@bowt.ie 2082 [ + + ]:CBC 583920 : for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
2083 : : {
2084 : 390404 : ScanKey cur = so->keyData + ikey;
2085 : : BTArrayKeyInfo *array;
2086 : :
2087 [ + + ]: 390404 : if (cur->sk_strategy != BTEqualStrategyNumber ||
2088 [ + + ]: 335428 : !(cur->sk_flags & SK_SEARCHARRAY))
2089 : 185535 : continue;
2090 : :
2091 : 204869 : array = &so->arrayKeys[arrayidx++];
2092 [ - + ]: 204869 : if (array->scan_key != ikey)
570 pg@bowt.ie 2093 :UBC 0 : return false;
2094 : :
207 pg@bowt.ie 2095 [ + - - + ]:CBC 204869 : if (array->num_elems == 0 || array->num_elems < -1)
570 pg@bowt.ie 2096 :UBC 0 : return false;
2097 : :
207 pg@bowt.ie 2098 [ + + ]:CBC 204869 : if (array->num_elems != -1 &&
2099 [ - + ]: 28952 : cur->sk_argument != array->elem_values[array->cur_elem])
570 pg@bowt.ie 2100 :UBC 0 : return false;
118 pg@bowt.ie 2101 [ + - ]:CBC 204869 : if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
2102 : : {
2103 [ - + ]: 204869 : if (last_sk_attno > cur->sk_attno)
118 pg@bowt.ie 2104 :UBC 0 : return false;
118 pg@bowt.ie 2105 [ - + ]:CBC 204869 : if (nonrequiredseen)
118 pg@bowt.ie 2106 :UBC 0 : return false;
2107 : : }
2108 : : else
2109 : 0 : nonrequiredseen = true;
2110 : :
570 pg@bowt.ie 2111 :CBC 204869 : last_sk_attno = cur->sk_attno;
2112 : : }
2113 : :
2114 [ - + ]: 193516 : if (arrayidx != so->numArrayKeys)
570 pg@bowt.ie 2115 :UBC 0 : return false;
2116 : :
570 pg@bowt.ie 2117 :CBC 193516 : return true;
2118 : : }
2119 : : #endif
2120 : :
2121 : : /*
2122 : : * Test whether an indextuple satisfies all the scankey conditions.
2123 : : *
2124 : : * Return true if so, false if not. If the tuple fails to pass the qual,
2125 : : * we also determine whether there's any need to continue the scan beyond
2126 : : * this tuple, and set pstate.continuescan accordingly. See comments for
2127 : : * _bt_preprocess_keys() about how this is done.
2128 : : *
2129 : : * Forward scan callers can pass a high key tuple in the hopes of having
2130 : : * us set *continuescan to false, and avoiding an unnecessary visit to
2131 : : * the page to the right.
2132 : : *
2133 : : * Advances the scan's array keys when necessary for arrayKeys=true callers.
2134 : : * Scans without any array keys must always pass arrayKeys=false.
2135 : : *
2136 : : * Also stops and starts primitive index scans for arrayKeys=true callers.
2137 : : * Scans with array keys are required to set up page state that helps us with
2138 : : * this. The page's finaltup tuple (the page high key for a forward scan, or
2139 : : * the page's first non-pivot tuple for a backward scan) must be set in
2140 : : * pstate.finaltup ahead of the first call here for the page. Set this to
2141 : : * NULL for rightmost page (or the leftmost page for backwards scans).
2142 : : *
2143 : : * scan: index scan descriptor (containing a search-type scankey)
2144 : : * pstate: page level input and output parameters
2145 : : * arrayKeys: should we advance the scan's array keys if necessary?
2146 : : * tuple: index tuple to test
2147 : : * tupnatts: number of attributes in tuple (high key may be truncated)
2148 : : */
2149 : : bool
2150 : 28410497 : _bt_checkkeys(IndexScanDesc scan, BTReadPageState *pstate, bool arrayKeys,
2151 : : IndexTuple tuple, int tupnatts)
2152 : : {
2153 : 28410497 : TupleDesc tupdesc = RelationGetDescr(scan->indexRelation);
2154 : 28410497 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
363 2155 : 28410497 : ScanDirection dir = so->currPos.dir;
207 2156 : 28410497 : int ikey = pstate->startikey;
2157 : : bool res;
2158 : :
2411 2159 [ + + - + ]: 28410497 : Assert(BTreeTupleGetNAtts(tuple, scan->indexRelation) == tupnatts);
220 2160 [ + - + - : 28410497 : Assert(!so->needPrimScan && !so->scanBehind && !so->oppositeDirCheck);
- + ]
207 2161 [ + + - + ]: 28410497 : Assert(arrayKeys || so->numArrayKeys == 0);
2162 : :
2163 : 28410497 : res = _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, arrayKeys,
2164 : 28410497 : pstate->forcenonrequired, &pstate->continuescan,
2165 : : &ikey);
2166 : :
2167 : : /*
2168 : : * If _bt_check_compare relied on the pstate.startikey optimization, call
2169 : : * again (in assert-enabled builds) to verify it didn't affect our answer.
2170 : : *
2171 : : * Note: we can't do this when !pstate.forcenonrequired, since any arrays
2172 : : * before pstate.startikey won't have advanced on this page at all.
2173 : : */
2174 [ + + - + ]: 28410497 : Assert(!pstate->forcenonrequired || arrayKeys);
2175 : : #ifdef USE_ASSERT_CHECKING
2176 [ + + + + ]: 28410497 : if (pstate->startikey > 0 && !pstate->forcenonrequired)
2177 : : {
2178 : : bool dres,
2179 : : dcontinuescan;
570 2180 : 683487 : int dikey = 0;
2181 : :
2182 : : /* Pass arrayKeys=false to avoid array side-effects */
207 2183 : 683487 : dres = _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
2184 : 683487 : pstate->forcenonrequired, &dcontinuescan,
2185 : : &dikey);
2186 [ - + ]: 683487 : Assert(res == dres);
2187 [ - + ]: 683487 : Assert(pstate->continuescan == dcontinuescan);
2188 : :
2189 : : /*
2190 : : * Should also get the same ikey result. We need a slightly weaker
2191 : : * assertion during arrayKeys calls, since they might be using an
2192 : : * array that couldn't be marked required during preprocessing.
2193 : : */
2194 [ + - - + ]: 683487 : Assert(arrayKeys || ikey == dikey);
2195 [ - + ]: 683487 : Assert(ikey <= dikey);
2196 : : }
2197 : : #endif
2198 : :
2199 : : /*
2200 : : * Only one _bt_check_compare call is required in the common case where
2201 : : * there are no equality strategy array scan keys. Otherwise we can only
2202 : : * accept _bt_check_compare's answer unreservedly when it didn't set
2203 : : * pstate.continuescan=false.
2204 : : */
570 2205 [ + + + + ]: 28410497 : if (!arrayKeys || pstate->continuescan)
2206 : 28295298 : return res;
2207 : :
2208 : : /*
2209 : : * _bt_check_compare call set continuescan=false in the presence of
2210 : : * equality type array keys. This could mean that the tuple is just past
2211 : : * the end of matches for the current array keys.
2212 : : *
2213 : : * It's also possible that the scan is still _before_ the _start_ of
2214 : : * tuples matching the current set of array keys. Check for that first.
2215 : : */
207 2216 [ - + ]: 115199 : Assert(!pstate->forcenonrequired);
570 2217 [ + + ]: 115199 : if (_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts, true,
2218 : : ikey, NULL))
2219 : : {
2220 : : /* Override _bt_check_compare, continue primitive scan */
220 2221 : 20178 : pstate->continuescan = true;
2222 : :
2223 : : /*
2224 : : * We will end up here repeatedly given a group of tuples > the
2225 : : * previous array keys and < the now-current keys (for a backwards
2226 : : * scan it's just the same, though the operators swap positions).
2227 : : *
2228 : : * We must avoid allowing this linear search process to scan very many
2229 : : * tuples from well before the start of tuples matching the current
2230 : : * array keys (or from well before the point where we'll once again
2231 : : * have to advance the scan's array keys).
2232 : : *
2233 : : * We keep the overhead under control by speculatively "looking ahead"
2234 : : * to later still-unscanned items from this same leaf page. We'll
2235 : : * only attempt this once the number of tuples that the linear search
2236 : : * process has examined starts to get out of hand.
2237 : : */
2238 : 20178 : pstate->rechecks++;
2239 [ + + ]: 20178 : if (pstate->rechecks >= LOOK_AHEAD_REQUIRED_RECHECKS)
2240 : : {
2241 : : /* See if we should skip ahead within the current leaf page */
2242 : 5696 : _bt_checkkeys_look_ahead(scan, pstate, tupnatts, tupdesc);
2243 : :
2244 : : /*
2245 : : * Might have set pstate.skip to a later page offset. When that
2246 : : * happens then _bt_readpage caller will inexpensively skip ahead
2247 : : * to a later tuple from the same page (the one just after the
2248 : : * tuple we successfully "looked ahead" to).
2249 : : */
2250 : : }
2251 : :
2252 : : /* This indextuple doesn't match the current qual, in any case */
570 2253 : 20178 : return false;
2254 : : }
2255 : :
2256 : : /*
2257 : : * Caller's tuple is >= the current set of array keys and other equality
2258 : : * constraint scan keys (or <= if this is a backwards scan). It's now
2259 : : * clear that we _must_ advance any required array keys in lockstep with
2260 : : * the scan.
2261 : : */
2262 : 95021 : return _bt_advance_array_keys(scan, pstate, tuple, tupnatts, tupdesc,
2263 : : ikey, true);
2264 : : }
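The linear-search/recheck interplay described above is easier to see in isolation. The following standalone sketch shows only the general idea, using invented names and constants (it is not _bt_checkkeys_look_ahead, which works in terms of page offsets and index tuples): once a few consecutive probes on the same page have come back "still before the key", speculatively test a slot a fixed distance ahead, and skip the intervening slots whenever that slot is also still before the key.

#include <stdio.h>

#define REQUIRED_RECHECKS	3	/* invented, in the style of LOOK_AHEAD_REQUIRED_RECHECKS */
#define DEFAULT_DISTANCE	5	/* invented, in the style of LOOK_AHEAD_DEFAULT_DISTANCE */

/*
 * Return the offset of the first page item whose value is >= key, skipping
 * ahead speculatively once the linear search has "rechecked" often enough.
 */
static int
find_first_match(const int *page, int nitems, int key)
{
	int			rechecks = 0;

	for (int off = 0; off < nitems; off++)
	{
		if (page[off] >= key)
			return off;			/* caller's linear scan can resume here */

		/* still before the key: count it, then consider looking ahead */
		if (++rechecks >= REQUIRED_RECHECKS &&
			off + DEFAULT_DISTANCE < nitems &&
			page[off + DEFAULT_DISTANCE] < key)
		{
			off += DEFAULT_DISTANCE;	/* skip items known not to match */
			rechecks = 0;
		}
	}
	return nitems;				/* no matches on this page */
}

int
main(void)
{
	int			page[20];

	for (int i = 0; i < 20; i++)
		page[i] = i * 10;		/* 0, 10, ..., 190 */

	printf("first offset >= 155: %d\n", find_first_match(page, 20, 155));	/* prints 16 */
	return 0;
}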
2265 : :
2266 : : /*
2267 : : * Test whether caller's finaltup tuple is still before the start of matches
2268 : : * for the current array keys.
2269 : : *
2270 : : * Called at the start of reading a page during a scan with array keys, though
2271 : : * only when the so->scanBehind flag was set on the scan's prior page.
2272 : : *
2273 : : * Returns false if the tuple is still before the start of matches. When that
2274 : : * happens, caller should cut its losses and start a new primitive index scan.
2275 : : * Otherwise returns true.
2276 : : */
2277 : : bool
220 2278 : 1300 : _bt_scanbehind_checkkeys(IndexScanDesc scan, ScanDirection dir,
2279 : : IndexTuple finaltup)
2280 : : {
2281 : 1300 : Relation rel = scan->indexRelation;
2282 : 1300 : TupleDesc tupdesc = RelationGetDescr(rel);
2283 : 1300 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2284 [ + + ]: 1300 : int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
2285 : : bool scanBehind;
2286 : :
2287 [ - + ]: 1300 : Assert(so->numArrayKeys);
2288 : :
2289 [ + + ]: 1300 : if (_bt_tuple_before_array_skeys(scan, dir, finaltup, tupdesc,
2290 : : nfinaltupatts, false, 0, &scanBehind))
174 2291 : 205 : return false;
2292 : :
2293 : : /*
2294 : : * If scanBehind was set, all of the untruncated attribute values from
2295 : : * finaltup that correspond to an array match the array's current element,
2296 : : * but there are other keys associated with truncated suffix attributes.
2297 : : * Array advancement must have incremented the scan's arrays on the
2298 : : * previous page, resulting in a set of array keys that happen to be an
2299 : : * exact match for the current page high key's untruncated prefix values.
2300 : : *
2301 : : * This page definitely doesn't contain tuples that the scan will need to
2302 : : * return. The next page may or may not contain relevant tuples. Handle
2303 : : * this by cutting our losses and starting a new primscan.
2304 : : */
2305 [ - + ]: 1095 : if (scanBehind)
220 pg@bowt.ie 2306 :UBC 0 : return false;
2307 : :
220 pg@bowt.ie 2308 [ + + ]:CBC 1095 : if (!so->oppositeDirCheck)
2309 : 1033 : return true;
2310 : :
2311 : 62 : return _bt_oppodir_checkkeys(scan, dir, finaltup);
2312 : : }
2313 : :
2314 : : /*
2315 : : * Test whether an indextuple fails to satisfy an inequality required in the
2316 : : * opposite direction only.
2317 : : *
2318 : : * Caller's finaltup tuple is the page high key (for forwards scans), or the
2319 : : * first non-pivot tuple (for backwards scans). Called during scans with
2320 : : * required array keys and required opposite-direction inequalities.
2321 : : *
2322 : : * Returns false if an inequality scan key required in the opposite direction
2323 : : * only isn't satisfied (and any earlier required scan keys are satisfied).
2324 : : * Otherwise returns true.
2325 : : *
2326 : : * An unsatisfied inequality required in the opposite direction only might
2327 : : * well enable skipping over many leaf pages, provided another _bt_first call
2328 : : * takes place. This type of unsatisfied inequality won't usually cause
2329 : : * _bt_checkkeys to stop the scan to consider array advancement/starting a new
2330 : : * primitive index scan.
2331 : : */
2332 : : static bool
377 2333 : 2208 : _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
2334 : : IndexTuple finaltup)
2335 : : {
2336 : 2208 : Relation rel = scan->indexRelation;
2337 : 2208 : TupleDesc tupdesc = RelationGetDescr(rel);
2338 : 2208 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2339 [ + - ]: 2208 : int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
2340 : : bool continuescan;
2341 : 2208 : ScanDirection flipped = -dir;
2342 : 2208 : int ikey = 0;
2343 : :
2344 [ - + ]: 2208 : Assert(so->numArrayKeys);
2345 : :
207 2346 : 2208 : _bt_check_compare(scan, flipped, finaltup, nfinaltupatts, tupdesc, false,
2347 : : false, &continuescan,
2348 : : &ikey);
2349 : :
377 2350 [ + - + + ]: 2208 : if (!continuescan && so->keyData[ikey].sk_strategy != BTEqualStrategyNumber)
377 pg@bowt.ie 2351 :GBC 1 : return false;
2352 : :
377 pg@bowt.ie 2353 :CBC 2207 : return true;
2354 : : }
2355 : :
2356 : : /*
2357 : : * Determines an offset to the first scan key (an so->keyData[]-wise offset)
2358 : : * that is _not_ guaranteed to be satisfied by every tuple from pstate.page,
2359 : : * which is set in pstate.startikey for _bt_checkkeys calls for the page.
2360 : : * This allows caller to save cycles on comparisons of a prefix of keys while
2361 : : * reading pstate.page.
2362 : : *
2363 : : * Also determines if later calls to _bt_checkkeys (for pstate.page) should be
2364 : : * forced to treat all required scan keys >= pstate.startikey as nonrequired
2365 : : * (that is, if they're to be treated as if any SK_BT_REQFWD/SK_BT_REQBKWD
2366 : : * markings that were set by preprocessing were not set at all, for the
2367 : : * duration of _bt_checkkeys calls prior to the call for pstate.finaltup).
2368 : : * This is indicated to caller by setting pstate.forcenonrequired.
2369 : : *
2370 : : * Call here at the start of reading a leaf page beyond the first one for the
2371 : : * primitive index scan. We consider all non-pivot tuples, so it doesn't make
2372 : : * sense to call here when only a subset of those tuples can ever be read.
2373 : : * This is also a good idea on performance grounds; not calling here when on
2374 : : * the first page (first for the current primitive scan) avoids wasting cycles
2375 : : * during selective point queries. They typically don't stand to gain as much
2376 : : * when we can set pstate.startikey, and are likely to notice the overhead of
2377 : : * calling here. (Also, allowing pstate.forcenonrequired to be set on a
2378 : : * primscan's first page would mislead _bt_advance_array_keys, which expects
2379 : : * pstate.nskipadvances to be representative of every first page's key space.)
2380 : : *
2381 : : * Caller must call _bt_start_array_keys and reset startikey/forcenonrequired
2382 : : * ahead of the finaltup _bt_checkkeys call when we set forcenonrequired=true.
2383 : : * This will give _bt_checkkeys the opportunity to call _bt_advance_array_keys
2384 : : * with sktrig_required=true, restoring the invariant that the scan's required
2385 : : * arrays always track the scan's progress through the index's key space.
2386 : : * Caller won't need to do this on the rightmost/leftmost page in the index
2387 : : * (where pstate.finaltup isn't ever set), since forcenonrequired will never
2388 : : * be set here in the first place.
2389 : : */
2390 : : void
207 2391 : 15250 : _bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate)
2392 : : {
2393 : 15250 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2394 : 15250 : Relation rel = scan->indexRelation;
2395 : 15250 : TupleDesc tupdesc = RelationGetDescr(rel);
2396 : : ItemId iid;
2397 : : IndexTuple firsttup,
2398 : : lasttup;
2399 : 15250 : int startikey = 0,
2400 : 15250 : arrayidx = 0,
2401 : : firstchangingattnum;
2402 : 15250 : bool start_past_saop_eq = false;
2403 : :
2404 [ - + ]: 15250 : Assert(!so->scanBehind);
2405 [ - + ]: 15250 : Assert(pstate->minoff < pstate->maxoff);
2406 [ - + ]: 15250 : Assert(!pstate->firstpage);
2407 [ - + ]: 15250 : Assert(pstate->startikey == 0);
2408 [ + + + + : 15250 : Assert(!so->numArrayKeys || pstate->finaltup ||
- + - - ]
2409 : : P_RIGHTMOST(BTPageGetOpaque(pstate->page)) ||
2410 : : P_LEFTMOST(BTPageGetOpaque(pstate->page)));
2411 : :
2412 [ + + ]: 15250 : if (so->numberOfKeys == 0)
2413 : 6557 : return;
2414 : :
2415 : : /* minoff is an offset to the lowest non-pivot tuple on the page */
2416 : 8693 : iid = PageGetItemId(pstate->page, pstate->minoff);
2417 : 8693 : firsttup = (IndexTuple) PageGetItem(pstate->page, iid);
2418 : :
2419 : : /* maxoff is an offset to the highest non-pivot tuple on the page */
2420 : 8693 : iid = PageGetItemId(pstate->page, pstate->maxoff);
2421 : 8693 : lasttup = (IndexTuple) PageGetItem(pstate->page, iid);
2422 : :
2423 : : /* Determine the first attribute whose values change on caller's page */
2424 : 8693 : firstchangingattnum = _bt_keep_natts_fast(rel, firsttup, lasttup);
2425 : :
2426 [ + + ]: 13121 : for (; startikey < so->numberOfKeys; startikey++)
2427 : : {
2428 : 10107 : ScanKey key = so->keyData + startikey;
2429 : : BTArrayKeyInfo *array;
2430 : : Datum firstdatum,
2431 : : lastdatum;
2432 : : bool firstnull,
2433 : : lastnull;
2434 : : int32 result;
2435 : :
2436 : : /*
2437 : : * Determine if it's safe to set pstate.startikey to an offset to a
2438 : : * key that comes after this key, by examining this key
2439 : : */
2440 [ - + ]: 10107 : if (key->sk_flags & SK_ROW_HEADER)
2441 : : {
2442 : : /* RowCompare inequality (header key) */
43 pg@bowt.ie 2443 :UNC 0 : ScanKey subkey = (ScanKey) DatumGetPointer(key->sk_argument);
2444 : 0 : bool satisfied = false;
2445 : :
2446 : : for (;;)
2447 : 0 : {
2448 : : int cmpresult;
2449 : 0 : bool firstsatisfies = false;
2450 : :
2451 [ # # ]: 0 : if (subkey->sk_attno > firstchangingattnum) /* >, not >= */
2452 : 0 : break; /* unsafe, preceding attr has multiple
2453 : : * distinct values */
2454 : :
2455 [ # # ]: 0 : if (subkey->sk_flags & SK_ISNULL)
2456 : 0 : break; /* unsafe, unsatisfiable NULL subkey arg */
2457 : :
2458 : 0 : firstdatum = index_getattr(firsttup, subkey->sk_attno,
2459 : : tupdesc, &firstnull);
2460 : 0 : lastdatum = index_getattr(lasttup, subkey->sk_attno,
2461 : : tupdesc, &lastnull);
2462 : :
2463 [ # # # # ]: 0 : if (firstnull || lastnull)
2464 : : break; /* unsafe, NULL value won't satisfy subkey */
2465 : :
2466 : : /*
2467 : : * Compare the first tuple's datum for this row compare member
2468 : : */
2469 : 0 : cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
2470 : : subkey->sk_collation,
2471 : : firstdatum,
2472 : : subkey->sk_argument));
2473 [ # # ]: 0 : if (subkey->sk_flags & SK_BT_DESC)
2474 [ # # ]: 0 : INVERT_COMPARE_RESULT(cmpresult);
2475 : :
2476 [ # # # # ]: 0 : if (cmpresult != 0 || (subkey->sk_flags & SK_ROW_END))
2477 : : {
2478 : 0 : firstsatisfies = _bt_rowcompare_cmpresult(subkey,
2479 : : cmpresult);
2480 [ # # ]: 0 : if (!firstsatisfies)
2481 : : {
2482 : : /* Unsafe, firstdatum does not satisfy subkey */
2483 : 0 : break;
2484 : : }
2485 : : }
2486 : :
2487 : : /*
2488 : : * Compare the last tuple's datum for this row compare member
2489 : : */
2490 : 0 : cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
2491 : : subkey->sk_collation,
2492 : : lastdatum,
2493 : : subkey->sk_argument));
2494 [ # # ]: 0 : if (subkey->sk_flags & SK_BT_DESC)
2495 [ # # ]: 0 : INVERT_COMPARE_RESULT(cmpresult);
2496 : :
2497 [ # # # # ]: 0 : if (cmpresult != 0 || (subkey->sk_flags & SK_ROW_END))
2498 : : {
2499 [ # # ]: 0 : if (!firstsatisfies)
2500 : : {
2501 : : /*
2502 : : * It's only safe to set startikey beyond the row
2503 : : * compare header key when both firsttup and lasttup
2504 : : * satisfy the key as a whole based on the same
2505 : : * deciding subkey/attribute. That can't happen now.
2506 : : */
2507 : 0 : break; /* unsafe */
2508 : : }
2509 : :
2510 : 0 : satisfied = _bt_rowcompare_cmpresult(subkey, cmpresult);
2511 : 0 : break; /* safe iff 'satisfied' is true */
2512 : : }
2513 : :
2514 : : /* Move on to next row member/subkey */
2515 [ # # ]: 0 : if (subkey->sk_flags & SK_ROW_END)
2516 : 0 : break; /* defensive */
2517 : 0 : subkey++;
2518 : :
2519 : : /*
2520 : : * We deliberately don't check if the next subkey has the same
2521 : : * strategy as this iteration's subkey (which happens when
2522 : : * subkeys for both ASC and DESC columns are used together),
2523 : : * nor if any subkey is marked required. This is safe because
2524 : : * in general all prior index attributes must have only one
2525 : : * distinct value (across all of the tuples on the page) in
2526 : : * order for us to even consider any subkey's attribute.
2527 : : */
2528 : : }
2529 : :
2530 [ # # ]: 0 : if (satisfied)
2531 : : {
2532 : : /* Safe, row compare satisfied by every tuple on page */
43 pg@bowt.ie 2533 :GNC 4305 : continue;
2534 : : }
2535 : :
2536 : 5679 : break; /* unsafe */
2537 : : }
207 pg@bowt.ie 2538 [ + + ]:CBC 10107 : if (key->sk_strategy != BTEqualStrategyNumber)
2539 : : {
2540 : : /*
2541 : : * Scalar inequality key.
2542 : : *
2543 : : * It's definitely safe for _bt_checkkeys to avoid assessing this
2544 : : * inequality when the page's first and last non-pivot tuples both
2545 : : * satisfy the inequality (since the same must also be true of all
2546 : : * the tuples in between these two).
2547 : : *
2548 : : * Unlike the "=" case, it doesn't matter if this attribute has
2549 : : * more than one distinct value (though it _is_ necessary for any
2550 : : * and all _prior_ attributes to contain no more than one distinct
2551 : : * value amongst all of the tuples from pstate.page).
2552 : : */
2553 [ + + ]: 2175 : if (key->sk_attno > firstchangingattnum) /* >, not >= */
2554 : 179 : break; /* unsafe, preceding attr has multiple
2555 : : * distinct values */
2556 : :
2557 : 1996 : firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc, &firstnull);
2558 : 1996 : lastdatum = index_getattr(lasttup, key->sk_attno, tupdesc, &lastnull);
2559 : :
2560 [ + + ]: 1996 : if (key->sk_flags & SK_ISNULL)
2561 : : {
2562 : : /* IS NOT NULL key */
2563 [ - + ]: 35 : Assert(key->sk_flags & SK_SEARCHNOTNULL);
2564 : :
2565 [ + - + - ]: 35 : if (firstnull || lastnull)
2566 : : break; /* unsafe */
2567 : :
2568 : : /* Safe, IS NOT NULL key satisfied by every tuple */
2569 : 35 : continue;
2570 : : }
2571 : :
2572 : : /* Test firsttup */
2573 [ + - ]: 1961 : if (firstnull ||
2574 [ + - ]: 1961 : !DatumGetBool(FunctionCall2Coll(&key->sk_func,
2575 : : key->sk_collation, firstdatum,
2576 : : key->sk_argument)))
2577 : : break; /* unsafe */
2578 : :
2579 : : /* Test lasttup */
2580 [ + - ]: 1961 : if (lastnull ||
2581 [ + + ]: 1961 : !DatumGetBool(FunctionCall2Coll(&key->sk_func,
2582 : : key->sk_collation, lastdatum,
2583 : : key->sk_argument)))
2584 : : break; /* unsafe */
2585 : :
2586 : : /* Safe, scalar inequality satisfied by every tuple */
2587 : 1919 : continue;
2588 : : }
2589 : :
2590 : : /* Some = key (could be a scalar = key, could be an array = key) */
2591 [ - + ]: 7932 : Assert(key->sk_strategy == BTEqualStrategyNumber);
2592 : :
2593 [ + + ]: 7932 : if (!(key->sk_flags & SK_SEARCHARRAY))
2594 : : {
2595 : : /*
2596 : : * Scalar = key (possibly an IS NULL key).
2597 : : *
2598 : : * It is unsafe to set pstate.startikey to an ikey beyond this
2599 : : * key, unless the = key is satisfied by every possible tuple on
2600 : : * the page (possible only when the attribute has just one
2601 : : * distinct value among all tuples on the page).
2602 : : */
2603 [ + + ]: 6187 : if (key->sk_attno >= firstchangingattnum)
2604 : 5025 : break; /* unsafe, multiple distinct attr values */
2605 : :
2606 : 1162 : firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc,
2607 : : &firstnull);
2608 [ - + ]: 1162 : if (key->sk_flags & SK_ISNULL)
2609 : : {
2610 : : /* IS NULL key */
207 pg@bowt.ie 2611 [ # # ]:UBC 0 : Assert(key->sk_flags & SK_SEARCHNULL);
2612 : :
2613 [ # # ]: 0 : if (!firstnull)
2614 : 0 : break; /* unsafe */
2615 : :
2616 : : /* Safe, IS NULL key satisfied by every tuple */
2617 : 0 : continue;
2618 : : }
207 pg@bowt.ie 2619 [ + - ]:CBC 1162 : if (firstnull ||
2620 [ + - ]: 1162 : !DatumGetBool(FunctionCall2Coll(&key->sk_func,
2621 : : key->sk_collation, firstdatum,
2622 : : key->sk_argument)))
2623 : : break; /* unsafe */
2624 : :
2625 : : /* Safe, scalar = key satisfied by every tuple */
2626 : 1162 : continue;
2627 : : }
2628 : :
2629 : : /* = array key (could be a SAOP array, could be a skip array) */
2630 : 1745 : array = &so->arrayKeys[arrayidx++];
2631 [ - + ]: 1745 : Assert(array->scan_key == startikey);
2632 [ + + ]: 1745 : if (array->num_elems != -1)
2633 : : {
2634 : : /*
2635 : : * SAOP array = key.
2636 : : *
2637 : : * Handle this like we handle scalar = keys (though we binary search
2638 : : * for a matching element, to avoid relying on the key's sk_argument).
2639 : : */
2640 [ + - ]: 403 : if (key->sk_attno >= firstchangingattnum)
2641 : 403 : break; /* unsafe, multiple distinct attr values */
2642 : :
207 pg@bowt.ie 2643 :UBC 0 : firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc,
2644 : : &firstnull);
2645 : 0 : _bt_binsrch_array_skey(&so->orderProcs[startikey],
2646 : : false, NoMovementScanDirection,
2647 : : firstdatum, firstnull, array, key,
2648 : : &result);
2649 [ # # ]: 0 : if (result != 0)
2650 : 0 : break; /* unsafe */
2651 : :
2652 : : /* Safe, SAOP = key satisfied by every tuple */
2653 : 0 : start_past_saop_eq = true;
2654 : 0 : continue;
2655 : : }
2656 : :
2657 : : /*
2658 : : * Skip array = key
2659 : : */
207 pg@bowt.ie 2660 [ - + ]:CBC 1342 : Assert(key->sk_flags & SK_BT_SKIP);
2661 [ + + ]: 1342 : if (array->null_elem)
2662 : : {
2663 : : /*
2664 : : * Non-range skip array = key.
2665 : : *
2666 : : * Safe, non-range skip array "satisfied" by every tuple on page
2667 : : * (safe even when "key->sk_attno > firstchangingattnum").
2668 : : */
2669 : 1189 : continue;
2670 : : }
2671 : :
2672 : : /*
2673 : : * Range skip array = key.
2674 : : *
2675 : : * Handle this like we handle scalar inequality keys (but avoid using
2676 : : * key's sk_argument directly, as in the SAOP array case).
2677 : : */
2678 [ + + ]: 153 : if (key->sk_attno > firstchangingattnum) /* >, not >= */
2679 : 24 : break; /* unsafe, preceding attr has multiple
2680 : : * distinct values */
2681 : :
2682 : 129 : firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc, &firstnull);
2683 : 129 : lastdatum = index_getattr(lasttup, key->sk_attno, tupdesc, &lastnull);
2684 : :
2685 : : /* Test firsttup */
2686 : 129 : _bt_binsrch_skiparray_skey(false, ForwardScanDirection,
2687 : : firstdatum, firstnull, array, key,
2688 : : &result);
2689 [ - + ]: 129 : if (result != 0)
207 pg@bowt.ie 2690 :UBC 0 : break; /* unsafe */
2691 : :
2692 : : /* Test lasttup */
207 pg@bowt.ie 2693 :CBC 129 : _bt_binsrch_skiparray_skey(false, ForwardScanDirection,
2694 : : lastdatum, lastnull, array, key,
2695 : : &result);
2696 [ + + ]: 129 : if (result != 0)
2697 : 6 : break; /* unsafe */
2698 : :
2699 : : /* Safe, range skip array satisfied by every tuple on page */
2700 : : }
2701 : :
2702 : : /*
2703 : : * Use of forcenonrequired is typically undesirable, since it'll force
2704 : : * _bt_readpage caller to read every tuple on the page -- even though, in
2705 : : * general, it might well be possible to end the scan on an earlier tuple.
2706 : : * However, caller must use forcenonrequired when start_past_saop_eq=true,
2707 : : * since the usual required array behavior might fail to roll over to the
2708 : : * SAOP array.
2709 : : *
2710 : : * We always prefer forcenonrequired=true during scans with skip arrays
2711 : : * (except on the first page of each primitive index scan), though -- even
2712 : : * when "startikey == 0". That way, _bt_advance_array_keys's low-order
2713 : : * key precheck optimization can always be used (unless on the first page
2714 : : * of the scan). It seems slightly preferable to check more tuples when
2715 : : * that allows us to do significantly less skip array maintenance.
2716 : : */
2717 [ + - + + ]: 8693 : pstate->forcenonrequired = (start_past_saop_eq || so->skipScan);
2718 : 8693 : pstate->startikey = startikey;
2719 : :
2720 : : /*
2721 : : * _bt_readpage caller is required to call _bt_checkkeys against page's
2722 : : * finaltup with forcenonrequired=false whenever we initially set
2723 : : * forcenonrequired=true. That way the scan's arrays will reliably track
2724 : : * its progress through the index's key space.
2725 : : *
2726 : : * We don't expect this when _bt_readpage caller has no finaltup due to
2727 : : * its page being the rightmost (or the leftmost, during backwards scans).
2728 : : * When we see that _bt_readpage has no finaltup, back out of everything.
2729 : : */
2730 [ + + - + ]: 8693 : Assert(!pstate->forcenonrequired || so->numArrayKeys);
2731 [ + + + + ]: 8693 : if (pstate->forcenonrequired && !pstate->finaltup)
2732 : : {
2733 : 235 : pstate->forcenonrequired = false;
2734 : 235 : pstate->startikey = 0;
2735 : : }
2736 : : }
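/*
 * Editor's worked example for the startikey logic above (an illustrative,
 * assumed scenario rather than anything from the original source): suppose
 * every non-pivot tuple on the page has a = 5 while b varies from 10 to 20,
 * and the scan's quals are "a = 5 AND b >= 7".  The "a = 5" key is safe to
 * skip because "a" has a single distinct value on the page (its attribute
 * number is below firstchangingattnum) and that value satisfies the key.
 * The "b >= 7" inequality is safe because both firsttup (b = 10) and
 * lasttup (b = 20) satisfy it.  startikey can therefore be set past both
 * keys, so _bt_checkkeys won't evaluate either one against this page's
 * tuples.
 */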
2737 : :
2738 : : /*
2739 : : * Test whether an indextuple satisfies current scan condition.
2740 : : *
2741 : : * Return true if so, false if not. If not, also sets *continuescan to false
2742 : : * when it's also not possible for any later tuples to pass the current qual
2743 : : * (with the scan's current set of array keys, in the current scan direction),
2744 : : * in addition to setting *ikey to the so->keyData[] subscript/offset for the
2745 : : * unsatisfied scan key (needed when caller must consider advancing the scan's
2746 : : * array keys).
2747 : : *
2748 : : * This is a subroutine for _bt_checkkeys. We provisionally assume that
2749 : : * reaching the end of the current set of required keys (in particular the
2750 : : * current required array keys) ends the ongoing (primitive) index scan.
2751 : : * Callers without array keys should just end the scan right away when they
2752 : : * find that continuescan has been set to false here by us. Things are more
2753 : : * complicated for callers with array keys.
2754 : : *
2755 : : * Callers with array keys must first consider advancing the arrays when
2756 : : * continuescan has been set to false here by us. They must then consider if
2757 : : * it really does make sense to end the current (primitive) index scan, in
2758 : : * light of everything that is known at that point. (In general when we set
2759 : : * continuescan=false for these callers it must be treated as provisional.)
2760 : : *
2761 : : * We deal with advancing unsatisfied non-required arrays directly, though.
2762 : : * This is safe, since by definition non-required keys can't end the scan.
2763 : : * This is just how we determine if non-required arrays are just unsatisfied
2764 : : * by the current array key, or if they're truly unsatisfied (that is, if
2765 : : * they're unsatisfied by every possible array key).
2766 : : *
2767 : : * Pass advancenonrequired=false to avoid all array related side effects.
2768 : : * This allows _bt_advance_array_keys caller to avoid infinite recursion.
2769 : : *
2770 : : * Pass forcenonrequired=true to instruct us to treat all keys as nonrequired.
2771 : : * This is used to make it safe to temporarily stop properly maintaining the
2772 : : * scan's required arrays. _bt_checkkeys caller (_bt_readpage, actually)
2773 : : * determines a prefix of keys that must satisfy every possible corresponding
2774 : : * index attribute value from its page, which is passed to us via *ikey arg
2775 : : * (this is the first key that might be unsatisfied by tuples on the page).
2776 : : * Obviously, we won't maintain any array keys from before *ikey, so it's
2777 : : * quite possible for such arrays to "fall behind" the index's keyspace.
2778 : : * Caller will need to "catch up" by passing forcenonrequired=true (alongside
2779 : : * an *ikey=0) once the page's finaltup is reached.
2780 : : *
2781 : : * Note: it's safe to pass an *ikey > 0 with forcenonrequired=false, but only
2782 : : * when caller determines that it won't affect array maintenance.
2783 : : */
2784 : : static bool
570 2785 : 29123974 : _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
2786 : : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
2787 : : bool advancenonrequired, bool forcenonrequired,
2788 : : bool *continuescan, int *ikey)
2789 : : {
2790 : 29123974 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2791 : :
2792 : 29123974 : *continuescan = true; /* default assumption */
2793 : :
2794 [ + + ]: 55940823 : for (; *ikey < so->numberOfKeys; (*ikey)++)
2795 : : {
2796 : 32337787 : ScanKey key = so->keyData + *ikey;
2797 : : Datum datum;
2798 : : bool isNull;
753 akorotkov@postgresql 2799 : 32337787 : bool requiredSameDir = false,
570 pg@bowt.ie 2800 : 32337787 : requiredOppositeDirOnly = false;
2801 : :
2802 : : /*
2803 : : * Check if the key is required in the current scan direction, in the
2804 : : * opposite scan direction _only_, or in neither direction (except
2805 : : * when we're forced to treat all scan keys as nonrequired)
2806 : : */
207 2807 [ + + ]: 32337787 : if (forcenonrequired)
2808 : : {
2809 : : /* treating scan's keys as non-required */
2810 : : }
2811 [ + + + + ]: 32135958 : else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
2812 [ + + + + ]: 7471468 : ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
753 akorotkov@postgresql 2813 : 24676861 : requiredSameDir = true;
2814 [ + + - + ]: 7459097 : else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsBackward(dir)) ||
2815 [ + - + - ]: 2916686 : ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsForward(dir)))
570 pg@bowt.ie 2816 : 7459097 : requiredOppositeDirOnly = true;
2817 : :
2411 2818 [ + + ]: 32337787 : if (key->sk_attno > tupnatts)
2819 : : {
2820 : : /*
2821 : : * This attribute is truncated (must be high key). The value for
2822 : : * this attribute in the first non-pivot tuple on the page to the
2823 : : * right could be any possible value. Assume that truncated
2824 : : * attribute passes the qual.
2825 : : */
2071 2826 [ - + ]: 1158 : Assert(BTreeTupleIsPivot(tuple));
2411 2827 : 9735573 : continue;
2828 : : }
2829 : :
2830 : : /*
2831 : : * A skip array scan key uses one of several sentinel values. We just
2832 : : * fall back on _bt_tuple_before_array_skeys when we see such a value.
2833 : : */
207 2834 [ + + ]: 32336629 : if (key->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL |
2835 : : SK_BT_NEXT | SK_BT_PRIOR))
2836 : : {
2837 [ - + ]: 17685 : Assert(key->sk_flags & SK_SEARCHARRAY);
2838 [ - + ]: 17685 : Assert(key->sk_flags & SK_BT_SKIP);
2839 [ + + - + ]: 17685 : Assert(requiredSameDir || forcenonrequired);
2840 : :
2841 : : /*
2842 : : * Cannot fall back on _bt_tuple_before_array_skeys when we're
2843 : : * treating the scan's keys as nonrequired, though. Just handle
2844 : : * this like any other non-required equality-type array key.
2845 : : */
2846 [ + + ]: 17685 : if (forcenonrequired)
2847 : 5520938 : return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2848 : : tupdesc, *ikey, false);
2849 : :
2850 : 16671 : *continuescan = false;
2851 : 16671 : return false;
2852 : : }
2853 : :
2854 : : /* row-comparison keys need special processing */
7216 tgl@sss.pgh.pa.us 2855 [ + + ]: 32318944 : if (key->sk_flags & SK_ROW_HEADER)
2856 : : {
2411 pg@bowt.ie 2857 [ + + ]: 1227 : if (_bt_check_rowcompare(key, tuple, tupnatts, tupdesc, dir,
2858 : : forcenonrequired, continuescan))
7216 tgl@sss.pgh.pa.us 2859 : 1194 : continue;
2411 pg@bowt.ie 2860 : 33 : return false;
2861 : : }
2862 : :
10278 bruce@momjian.us 2863 : 32317717 : datum = index_getattr(tuple,
9226 tgl@sss.pgh.pa.us 2864 : 32317717 : key->sk_attno,
2865 : : tupdesc,
2866 : : &isNull);
2867 : :
2868 [ + + ]: 32317717 : if (key->sk_flags & SK_ISNULL)
2869 : : {
2870 : : /* Handle IS NULL/NOT NULL tests */
5779 2871 [ + + ]: 9742107 : if (key->sk_flags & SK_SEARCHNULL)
2872 : : {
2873 [ + + ]: 9064 : if (isNull)
5723 bruce@momjian.us 2874 : 214 : continue; /* tuple satisfies this qual */
2875 : : }
2876 : : else
2877 : : {
5779 tgl@sss.pgh.pa.us 2878 [ - + ]: 9733043 : Assert(key->sk_flags & SK_SEARCHNOTNULL);
207 pg@bowt.ie 2879 [ - + ]: 9733043 : Assert(!(key->sk_flags & SK_BT_SKIP));
5779 tgl@sss.pgh.pa.us 2880 [ + + ]: 9733043 : if (!isNull)
5723 bruce@momjian.us 2881 : 9733007 : continue; /* tuple satisfies this qual */
2882 : : }
2883 : :
2884 : : /*
2885 : : * Tuple fails this qual. If it's a required qual for the current
2886 : : * scan direction, then we can conclude no further tuples will
2887 : : * pass, either.
2888 : : */
753 akorotkov@postgresql 2889 [ + + ]: 8886 : if (requiredSameDir)
6780 tgl@sss.pgh.pa.us 2890 : 102 : *continuescan = false;
207 pg@bowt.ie 2891 [ - + ]: 8784 : else if (unlikely(key->sk_flags & SK_BT_SKIP))
2892 : : {
2893 : : /*
2894 : : * If we're treating scan keys as nonrequired, and encounter a
2895 : : * skip array scan key whose current element is NULL, then it
2896 : : * must be a non-range skip array. It must be satisfied, so
2897 : : * there's no need to call _bt_advance_array_keys to check.
2898 : : */
207 pg@bowt.ie 2899 [ # # # # ]:UBC 0 : Assert(forcenonrequired && *ikey > 0);
2900 : 0 : continue;
2901 : : }
2902 : :
2903 : : /*
2904 : : * This indextuple doesn't match the qual.
2905 : : */
2411 pg@bowt.ie 2906 :CBC 8886 : return false;
2907 : : }
2908 : :
9226 tgl@sss.pgh.pa.us 2909 [ + + ]: 22575610 : if (isNull)
2910 : : {
2911 : : /*
2912 : : * Scalar scan key isn't satisfied by NULL tuple value.
2913 : : *
2914 : : * If we're treating scan keys as nonrequired, and key is for a
2915 : : * skip array, then we must attempt to advance the array to NULL
2916 : : * (if we're successful then the tuple might match the qual).
2917 : : */
183 pg@bowt.ie 2918 [ - + - - : 114 : if (unlikely(forcenonrequired && key->sk_flags & SK_BT_SKIP))
- + ]
183 pg@bowt.ie 2919 :UBC 0 : return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2920 : : tupdesc, *ikey, false);
2921 : :
5109 tgl@sss.pgh.pa.us 2922 [ - + ]:CBC 114 : if (key->sk_flags & SK_BT_NULLS_FIRST)
2923 : : {
2924 : : /*
2925 : : * Since NULLs are sorted before non-NULLs, we know we have
2926 : : * reached the lower limit of the range of values for this
2927 : : * index attr. On a backward scan, we can stop if this qual
2928 : : * is one of the "must match" subset. We can stop regardless
2929 : : * of whether the qual is > or <, so long as it's required,
2930 : : * because it's not possible for any future tuples to pass. On
2931 : : * a forward scan, however, we must keep going, because we may
2932 : : * have initially positioned to the start of the index.
2933 : : * (_bt_advance_array_keys also relies on this behavior during
2934 : : * forward scans.)
2935 : : */
207 pg@bowt.ie 2936 [ # # # # :UBC 0 : if ((requiredSameDir || requiredOppositeDirOnly) &&
# # ]
2937 : : ScanDirectionIsBackward(dir))
5109 tgl@sss.pgh.pa.us 2938 : 0 : *continuescan = false;
2939 : : }
2940 : : else
2941 : : {
2942 : : /*
2943 : : * Since NULLs are sorted after non-NULLs, we know we have
2944 : : * reached the upper limit of the range of values for this
2945 : : * index attr. On a forward scan, we can stop if this qual is
2946 : : * one of the "must match" subset. We can stop regardless of
2947 : : * whether the qual is > or <, so long as it's required,
2948 : : * because it's not possible for any future tuples to pass. On
2949 : : * a backward scan, however, we must keep going, because we
2950 : : * may have initially positioned to the end of the index.
2951 : : * (_bt_advance_array_keys also relies on this behavior during
2952 : : * backward scans.)
2953 : : */
207 pg@bowt.ie 2954 [ + + + - :CBC 114 : if ((requiredSameDir || requiredOppositeDirOnly) &&
+ + ]
2955 : : ScanDirectionIsForward(dir))
5109 tgl@sss.pgh.pa.us 2956 : 111 : *continuescan = false;
2957 : : }
2958 : :
2959 : : /*
2960 : : * This indextuple doesn't match the qual.
2961 : : */
2411 pg@bowt.ie 2962 : 114 : return false;
2963 : : }
2964 : :
207 2965 [ + + ]: 22575496 : if (!DatumGetBool(FunctionCall2Coll(&key->sk_func, key->sk_collation,
2966 : : datum, key->sk_argument)))
2967 : : {
2968 : : /*
2969 : : * Tuple fails this qual. If it's a required qual for the current
2970 : : * scan direction, then we can conclude no further tuples will
2971 : : * pass, either.
2972 : : *
2973 : : * Note: because we stop the scan as soon as any required equality
2974 : : * qual fails, it is critical that equality quals be used for the
2975 : : * initial positioning in _bt_first() when they are available. See
2976 : : * comments in _bt_first().
2977 : : */
753 akorotkov@postgresql 2978 [ + + ]: 5494220 : if (requiredSameDir)
7218 tgl@sss.pgh.pa.us 2979 : 5317203 : *continuescan = false;
2980 : :
2981 : : /*
2982 : : * If this is a non-required equality-type array key, the tuple
2983 : : * needs to be checked against every possible array key. Handle
2984 : : * this by "advancing" the scan key's array to a matching value
2985 : : * (if we're successful then the tuple might match the qual).
2986 : : */
570 pg@bowt.ie 2987 [ + + ]: 177017 : else if (advancenonrequired &&
2988 [ + + ]: 173296 : key->sk_strategy == BTEqualStrategyNumber &&
2989 [ + + ]: 135394 : (key->sk_flags & SK_SEARCHARRAY))
2990 : 3249 : return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2991 : : tupdesc, *ikey, false);
2992 : :
2993 : : /*
2994 : : * This indextuple doesn't match the qual.
2995 : : */
2411 2996 : 5490971 : return false;
2997 : : }
2998 : : }
2999 : :
3000 : : /* If we get here, the tuple passes all index quals. */
3001 : 23603036 : return true;
3002 : : }
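/*
 * Editor's sketch (not part of nbtree, shown only to illustrate the calling
 * contract described above): roughly how a caller that has no array keys
 * could consume _bt_check_compare's outputs.  The real caller is
 * _bt_checkkeys; the wrapper name below is hypothetical.
 */
#ifdef NOT_USED
static bool
simple_checkkeys_step(IndexScanDesc scan, ScanDirection dir,
					  IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
					  bool *continuescan)
{
	int			ikey = 0;

	/*
	 * With no array keys, continuescan=false can be taken at face value:
	 * once it is set, no later tuple can pass in this scan direction.
	 */
	return _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc,
							 false, /* advancenonrequired: no arrays */
							 false, /* forcenonrequired: maintain keys */
							 continuescan, &ikey);
}
#endif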
3003 : :
3004 : : /*
3005 : : * Call here when a row compare member returns a non-zero result, or with the
3006 : : * result for the final ROW_END row compare member (no matter the cmpresult).
3007 : : *
3008 : : * cmpresult indicates the overall result of the row comparison (must already
3009 : : * be commuted for DESC subkeys), and subkey is the deciding row member.
3010 : : */
3011 : : static bool
43 pg@bowt.ie 3012 :GNC 1194 : _bt_rowcompare_cmpresult(ScanKey subkey, int cmpresult)
3013 : : {
3014 : : bool satisfied;
3015 : :
3016 [ + + + + : 1194 : switch (subkey->sk_strategy)
- ]
3017 : : {
3018 : 93 : case BTLessStrategyNumber:
3019 : 93 : satisfied = (cmpresult < 0);
3020 : 93 : break;
3021 : 792 : case BTLessEqualStrategyNumber:
3022 : 792 : satisfied = (cmpresult <= 0);
3023 : 792 : break;
3024 : 123 : case BTGreaterEqualStrategyNumber:
3025 : 123 : satisfied = (cmpresult >= 0);
3026 : 123 : break;
3027 : 186 : case BTGreaterStrategyNumber:
3028 : 186 : satisfied = (cmpresult > 0);
3029 : 186 : break;
43 pg@bowt.ie 3030 :UNC 0 : default:
3031 : : /* EQ and NE cases aren't allowed here */
3032 [ # # ]: 0 : elog(ERROR, "unexpected strategy number %d", subkey->sk_strategy);
3033 : : satisfied = false; /* keep compiler quiet */
3034 : : break;
3035 : : }
3036 : :
43 pg@bowt.ie 3037 :GNC 1194 : return satisfied;
3038 : : }
3039 : :
3040 : : /*
3041 : : * Test whether an indextuple satisfies a row-comparison scan condition.
3042 : : *
3043 : : * Return true if so, false if not. If not, also clear *continuescan if
3044 : : * it's not possible for any future tuples in the current scan direction
3045 : : * to pass the qual.
3046 : : *
3047 : : * This is a subroutine for _bt_checkkeys/_bt_check_compare.
3048 : : */
3049 : : static bool
2411 pg@bowt.ie 3050 :CBC 1227 : _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
3051 : : TupleDesc tupdesc, ScanDirection dir,
3052 : : bool forcenonrequired, bool *continuescan)
3053 : : {
7216 tgl@sss.pgh.pa.us 3054 : 1227 : ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
3055 : 1227 : int32 cmpresult = 0;
3056 : : bool result;
3057 : :
3058 : : /* First subkey should be same as the header says */
3059 [ + - ]: 1227 : Assert(subkey->sk_attno == skey->sk_attno);
3060 : :
3061 : : /* Loop over columns of the row condition */
3062 : : for (;;)
3063 : 120 : {
3064 : : Datum datum;
3065 : : bool isNull;
3066 : :
3067 [ - + ]: 1347 : Assert(subkey->sk_flags & SK_ROW_MEMBER);
3068 : :
3069 : : /* When a NULL row member is compared, the row never matches */
118 pg@bowt.ie 3070 [ + + ]: 1347 : if (subkey->sk_flags & SK_ISNULL)
3071 : : {
3072 : : /*
3073 : : * Unlike the simple-scankey case, this isn't a disallowed case
3074 : : * (except when it's the first row element that has the NULL arg).
3075 : : * But it can never match. If all the earlier row comparison
3076 : : * columns are required for the scan direction, we can stop the
3077 : : * scan, because there can't be another tuple that will succeed.
3078 : : */
3079 [ - + ]: 6 : Assert(subkey != (ScanKey) DatumGetPointer(skey->sk_argument));
3080 : 6 : subkey--;
3081 [ + - ]: 6 : if (forcenonrequired)
3082 : : {
3083 : : /* treating scan's keys as non-required */
3084 : : }
3085 [ + + + - ]: 6 : else if ((subkey->sk_flags & SK_BT_REQFWD) &&
3086 : : ScanDirectionIsForward(dir))
3087 : 3 : *continuescan = false;
3088 [ + - + - ]: 3 : else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
3089 : : ScanDirectionIsBackward(dir))
3090 : 3 : *continuescan = false;
3091 : 33 : return false;
3092 : : }
3093 : :
2411 3094 [ + + ]: 1341 : if (subkey->sk_attno > tupnatts)
3095 : : {
3096 : : /*
3097 : : * This attribute is truncated (must be high key). The value for
3098 : : * this attribute in the first non-pivot tuple on the page to the
3099 : : * right could be any possible value. Assume that truncated
3100 : : * attribute passes the qual.
3101 : : */
2071 3102 [ - + ]: 3 : Assert(BTreeTupleIsPivot(tuple));
118 3103 : 3 : return true;
3104 : : }
3105 : :
7216 tgl@sss.pgh.pa.us 3106 : 1338 : datum = index_getattr(tuple,
3107 : 1338 : subkey->sk_attno,
3108 : : tupdesc,
3109 : : &isNull);
3110 : :
3111 [ + + ]: 1338 : if (isNull)
3112 : : {
3113 : : int reqflags;
3114 : :
139 pg@bowt.ie 3115 [ + - ]: 24 : if (forcenonrequired)
3116 : : {
3117 : : /* treating scan's keys as non-required */
3118 : : }
3119 [ - + ]: 24 : else if (subkey->sk_flags & SK_BT_NULLS_FIRST)
3120 : : {
3121 : : /*
3122 : : * Since NULLs are sorted before non-NULLs, we know we have
3123 : : * reached the lower limit of the range of values for this
3124 : : * index attr. On a backward scan, we can stop if this qual
3125 : : * is one of the "must match" subset. However, on a forwards
3126 : : * scan, we must keep going, because we may have initially
3127 : : * positioned to the start of the index.
3128 : : *
3129 : : * All required NULLS FIRST > row members can use NULL tuple
3130 : : * values to end backwards scans, just like with other values.
3131 : : * A qual "WHERE (a, b, c) > (9, 42, 'foo')" can terminate a
3132 : : * backwards scan upon reaching the index's rightmost "a = 9"
3133 : : * tuple whose "b" column contains a NULL (if not sooner).
3134 : : * Since "b" is NULLS FIRST, we can treat its NULLs as "<" 42.
3135 : : */
118 pg@bowt.ie 3136 :UBC 0 : reqflags = SK_BT_REQBKWD;
3137 : :
3138 : : /*
3139 : : * When a most significant required NULLS FIRST < row compare
3140 : : * member sees NULL tuple values during a backwards scan, it
3141 : : * signals the end of matches for the whole row compare/scan.
3142 : : * A qual "WHERE (a, b, c) < (9, 42, 'foo')" will terminate a
3143 : : * backwards scan upon reaching the rightmost tuple whose "a"
3144 : : * column has a NULL. The "a" NULL value is "<" 9, and yet
3145 : : * our < row compare will still end the scan. (This isn't
3146 : : * safe with later/lower-order row members. Notice that it
3147 : : * can only happen with an "a" NULL some time after the scan
3148 : : * completely stops needing to use its "b" and "c" members.)
3149 : : */
3150 [ # # ]: 0 : if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
3151 : 0 : reqflags |= SK_BT_REQFWD; /* safe, first row member */
3152 : :
3153 [ # # # # ]: 0 : if ((subkey->sk_flags & reqflags) &&
3154 : : ScanDirectionIsBackward(dir))
5109 tgl@sss.pgh.pa.us 3155 : 0 : *continuescan = false;
3156 : : }
3157 : : else
3158 : : {
3159 : : /*
3160 : : * Since NULLs are sorted after non-NULLs, we know we have
3161 : : * reached the upper limit of the range of values for this
3162 : : * index attr. On a forward scan, we can stop if this qual is
3163 : : * one of the "must match" subset. However, on a backward
3164 : : * scan, we must keep going, because we may have initially
3165 : : * positioned to the end of the index.
3166 : : *
3167 : : * All required NULLS LAST < row members can use NULL tuple
3168 : : * values to end forwards scans, just like with other values.
3169 : : * A qual "WHERE (a, b, c) < (9, 42, 'foo')" can terminate a
3170 : : * forwards scan upon reaching the index's leftmost "a = 9"
3171 : : * tuple whose "b" column contains a NULL (if not sooner).
3172 : : * Since "b" is NULLS LAST, we can treat its NULLs as ">" 42.
3173 : : */
118 pg@bowt.ie 3174 :CBC 24 : reqflags = SK_BT_REQFWD;
3175 : :
3176 : : /*
3177 : : * When a most significant required NULLS LAST > row compare
3178 : : * member sees NULL tuple values during a forwards scan, it
3179 : : * signals the end of matches for the whole row compare/scan.
3180 : : * A qual "WHERE (a, b, c) > (9, 42, 'foo')" will terminate a
3181 : : * forwards scan upon reaching the leftmost tuple whose "a"
3182 : : * column has a NULL. The "a" NULL value is ">" 9, and yet
3183 : : * our > row compare will end the scan. (This isn't safe with
3184 : : * later/lower-order row members. Notice that it can only
3185 : : * happen with an "a" NULL some time after the scan completely
3186 : : * stops needing to use its "b" and "c" members.)
3187 : : */
3188 [ - + ]: 24 : if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
118 pg@bowt.ie 3189 :UBC 0 : reqflags |= SK_BT_REQBKWD; /* safe, first row member */
3190 : :
118 pg@bowt.ie 3191 [ - + - - ]:CBC 24 : if ((subkey->sk_flags & reqflags) &&
3192 : : ScanDirectionIsForward(dir))
5109 tgl@sss.pgh.pa.us 3193 :UBC 0 : *continuescan = false;
3194 : : }
3195 : :
3196 : : /*
3197 : : * In any case, this indextuple doesn't match the qual.
3198 : : */
7216 tgl@sss.pgh.pa.us 3199 :CBC 24 : return false;
3200 : : }
3201 : :
3202 : : /* Perform the test --- three-way comparison not bool operator */
5313 3203 : 1314 : cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
3204 : : subkey->sk_collation,
3205 : : datum,
3206 : : subkey->sk_argument));
3207 : :
6867 3208 [ - + ]: 1314 : if (subkey->sk_flags & SK_BT_DESC)
2580 tgl@sss.pgh.pa.us 3209 [ # # ]:UBC 0 : INVERT_COMPARE_RESULT(cmpresult);
3210 : :
3211 : : /* Done comparing if unequal, else advance to next column */
7216 tgl@sss.pgh.pa.us 3212 [ + + ]:CBC 1314 : if (cmpresult != 0)
3213 : 1194 : break;
3214 : :
3215 [ - + ]: 120 : if (subkey->sk_flags & SK_ROW_END)
7216 tgl@sss.pgh.pa.us 3216 :UBC 0 : break;
7216 tgl@sss.pgh.pa.us 3217 :CBC 120 : subkey++;
3218 : : }
3219 : :
3220 : : /* Final subkey/column determines if row compare is satisfied */
43 pg@bowt.ie 3221 :GNC 1194 : result = _bt_rowcompare_cmpresult(subkey, cmpresult);
3222 : :
139 pg@bowt.ie 3223 [ + + + - ]:CBC 1194 : if (!result && !forcenonrequired)
3224 : : {
3225 : : /*
3226 : : * Tuple fails this qual. If it's a required qual for the current
3227 : : * scan direction, then we can conclude no further tuples will pass,
3228 : : * either. Note we have to look at the deciding column, not
3229 : : * necessarily the first or last column of the row condition.
3230 : : */
7216 tgl@sss.pgh.pa.us 3231 [ + - + - ]: 3 : if ((subkey->sk_flags & SK_BT_REQFWD) &&
3232 : : ScanDirectionIsForward(dir))
3233 : 3 : *continuescan = false;
7216 tgl@sss.pgh.pa.us 3234 [ # # # # ]:UBC 0 : else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
3235 : : ScanDirectionIsBackward(dir))
3236 : 0 : *continuescan = false;
3237 : : }
3238 : :
7216 tgl@sss.pgh.pa.us 3239 :CBC 1194 : return result;
3240 : : }
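/*
 * Editor's worked example (illustrative): for the qual
 * "WHERE (a, b) > (5, 10)" on ASC columns, a tuple with a = 5 and b = 11 is
 * evaluated as follows.  The "a" member compares 5 against 5 (cmpresult = 0),
 * so we advance to the "b" member, which compares 11 against 10
 * (cmpresult > 0) and becomes the deciding subkey; _bt_rowcompare_cmpresult
 * then reports the row compare satisfied, since the deciding subkey's
 * strategy is BTGreaterStrategyNumber.  A tuple with a = 5 and b = 9 instead
 * decides on "b" with cmpresult < 0, so the row compare is not satisfied.
 */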
3241 : :
3242 : : /*
3243 : : * Determine if a scan with array keys should skip over uninteresting tuples.
3244 : : *
3245 : : * This is a subroutine for _bt_checkkeys. Called when _bt_readpage's linear
3246 : : * search process (started after it finishes reading an initial group of
3247 : : * matching tuples, used to locate the start of the next group of tuples
3248 : : * matching the next set of required array keys) has already scanned an
3249 : : * excessive number of tuples whose key space is "between arrays".
3250 : : *
3251 : : * When we perform look ahead successfully, we'll set pstate.skip, which
3252 : : * instructs _bt_readpage to skip ahead to that tuple next (could be past the
3253 : : * end of the scan's leaf page). Pages where the optimization is effective
3254 : : * will generally still need to skip several times. Each call here performs
3255 : : * only a single "look ahead" comparison of a later tuple, whose distance from
3256 : : * the current tuple's offset number is determined by applying heuristics.
3257 : : */
3258 : : static void
570 pg@bowt.ie 3259 : 5696 : _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
3260 : : int tupnatts, TupleDesc tupdesc)
3261 : : {
363 3262 : 5696 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
3263 : 5696 : ScanDirection dir = so->currPos.dir;
3264 : : OffsetNumber aheadoffnum;
3265 : : IndexTuple ahead;
3266 : :
207 3267 [ - + ]: 5696 : Assert(!pstate->forcenonrequired);
3268 : :
3269 : : /* Avoid looking ahead when comparing the page high key */
570 3270 [ - + ]: 5696 : if (pstate->offnum < pstate->minoff)
570 pg@bowt.ie 3271 :UBC 0 : return;
3272 : :
3273 : : /*
3274 : : * Don't look ahead when there aren't enough tuples remaining on the page
3275 : : * (in the current scan direction) for it to be worth our while
3276 : : */
570 pg@bowt.ie 3277 [ + + ]:CBC 5696 : if (ScanDirectionIsForward(dir) &&
3278 [ + + ]: 5657 : pstate->offnum >= pstate->maxoff - LOOK_AHEAD_DEFAULT_DISTANCE)
3279 : 161 : return;
3280 [ + + ]: 5535 : else if (ScanDirectionIsBackward(dir) &&
3281 [ + + ]: 39 : pstate->offnum <= pstate->minoff + LOOK_AHEAD_DEFAULT_DISTANCE)
3282 : 12 : return;
3283 : :
3284 : : /*
3285 : : * The look ahead distance starts small, and ramps up as each call here
3286 : : * allows _bt_readpage to skip over more tuples
3287 : : */
3288 [ + + ]: 5523 : if (!pstate->targetdistance)
3289 : 3236 : pstate->targetdistance = LOOK_AHEAD_DEFAULT_DISTANCE;
428 3290 [ + - ]: 2287 : else if (pstate->targetdistance < MaxIndexTuplesPerPage / 2)
570 3291 : 2287 : pstate->targetdistance *= 2;
3292 : :
3293 : : /* Don't read past the end (or before the start) of the page, though */
3294 [ + + ]: 5523 : if (ScanDirectionIsForward(dir))
3295 : 5496 : aheadoffnum = Min((int) pstate->maxoff,
3296 : : (int) pstate->offnum + pstate->targetdistance);
3297 : : else
3298 : 27 : aheadoffnum = Max((int) pstate->minoff,
3299 : : (int) pstate->offnum - pstate->targetdistance);
3300 : :
3301 : 5523 : ahead = (IndexTuple) PageGetItem(pstate->page,
3302 : 5523 : PageGetItemId(pstate->page, aheadoffnum));
3303 [ + + ]: 5523 : if (_bt_tuple_before_array_skeys(scan, dir, ahead, tupdesc, tupnatts,
3304 : : false, 0, NULL))
3305 : : {
3306 : : /*
3307 : : * Success -- instruct _bt_readpage to skip ahead to very next tuple
3308 : : * after the one we determined was still before the current array keys
3309 : : */
3310 [ + + ]: 1899 : if (ScanDirectionIsForward(dir))
3311 : 1881 : pstate->skip = aheadoffnum + 1;
3312 : : else
3313 : 18 : pstate->skip = aheadoffnum - 1;
3314 : : }
3315 : : else
3316 : : {
3317 : : /*
3318 : : * Failure -- "ahead" tuple is too far ahead (we were too aggressive).
3319 : : *
3320 : : * Reset the number of rechecks, and aggressively reduce the target
3321 : : * distance (we're much more aggressive here than we were when the
3322 : : * distance was initially ramped up).
3323 : : */
3324 : 3624 : pstate->rechecks = 0;
3325 [ + + ]: 3624 : pstate->targetdistance = Max(pstate->targetdistance / 8, 1);
3326 : : }
3327 : : }
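/*
 * Editor's note (illustrative trace of the heuristics above): targetdistance
 * starts at LOOK_AHEAD_DEFAULT_DISTANCE (5) on the first call for a page and
 * doubles on each later call while it stays below MaxIndexTuplesPerPage / 2,
 * giving 5, 10, 20, 40, ...  A failed look ahead then collapses it to
 * Max(targetdistance / 8, 1) -- e.g. 40 drops back to 5 -- and resets
 * pstate->rechecks, so the ramp-up has to be earned all over again.
 */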
3328 : :
3329 : : /*
3330 : : * _bt_killitems - set LP_DEAD state for items an indexscan caller has
3331 : : * told us were killed
3332 : : *
3333 : : * scan->opaque, referenced locally through so, contains information about the
3334 : : * current page and killed tuples thereon (generally, this should only be
3335 : : * called if so->numKilled > 0).
3336 : : *
3337 : : * Caller should not have a lock on the so->currPos page, but must hold a
3338 : : * buffer pin when !so->dropPin. When we return, it still won't be locked.
3339 : : * It'll continue to hold whatever pins were held before calling here.
3340 : : *
3341 : : * We match items by heap TID before assuming they are the right ones to set
3342 : : * LP_DEAD. If the scan is one that holds a buffer pin on the target page
3343 : : * continuously from initially reading the items until applying this function
3344 : : * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
3345 : : * page, so the page's TIDs can't have been recycled by now. There's no risk
3346 : : * that we'll confuse a new index tuple that happens to use a recycled TID
3347 : : * with a now-removed tuple with the same TID (that used to be on this same
3348 : : * page). We can't rely on that during scans that drop buffer pins eagerly
3349 : : * (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
3350 : : * the page LSN having not changed since back when _bt_readpage saw the page.
3351 : : * We totally give up on setting LP_DEAD bits when the page LSN changed.
3352 : : *
3353 : : * We give up much less often during !so->dropPin scans, but it still happens.
3354 : : * We cope with cases where items have moved right due to insertions. If an
3355 : : * item has moved off the current page due to a split, we'll fail to find it
3356 : : * and just give up on it.
3357 : : */
3358 : : void
3870 kgrittn@postgresql.o 3359 : 87317 : _bt_killitems(IndexScanDesc scan)
3360 : : {
144 pg@bowt.ie 3361 : 87317 : Relation rel = scan->indexRelation;
7114 tgl@sss.pgh.pa.us 3362 : 87317 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
3363 : : Page page;
3364 : : BTPageOpaque opaque;
3365 : : OffsetNumber minoff;
3366 : : OffsetNumber maxoff;
3870 kgrittn@postgresql.o 3367 : 87317 : int numKilled = so->numKilled;
7114 tgl@sss.pgh.pa.us 3368 : 87317 : bool killedsomething = false;
3369 : : Buffer buf;
3370 : :
144 pg@bowt.ie 3371 [ - + ]: 87317 : Assert(numKilled > 0);
3870 kgrittn@postgresql.o 3372 [ - + - - : 87317 : Assert(BTScanPosIsValid(so->currPos));
- + ]
144 pg@bowt.ie 3373 [ - + ]: 87317 : Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */
3374 : :
3375 : : /* Always invalidate so->killedItems[] before leaving so->currPos */
3870 kgrittn@postgresql.o 3376 : 87317 : so->numKilled = 0;
3377 : :
144 pg@bowt.ie 3378 [ + + ]: 87317 : if (!so->dropPin)
3379 : : {
3380 : : /*
3381 : : * We have held the pin on this page since we read the index tuples,
3382 : : * so all we need to do is lock it. The pin will have prevented
3383 : : * concurrent VACUUMs from recycling any of the TIDs on the page.
3384 : : */
3385 [ - + - - : 19612 : Assert(BTScanPosIsPinned(so->currPos));
- + ]
139 3386 : 19612 : buf = so->currPos.buf;
3387 : 19612 : _bt_lockbuf(rel, buf, BT_READ);
3388 : : }
3389 : : else
3390 : : {
3391 : : XLogRecPtr latestlsn;
3392 : :
144 3393 [ - + - - : 67705 : Assert(!BTScanPosIsPinned(so->currPos));
- + ]
3394 [ + - + + : 67705 : Assert(RelationNeedsWAL(rel));
+ - - + ]
3395 : 67705 : buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
3396 : :
3397 : 67705 : latestlsn = BufferGetLSNAtomic(buf);
3398 [ - + ]: 67705 : Assert(so->currPos.lsn <= latestlsn);
3399 [ + + ]: 67705 : if (so->currPos.lsn != latestlsn)
3400 : : {
3401 : : /* Modified, give up on hinting */
3402 : 63 : _bt_relbuf(rel, buf);
3870 kgrittn@postgresql.o 3403 : 63 : return;
3404 : : }
3405 : :
3406 : : /* Unmodified, hinting is safe */
3407 : : }
3408 : :
139 pg@bowt.ie 3409 : 87254 : page = BufferGetPage(buf);
1306 michael@paquier.xyz 3410 : 87254 : opaque = BTPageGetOpaque(page);
7114 tgl@sss.pgh.pa.us 3411 [ + + ]: 87254 : minoff = P_FIRSTDATAKEY(opaque);
3412 : 87254 : maxoff = PageGetMaxOffsetNumber(page);
3413 : :
144 pg@bowt.ie 3414 [ + + ]: 296078 : for (int i = 0; i < numKilled; i++)
3415 : : {
6964 bruce@momjian.us 3416 : 208824 : int itemIndex = so->killedItems[i];
3417 : 208824 : BTScanPosItem *kitem = &so->currPos.items[itemIndex];
3418 : 208824 : OffsetNumber offnum = kitem->indexOffset;
3419 : :
7114 tgl@sss.pgh.pa.us 3420 [ + - - + ]: 208824 : Assert(itemIndex >= so->currPos.firstItem &&
3421 : : itemIndex <= so->currPos.lastItem);
3422 [ - + ]: 208824 : if (offnum < minoff)
7114 tgl@sss.pgh.pa.us 3423 :UBC 0 : continue; /* pure paranoia */
7114 tgl@sss.pgh.pa.us 3424 [ + + ]:CBC 4178897 : while (offnum <= maxoff)
3425 : : {
3426 : 4149480 : ItemId iid = PageGetItemId(page, offnum);
3427 : 4149480 : IndexTuple ituple = (IndexTuple) PageGetItem(page, iid);
2071 pg@bowt.ie 3428 : 4149480 : bool killtuple = false;
3429 : :
3430 [ + + ]: 4149480 : if (BTreeTupleIsPosting(ituple))
3431 : : {
3432 : 1022753 : int pi = i + 1;
3433 : 1022753 : int nposting = BTreeTupleGetNPosting(ituple);
3434 : : int j;
3435 : :
3436 : : /*
3437 : : * We rely on the convention that heap TIDs in the scanpos
3438 : : * items array are stored in ascending heap TID order for a
3439 : : * group of TIDs that originally came from a posting list
3440 : : * tuple. This convention even applies during backwards
3441 : : * scans, where returning the TIDs in descending order might
3442 : : * seem more natural. This is about effectiveness, not
3443 : : * correctness.
3444 : : *
3445 : : * Note that the page may have been modified in almost any way
3446 : : * since we first read it (in the !so->dropPin case), so it's
3447 : : * possible that this posting list tuple wasn't a posting list
3448 : : * tuple when we first encountered its heap TIDs.
3449 : : */
3450 [ + + ]: 1049618 : for (j = 0; j < nposting; j++)
3451 : : {
3452 : 1048795 : ItemPointer item = BTreeTupleGetPostingN(ituple, j);
3453 : :
3454 [ + + ]: 1048795 : if (!ItemPointerEquals(item, &kitem->heapTid))
3455 : 1021930 : break; /* out of posting list loop */
3456 : :
3457 : : /*
3458 : : * kitem must have matching offnum when heap TIDs match,
3459 : : * though only in the common case where the page can't
3460 : : * have been concurrently modified
3461 : : */
144 3462 [ - + - - ]: 26865 : Assert(kitem->indexOffset == offnum || !so->dropPin);
3463 : :
3464 : : /*
3465 : : * Read-ahead to later kitems here.
3466 : : *
3467 : : * We rely on the assumption that not advancing kitem here
3468 : : * will prevent us from considering the posting list tuple
3469 : : * fully dead by not matching its next heap TID in next
3470 : : * loop iteration.
3471 : : *
3472 : : * If, on the other hand, this is the final heap TID in
3473 : : * the posting list tuple, then tuple gets killed
3474 : : * regardless (i.e. we handle the case where the last
3475 : : * kitem is also the last heap TID in the last index tuple
3476 : : * correctly -- posting tuple still gets killed).
3477 : : */
2071 3478 [ + + ]: 26865 : if (pi < numKilled)
3479 : 9412 : kitem = &so->currPos.items[so->killedItems[pi++]];
3480 : : }
3481 : :
3482 : : /*
3483 : : * Don't bother advancing the outermost loop's int iterator to
3484 : : * avoid processing killed items that relate to the same
3485 : : * offnum/posting list tuple. This micro-optimization hardly
3486 : : * seems worth it. (Further iterations of the outermost loop
3487 : : * will fail to match on this same posting list's first heap
3488 : : * TID instead, so we'll advance to the next offnum/index
3489 : : * tuple pretty quickly.)
3490 : : */
3491 [ + + ]: 1022753 : if (j == nposting)
3492 : 823 : killtuple = true;
3493 : : }
3494 [ + + ]: 3126727 : else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
3495 : 179228 : killtuple = true;
3496 : :
3497 : : /*
3498 : : * Mark index item as dead, if it isn't already. Since this
3499 : : * happens while holding a buffer lock possibly in shared mode,
3500 : : * it's possible that multiple processes attempt to do this
3501 : : * simultaneously, leading to multiple full-page images being sent
3502 : : * to WAL (if wal_log_hints or data checksums are enabled), which
3503 : : * is undesirable.
3504 : : */
1992 alvherre@alvh.no-ip. 3505 [ + + + + ]: 4149480 : if (killtuple && !ItemIdIsDead(iid))
3506 : : {
3507 : : /* found the item/all posting list items */
6621 tgl@sss.pgh.pa.us 3508 : 179407 : ItemIdMarkDead(iid);
7114 3509 : 179407 : killedsomething = true;
3510 : 179407 : break; /* out of inner search loop */
3511 : : }
3512 : 3970073 : offnum = OffsetNumberNext(offnum);
3513 : : }
3514 : : }
3515 : :
3516 : : /*
3517 : : * Since this can be redone later if needed, mark as dirty hint.
3518 : : *
3519 : : * Whenever we mark anything LP_DEAD, we also set the page's
3520 : : * BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we
3521 : : * only rely on the page-level flag in !heapkeyspace indexes.)
3522 : : */
3523 [ + + ]: 87254 : if (killedsomething)
3524 : : {
7035 3525 : 69118 : opaque->btpo_flags |= BTP_HAS_GARBAGE;
139 pg@bowt.ie 3526 : 69118 : MarkBufferDirtyHint(buf, true);
3527 : : }
3528 : :
3529 [ + + ]: 87254 : if (!so->dropPin)
3530 : 19612 : _bt_unlockbuf(rel, buf);
3531 : : else
3532 : 67642 : _bt_relbuf(rel, buf);
3533 : : }
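/*
 * Editor's note (illustrative): for a so->dropPin scan, the LSN test above is
 * what stands in for the buffer pin.  _bt_readpage saved the page's LSN in
 * so->currPos.lsn; if any WAL-logged change (say a page split driven by a
 * concurrent insert) touched the page since then, BufferGetLSNAtomic() will
 * no longer match, and we give up without setting any LP_DEAD bits rather
 * than risk hinting a tuple whose TID has since been recycled.
 */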
3534 : :
3535 : :
3536 : : /*
3537 : : * The following routines manage a shared-memory area in which we track
3538 : : * assignment of "vacuum cycle IDs" to currently-active btree vacuuming
3539 : : * operations. There is a single counter which increments each time we
3540 : : * start a vacuum to assign it a cycle ID. Since multiple vacuums could
3541 : : * be active concurrently, we have to track the cycle ID for each active
3542 : : * vacuum; this requires at most MaxBackends entries (usually far fewer).
3543 : : * We assume at most one vacuum can be active for a given index.
3544 : : *
3545 : : * Access to the shared memory area is controlled by BtreeVacuumLock.
3546 : : * In principle we could use a separate lmgr locktag for each index,
3547 : : * but a single LWLock is much cheaper, and given the short time that
3548 : : * the lock is ever held, the concurrency hit should be minimal.
3549 : : */
3550 : :
3551 : : typedef struct BTOneVacInfo
3552 : : {
3553 : : LockRelId relid; /* global identifier of an index */
3554 : : BTCycleId cycleid; /* cycle ID for its active VACUUM */
3555 : : } BTOneVacInfo;
3556 : :
3557 : : typedef struct BTVacInfo
3558 : : {
3559 : : BTCycleId cycle_ctr; /* cycle ID most recently assigned */
3560 : : int num_vacuums; /* number of currently active VACUUMs */
3561 : : int max_vacuums; /* allocated length of vacuums[] array */
3562 : : BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER];
3563 : : } BTVacInfo;
3564 : :
3565 : : static BTVacInfo *btvacinfo;
3566 : :
3567 : :
3568 : : /*
3569 : : * _bt_vacuum_cycleid --- get the active vacuum cycle ID for an index,
3570 : : * or zero if there is no active VACUUM
3571 : : *
3572 : : * Note: for correct interlocking, the caller must already hold pin and
3573 : : * exclusive lock on each buffer it will store the cycle ID into. This
3574 : : * ensures that even if a VACUUM starts immediately afterwards, it cannot
3575 : : * process those pages until the page split is complete.
3576 : : */
3577 : : BTCycleId
7113 tgl@sss.pgh.pa.us 3578 : 11471 : _bt_vacuum_cycleid(Relation rel)
3579 : : {
3580 : 11471 : BTCycleId result = 0;
3581 : : int i;
3582 : :
3583 : : /* Share lock is enough since this is a read-only operation */
3584 : 11471 : LWLockAcquire(BtreeVacuumLock, LW_SHARED);
3585 : :
3586 [ + + ]: 11492 : for (i = 0; i < btvacinfo->num_vacuums; i++)
3587 : : {
3588 : 21 : BTOneVacInfo *vac = &btvacinfo->vacuums[i];
3589 : :
3590 [ - + ]: 21 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
7113 tgl@sss.pgh.pa.us 3591 [ # # ]:LBC (1) : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3592 : : {
3593 : (1) : result = vac->cycleid;
3594 : (1) : break;
3595 : : }
3596 : : }
3597 : :
7113 tgl@sss.pgh.pa.us 3598 :CBC 11471 : LWLockRelease(BtreeVacuumLock);
3599 : 11471 : return result;
3600 : : }
3601 : :
3602 : : /*
3603 : : * _bt_start_vacuum --- assign a cycle ID to a just-starting VACUUM operation
3604 : : *
3605 : : * Note: the caller must guarantee that it will eventually call
3606 : : * _bt_end_vacuum, else we'll permanently leak an array slot. To ensure
3607 : : * that this happens even in elog(FATAL) scenarios, the appropriate coding
3608 : : * is not just a PG_TRY, but
3609 : : * PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel))
3610 : : */
3611 : : BTCycleId
3612 : 1562 : _bt_start_vacuum(Relation rel)
3613 : : {
3614 : : BTCycleId result;
3615 : : int i;
3616 : : BTOneVacInfo *vac;
3617 : :
3618 : 1562 : LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
3619 : :
3620 : : /*
3621 : : * Assign the next cycle ID, being careful to avoid zero as well as the
3622 : : * reserved high values.
3623 : : */
6777 3624 : 1562 : result = ++(btvacinfo->cycle_ctr);
3625 [ + - - + ]: 1562 : if (result == 0 || result > MAX_BT_CYCLE_ID)
6777 tgl@sss.pgh.pa.us 3626 :UBC 0 : result = btvacinfo->cycle_ctr = 1;
3627 : :
3628 : : /* Let's just make sure there's no entry already for this index */
7113 tgl@sss.pgh.pa.us 3629 [ + + ]:CBC 1563 : for (i = 0; i < btvacinfo->num_vacuums; i++)
3630 : : {
3631 : 1 : vac = &btvacinfo->vacuums[i];
3632 [ - + ]: 1 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
7113 tgl@sss.pgh.pa.us 3633 [ # # ]:UBC 0 : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3634 : : {
3635 : : /*
3636 : : * Unlike most places in the backend, we have to explicitly
3637 : : * release our LWLock before throwing an error. This is because
3638 : : * we expect _bt_end_vacuum() to be called before transaction
3639 : : * abort cleanup can run to release LWLocks.
3640 : : */
6787 3641 : 0 : LWLockRelease(BtreeVacuumLock);
7113 3642 [ # # ]: 0 : elog(ERROR, "multiple active vacuums for index \"%s\"",
3643 : : RelationGetRelationName(rel));
3644 : : }
3645 : : }
3646 : :
3647 : : /* OK, add an entry */
7113 tgl@sss.pgh.pa.us 3648 [ - + ]:CBC 1562 : if (btvacinfo->num_vacuums >= btvacinfo->max_vacuums)
3649 : : {
6787 tgl@sss.pgh.pa.us 3650 :UBC 0 : LWLockRelease(BtreeVacuumLock);
7113 3651 [ # # ]: 0 : elog(ERROR, "out of btvacinfo slots");
3652 : : }
7113 tgl@sss.pgh.pa.us 3653 :CBC 1562 : vac = &btvacinfo->vacuums[btvacinfo->num_vacuums];
3654 : 1562 : vac->relid = rel->rd_lockInfo.lockRelId;
3655 : 1562 : vac->cycleid = result;
3656 : 1562 : btvacinfo->num_vacuums++;
3657 : :
3658 : 1562 : LWLockRelease(BtreeVacuumLock);
3659 : 1562 : return result;
3660 : : }
3661 : :
3662 : : /*
3663 : : * _bt_end_vacuum --- mark a btree VACUUM operation as done
3664 : : *
3665 : : * Note: this is deliberately coded not to complain if no entry is found;
3666 : : * this allows the caller to put PG_TRY around the start_vacuum operation.
3667 : : */
3668 : : void
3669 : 1562 : _bt_end_vacuum(Relation rel)
3670 : : {
3671 : : int i;
3672 : :
3673 : 1562 : LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
3674 : :
3675 : : /* Find the array entry */
3676 [ + - ]: 1562 : for (i = 0; i < btvacinfo->num_vacuums; i++)
3677 : : {
3678 : 1562 : BTOneVacInfo *vac = &btvacinfo->vacuums[i];
3679 : :
3680 [ + - ]: 1562 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
3681 [ + - ]: 1562 : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3682 : : {
3683 : : /* Remove it by shifting down the last entry */
3684 : 1562 : *vac = btvacinfo->vacuums[btvacinfo->num_vacuums - 1];
3685 : 1562 : btvacinfo->num_vacuums--;
3686 : 1562 : break;
3687 : : }
3688 : : }
3689 : :
3690 : 1562 : LWLockRelease(BtreeVacuumLock);
3691 : 1562 : }
3692 : :
3693 : : /*
3694 : : * _bt_end_vacuum wrapped as an on_shmem_exit callback function
3695 : : */
3696 : : void
6404 tgl@sss.pgh.pa.us 3697 :UBC 0 : _bt_end_vacuum_callback(int code, Datum arg)
3698 : : {
3699 : 0 : _bt_end_vacuum((Relation) DatumGetPointer(arg));
3700 : 0 : }
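/*
 * Editor's note (illustrative): the calling convention that _bt_start_vacuum's
 * comment prescribes ends up looking roughly like this in a caller (compare
 * btbulkdelete in nbtree.c; this is a sketch, not verbatim source):
 *
 *		PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 *		{
 *			cycleid = _bt_start_vacuum(rel);
 *			... perform the physical index scan, stamping split pages
 *			... with cycleid ...
 *		}
 *		PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 *		_bt_end_vacuum(rel);
 *
 * That way the vacuums[] slot is released even if the scan errors out.
 */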
3701 : :
3702 : : /*
3703 : : * BTreeShmemSize --- report amount of shared memory space needed
3704 : : */
3705 : : Size
7113 tgl@sss.pgh.pa.us 3706 :CBC 2998 : BTreeShmemSize(void)
3707 : : {
3708 : : Size size;
3709 : :
3903 3710 : 2998 : size = offsetof(BTVacInfo, vacuums);
1295 rhaas@postgresql.org 3711 : 2998 : size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
7113 tgl@sss.pgh.pa.us 3712 : 2998 : return size;
3713 : : }
3714 : :
3715 : : /*
3716 : : * BTreeShmemInit --- initialize this module's shared memory
3717 : : */
3718 : : void
3719 : 1049 : BTreeShmemInit(void)
3720 : : {
3721 : : bool found;
3722 : :
3723 : 1049 : btvacinfo = (BTVacInfo *) ShmemInitStruct("BTree Vacuum State",
3724 : : BTreeShmemSize(),
3725 : : &found);
3726 : :
3727 [ + - ]: 1049 : if (!IsUnderPostmaster)
3728 : : {
3729 : : /* Initialize shared memory area */
3730 [ - + ]: 1049 : Assert(!found);
3731 : :
3732 : : /*
3733 : : * It doesn't really matter what the cycle counter starts at, but
3734 : : * having it always start the same doesn't seem good. Seed with
3735 : : * low-order bits of time() instead.
3736 : : */
3737 : 1049 : btvacinfo->cycle_ctr = (BTCycleId) time(NULL);
3738 : :
3739 : 1049 : btvacinfo->num_vacuums = 0;
1295 rhaas@postgresql.org 3740 : 1049 : btvacinfo->max_vacuums = MaxBackends;
3741 : : }
3742 : : else
7113 tgl@sss.pgh.pa.us 3743 [ # # ]:UBC 0 : Assert(found);
7113 tgl@sss.pgh.pa.us 3744 :CBC 1049 : }
3745 : :
3746 : : bytea *
3572 3747 : 165 : btoptions(Datum reloptions, bool validate)
3748 : : {
3749 : : static const relopt_parse_elt tab[] = {
3750 : : {"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
3751 : : {"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
3752 : : offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
3753 : : {"deduplicate_items", RELOPT_TYPE_BOOL,
3754 : : offsetof(BTOptions, deduplicate_items)}
3755 : : };
3756 : :
2164 michael@paquier.xyz 3757 : 165 : return (bytea *) build_reloptions(reloptions, validate,
3758 : : RELOPT_KIND_BTREE,
3759 : : sizeof(BTOptions),
3760 : : tab, lengthof(tab));
3761 : : }
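/*
 * Editor's note (illustrative): these are the storage parameters a btree
 * index accepts in a WITH clause, e.g.
 *
 *		CREATE INDEX idx ON tab (col)
 *			WITH (fillfactor = 70, deduplicate_items = off);
 *
 * btoptions() parses them into a BTOptions struct using the table above.
 */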
3762 : :
3763 : : /*
3764 : : * btproperty() -- Check boolean properties of indexes.
3765 : : *
3766 : : * This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
3767 : : * to call btcanreturn.
3768 : : */
3769 : : bool
3363 tgl@sss.pgh.pa.us 3770 : 378 : btproperty(Oid index_oid, int attno,
3771 : : IndexAMProperty prop, const char *propname,
3772 : : bool *res, bool *isnull)
3773 : : {
3774 [ + + ]: 378 : switch (prop)
3775 : : {
3776 : 21 : case AMPROP_RETURNABLE:
3777 : : /* answer only for columns, not AM or whole index */
3778 [ + + ]: 21 : if (attno == 0)
3779 : 6 : return false;
3780 : : /* otherwise, btree can always return data */
3781 : 15 : *res = true;
3782 : 15 : return true;
3783 : :
3784 : 357 : default:
3785 : 357 : return false; /* punt to generic code */
3786 : : }
3787 : : }
3788 : :
3789 : : /*
3790 : : * btbuildphasename() -- Return name of index build phase.
3791 : : */
3792 : : char *
2401 alvherre@alvh.no-ip. 3793 :UBC 0 : btbuildphasename(int64 phasenum)
3794 : : {
3795 [ # # # # : 0 : switch (phasenum)
# # ]
3796 : : {
3797 : 0 : case PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE:
3798 : 0 : return "initializing";
3799 : 0 : case PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN:
3800 : 0 : return "scanning table";
3801 : 0 : case PROGRESS_BTREE_PHASE_PERFORMSORT_1:
3802 : 0 : return "sorting live tuples";
3803 : 0 : case PROGRESS_BTREE_PHASE_PERFORMSORT_2:
3804 : 0 : return "sorting dead tuples";
3805 : 0 : case PROGRESS_BTREE_PHASE_LEAF_LOAD:
3806 : 0 : return "loading tuples in tree";
3807 : 0 : default:
3808 : 0 : return NULL;
3809 : : }
3810 : : }
3811 : :
3812 : : /*
3813 : : * _bt_truncate() -- create tuple without unneeded suffix attributes.
3814 : : *
3815 : : * Returns truncated pivot index tuple allocated in caller's memory context,
3816 : : * with key attributes copied from caller's firstright argument. If rel is
3817 : : * an INCLUDE index, non-key attributes will definitely be truncated away,
3818 : : * since they're not part of the key space. More aggressive suffix
3819 : : * truncation can take place when it's clear that the returned tuple does not
3820 : : * need one or more suffix key attributes. We only need to keep firstright
3821 : : * attributes up to and including the first non-lastleft-equal attribute.
3822 : : * Caller's insertion scankey is used to compare the tuples; the scankey's
3823 : : * argument values are not considered here.
3824 : : *
3825 : : * Note that the returned tuple's t_tid offset will hold the number of attributes
3826 : : * present, so the original item pointer offset is not represented. Caller
3827 : : * should only change truncated tuple's downlink. Note also that truncated
3828 : : * key attributes are treated as containing "minus infinity" values by
3829 : : * _bt_compare().
3830 : : *
3831 : : * In the worst case (when a heap TID must be appended to distinguish lastleft
3832 : : * from firstright), the size of the returned tuple is the size of firstright
3833 : : * plus the size of an additional MAXALIGN()'d item pointer. This guarantee
3834 : : * is important, since callers need to stay under the 1/3 of a page
3835 : : * restriction on tuple size. If this routine is ever taught to truncate
3836 : : * within an attribute/datum, it will need to avoid returning an enlarged
3837 : : * tuple to caller when truncation + TOAST compression ends up enlarging the
3838 : : * final datum.
3839 : : */
3840 : : IndexTuple
2414 pg@bowt.ie 3841 :CBC 31317 : _bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright,
3842 : : BTScanInsert itup_key)
3843 : : {
3844 : 31317 : TupleDesc itupdesc = RelationGetDescr(rel);
3845 : 31317 : int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
3846 : : int keepnatts;
3847 : : IndexTuple pivot;
3848 : : IndexTuple tidpivot;
3849 : : ItemPointer pivotheaptid;
3850 : : Size newsize;
3851 : :
3852 : : /*
3853 : : * We should only ever truncate non-pivot tuples from leaf pages. It's
3854 : : * never okay to truncate when splitting an internal page.
3855 : : */
2071 3856 [ + - - + ]: 31317 : Assert(!BTreeTupleIsPivot(lastleft) && !BTreeTupleIsPivot(firstright));
3857 : :
3858 : : /* Determine how many attributes must be kept in truncated tuple */
2414 3859 : 31317 : keepnatts = _bt_keep_natts(rel, lastleft, firstright, itup_key);
3860 : :
3861 : : #ifdef DEBUG_NO_TRUNCATE
3862 : : /* Force truncation to be ineffective for testing purposes */
3863 : : keepnatts = nkeyatts + 1;
3864 : : #endif
3865 : :
2038 3866 : 31317 : pivot = index_truncate_tuple(itupdesc, firstright,
3867 : : Min(keepnatts, nkeyatts));
3868 : :
3869 [ + + ]: 31317 : if (BTreeTupleIsPosting(pivot))
3870 : : {
3871 : : /*
3872 : : * index_truncate_tuple() just returns a straight copy of firstright
3873 : : * when it has no attributes to truncate. When that happens, we may
3874 : : * need to truncate away a posting list here instead.
3875 : : */
3876 [ + + - + ]: 610 : Assert(keepnatts == nkeyatts || keepnatts == nkeyatts + 1);
3877 [ - + ]: 610 : Assert(IndexRelationGetNumberOfAttributes(rel) == nkeyatts);
3878 : 610 : pivot->t_info &= ~INDEX_SIZE_MASK;
3879 : 610 : pivot->t_info |= MAXALIGN(BTreeTupleGetPostingOffset(firstright));
3880 : : }
3881 : :
3882 : : /*
3883 : : * If there is a distinguishing key attribute within pivot tuple, we're
3884 : : * done
3885 : : */
3886 [ + + ]: 31317 : if (keepnatts <= nkeyatts)
3887 : : {
2030 3888 : 30776 : BTreeTupleSetNAtts(pivot, keepnatts, false);
2038 3889 : 30776 : return pivot;
3890 : : }
3891 : :
3892 : : /*
3893 : : * We have to store a heap TID in the new pivot tuple, since no non-TID
3894 : : * key attribute value in firstright distinguishes the right side of the
3895 : : * split from the left side. nbtree conceptualizes this case as an
3896 : : * inability to truncate away any key attributes, since heap TID is
3897 : : * treated as just another key attribute (despite lacking a pg_attribute
3898 : : * entry).
3899 : : *
3900 : : * Use enlarged space that holds a copy of pivot. We need the extra space
3901 : : * to store a heap TID at the end (using the special pivot tuple
3902 : : * representation). Note that the original pivot already has firstright's
3903 : : * possible posting list/non-key attribute values removed at this point.
3904 : : */
3905 : 541 : newsize = MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData));
3906 : 541 : tidpivot = palloc0(newsize);
3907 : 541 : memcpy(tidpivot, pivot, MAXALIGN(IndexTupleSize(pivot)));
3908 : : /* Cannot leak memory here */
3909 : 541 : pfree(pivot);
3910 : :
3911 : : /*
3912 : : * Store all of firstright's key attribute values plus a tiebreaker heap
3913 : : * TID value in enlarged pivot tuple
3914 : : */
3915 : 541 : tidpivot->t_info &= ~INDEX_SIZE_MASK;
3916 : 541 : tidpivot->t_info |= newsize;
2030 3917 : 541 : BTreeTupleSetNAtts(tidpivot, nkeyatts, true);
2038 3918 : 541 : pivotheaptid = BTreeTupleGetHeapTID(tidpivot);
3919 : :
3920 : : /*
3921 : : * Lehman & Yao use lastleft as the leaf high key in all cases, but don't
3922 : : * consider suffix truncation. It seems like a good idea to follow that
3923 : : * example in cases where no truncation takes place -- use lastleft's heap
3924 : : * TID. (This is also the closest value to negative infinity that's
3925 : : * legally usable.)
3926 : : */
2071 3927 : 541 : ItemPointerCopy(BTreeTupleGetMaxHeapTID(lastleft), pivotheaptid);
3928 : :
3929 : : /*
3930 : : * We're done. Assert() that heap TID invariants hold before returning.
3931 : : *
3932 : : * Lehman and Yao require that the downlink to the right page, which is to
3933 : : * be inserted into the parent page in the second phase of a page split be
3934 : : * be inserted into the parent page in the second phase of a page split, be
3935 : : * bound for items on the left page. Assert that heap TIDs follow these
3936 : : * invariants, since a heap TID value is apparently needed as a
3937 : : * tiebreaker.
3938 : : */
3939 : : #ifndef DEBUG_NO_TRUNCATE
3940 [ - + ]: 541 : Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(lastleft),
3941 : : BTreeTupleGetHeapTID(firstright)) < 0);
3942 [ - + ]: 541 : Assert(ItemPointerCompare(pivotheaptid,
3943 : : BTreeTupleGetHeapTID(lastleft)) >= 0);
3944 [ - + ]: 541 : Assert(ItemPointerCompare(pivotheaptid,
3945 : : BTreeTupleGetHeapTID(firstright)) < 0);
3946 : : #else
3947 : :
3948 : : /*
3949 : : * Those invariants aren't guaranteed to hold for lastleft + firstright
3950 : : * heap TID attribute values when they're considered here only because
3951 : : * DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually
3952 : : * needed as a tiebreaker). DEBUG_NO_TRUNCATE must therefore use a heap
3953 : : * TID value that always works as a strict lower bound for items to the
3954 : : * right. In particular, it must avoid using firstright's leading key
3955 : : * attribute values along with lastleft's heap TID value when lastleft's
3956 : : * TID happens to be greater than firstright's TID.
3957 : : */
3958 : : ItemPointerCopy(BTreeTupleGetHeapTID(firstright), pivotheaptid);
3959 : :
3960 : : /*
3961 : : * Pivot heap TID should never be fully equal to firstright. Note that
3962 : : * the pivot heap TID will still end up equal to lastleft's heap TID when
3963 : : * that's the only usable value.
3964 : : */
3965 : : ItemPointerSetOffsetNumber(pivotheaptid,
3966 : : OffsetNumberPrev(ItemPointerGetOffsetNumber(pivotheaptid)));
3967 : : Assert(ItemPointerCompare(pivotheaptid,
3968 : : BTreeTupleGetHeapTID(firstright)) < 0);
3969 : : #endif
3970 : :
2038 3971 : 541 : return tidpivot;
3972 : : }
3973 : :
3974 : : /*
3975 : : * _bt_keep_natts - how many key attributes to keep when truncating.
3976 : : *
3977 : : * Caller provides two tuples that enclose a split point. Caller's insertion
3978 : : * scankey is used to compare the tuples; the scankey's argument values are
3979 : : * not considered here.
3980 : : *
3981 : : * This can return a number of attributes that is one greater than the
3982 : : * number of key attributes for the index relation. This indicates that the
3983 : : * caller must use a heap TID as a unique-ifier in the new pivot tuple.
3984 : : */
3985 : : static int
2414 3986 : 31317 : _bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright,
3987 : : BTScanInsert itup_key)
3988 : : {
3989 : 31317 : int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
3990 : 31317 : TupleDesc itupdesc = RelationGetDescr(rel);
3991 : : int keepnatts;
3992 : : ScanKey scankey;
3993 : :
3994 : : /*
3995 : : * _bt_compare() treats truncated key attributes as having the value minus
3996 : : * infinity, which would break searches within !heapkeyspace indexes. We
3997 : : * must still truncate away non-key attribute values, though.
3998 : : */
3999 [ - + ]: 31317 : if (!itup_key->heapkeyspace)
2414 pg@bowt.ie 4000 :UBC 0 : return nkeyatts;
4001 : :
2414 pg@bowt.ie 4002 :CBC 31317 : scankey = itup_key->scankeys;
4003 : 31317 : keepnatts = 1;
4004 [ + + ]: 37600 : for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++)
4005 : : {
4006 : : Datum datum1,
4007 : : datum2;
4008 : : bool isNull1,
4009 : : isNull2;
4010 : :
4011 : 37059 : datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
4012 : 37059 : datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
4013 : :
4014 [ - + ]: 37059 : if (isNull1 != isNull2)
4015 : 30776 : break;
4016 : :
4017 [ + + + + ]: 74103 : if (!isNull1 &&
4018 : 37044 : DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
4019 : : scankey->sk_collation,
4020 : : datum1,
4021 : : datum2)) != 0)
4022 : 30776 : break;
4023 : :
4024 : 6283 : keepnatts++;
4025 : : }
4026 : :
4027 : : /*
4028 : : * Assert that _bt_keep_natts_fast() agrees with us in passing. This is
4029 : : * expected in an allequalimage index.
4030 : : */
2071 4031 [ + + - + ]: 31317 : Assert(!itup_key->allequalimage ||
4032 : : keepnatts == _bt_keep_natts_fast(rel, lastleft, firstright));
4033 : :
2414 4034 : 31317 : return keepnatts;
4035 : : }
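
To make the contract above concrete, here is a toy, self-contained version of the same loop over plain integer "tuples" (no opclasses, no NULLs); the helper name and values are made up. It keeps attributes up to and including the first unequal one, and returns nkeyatts + 1 when every key attribute is equal, i.e. when a heap TID tiebreaker would be needed.

    #include <stdio.h>

    /* Toy stand-in for _bt_keep_natts(): integer attributes, plain C equality. */
    static int
    keep_natts_toy(const int *lastleft, const int *firstright, int nkeyatts)
    {
        int         keepnatts = 1;

        for (int attnum = 1; attnum <= nkeyatts; attnum++)
        {
            if (lastleft[attnum - 1] != firstright[attnum - 1])
                break;
            keepnatts++;
        }

        /* nkeyatts + 1 means "no distinguishing key attribute; use heap TID" */
        return keepnatts;
    }

    int
    main(void)
    {
        int         lastleft[] = {1, 7, 3};
        int         firstright[] = {1, 9, 4};

        printf("%d\n", keep_natts_toy(lastleft, firstright, 3));    /* prints 2 */
        printf("%d\n", keep_natts_toy(lastleft, lastleft, 3));      /* prints 4 */
        return 0;
    }
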
4036 : :
4037 : : /*
4038 : : * _bt_keep_natts_fast - fast bitwise variant of _bt_keep_natts.
4039 : : *
4040 : : * This is exported so that a candidate split point can have its effect on
4041 : : * suffix truncation inexpensively evaluated ahead of time when finding a
4042 : : * split location. A naive bitwise approach to datum comparisons is used to
4043 : : * save cycles.
4044 : : *
4045 : : * The approach taken here usually provides the same answer as _bt_keep_natts
4046 : : * will (for the same pair of tuples from a heapkeyspace index), since the
4047 : : * majority of btree opclasses can never indicate that two datums are equal
4048 : : * unless they're bitwise equal after detoasting. When an index only has
4049 : : * "equal image" columns, routine is guaranteed to give the same result as
4050 : : * "equal image" columns, this routine is guaranteed to give the same result as
4051 : : *
4052 : : * Callers can rely on the fact that attributes considered equal here are
4053 : : * definitely also equal according to _bt_keep_natts, even when the index uses
4054 : : * an opclass or collation that is not "allequalimage"/deduplication-safe.
4055 : : * This weaker guarantee is good enough for nbtsplitloc.c caller, since false
4056 : : * negatives generally only have the effect of making leaf page splits use a
4057 : : * more balanced split point.
4058 : : */
4059 : : int
4060 : 6651927 : _bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
4061 : : {
4062 : 6651927 : TupleDesc itupdesc = RelationGetDescr(rel);
4063 : 6651927 : int keysz = IndexRelationGetNumberOfKeyAttributes(rel);
4064 : : int keepnatts;
4065 : :
4066 : 6651927 : keepnatts = 1;
4067 [ + + ]: 11111527 : for (int attnum = 1; attnum <= keysz; attnum++)
4068 : : {
4069 : : Datum datum1,
4070 : : datum2;
4071 : : bool isNull1,
4072 : : isNull2;
4073 : : CompactAttribute *att;
4074 : :
4075 : 9934190 : datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
4076 : 9934190 : datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
312 drowley@postgresql.o 4077 : 9934190 : att = TupleDescCompactAttr(itupdesc, attnum - 1);
4078 : :
2414 pg@bowt.ie 4079 [ + + ]: 9934190 : if (isNull1 != isNull2)
4080 : 5474590 : break;
4081 : :
4082 [ + + ]: 9934088 : if (!isNull1 &&
2177 4083 [ + + ]: 9910545 : !datum_image_eq(datum1, datum2, att->attbyval, att->attlen))
2414 4084 : 5474488 : break;
4085 : :
4086 : 4459600 : keepnatts++;
4087 : : }
4088 : :
4089 : 6651927 : return keepnatts;
4090 : : }
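
The bitwise shortcut above trades completeness for speed: values are treated as equal only when their stored bytes match, which can give false negatives (for example, numeric 1.0 and 1.00 are equal under the opclass but have different binary images) but, per the comment above, never disagrees with _bt_keep_natts about values it does call equal. Below is a minimal standalone sketch of the idea, using memcmp() over fixed-length buffers instead of real datums (the helper name is made up).

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Toy image-equality check: two fixed-length values are "equal" only when
     * their bytes match.  The real datum_image_eq() also handles by-value,
     * varlena, and cstring datums.
     */
    static bool
    image_eq_toy(const void *a, const void *b, size_t len)
    {
        return memcmp(a, b, len) == 0;
    }

    int
    main(void)
    {
        int         x = 42;
        int         y = 42;
        int         z = 7;

        printf("%d\n", image_eq_toy(&x, &y, sizeof(int)));  /* 1: same image */
        printf("%d\n", image_eq_toy(&x, &z, sizeof(int)));  /* 0: different */
        return 0;
    }
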
4091 : :
4092 : : /*
4093 : : * _bt_check_natts() -- Verify tuple has expected number of attributes.
4094 : : *
4095 : : * Returns a value indicating whether the expected number of attributes was found
4096 : : * for a particular offset on page. This can be used as a general purpose
4097 : : * sanity check.
4098 : : *
4099 : : * Testing a tuple directly with BTreeTupleGetNAtts() should generally be
4100 : : * preferred to calling here. That's usually more convenient, and is always
4101 : : * more explicit. Call here instead when offnum's tuple may be a negative
4102 : : * infinity tuple that uses the pre-v11 on-disk representation, or when a low
4103 : : * context check is appropriate. This routine is as strict as possible about
4104 : : * what is expected on each version of btree.
4105 : : */
4106 : : bool
4107 : 132307794 : _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
4108 : : {
2742 tgl@sss.pgh.pa.us 4109 : 132307794 : int16 natts = IndexRelationGetNumberOfAttributes(rel);
4110 : 132307794 : int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
1306 michael@paquier.xyz 4111 : 132307794 : BTPageOpaque opaque = BTPageGetOpaque(page);
4112 : : IndexTuple itup;
4113 : : int tupnatts;
4114 : :
4115 : : /*
4116 : : * We cannot reliably test a deleted or half-dead page, since they have
4117 : : * dummy high keys
4118 : : */
2749 teodor@sigaev.ru 4119 [ - + ]: 132307794 : if (P_IGNORE(opaque))
2749 teodor@sigaev.ru 4120 :UBC 0 : return true;
4121 : :
2749 teodor@sigaev.ru 4122 [ + - - + ]:CBC 132307794 : Assert(offnum >= FirstOffsetNumber &&
4123 : : offnum <= PageGetMaxOffsetNumber(page));
4124 : :
4125 : 132307794 : itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
2414 pg@bowt.ie 4126 [ + + ]: 132307794 : tupnatts = BTreeTupleGetNAtts(itup, rel);
4127 : :
4128 : : /* !heapkeyspace indexes do not support deduplication */
2071 4129 [ - + - - ]: 132307794 : if (!heapkeyspace && BTreeTupleIsPosting(itup))
2071 pg@bowt.ie 4130 :UBC 0 : return false;
4131 : :
4132 : : /* Posting list tuples should never have "pivot heap TID" bit set */
2071 pg@bowt.ie 4133 [ + + ]:CBC 132307794 : if (BTreeTupleIsPosting(itup) &&
4134 [ - + ]: 1574414 : (ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
4135 : : BT_PIVOT_HEAP_TID_ATTR) != 0)
2071 pg@bowt.ie 4136 :UBC 0 : return false;
4137 : :
4138 : : /* INCLUDE indexes do not support deduplication */
2071 pg@bowt.ie 4139 [ + + - + ]:CBC 132307794 : if (natts != nkeyatts && BTreeTupleIsPosting(itup))
2071 pg@bowt.ie 4140 :UBC 0 : return false;
4141 : :
2749 teodor@sigaev.ru 4142 [ + + ]:CBC 132307794 : if (P_ISLEAF(opaque))
4143 : : {
4144 [ + + + + ]: 97361923 : if (offnum >= P_FIRSTDATAKEY(opaque))
4145 : : {
4146 : : /*
4147 : : * Non-pivot tuple should never be explicitly marked as a pivot
4148 : : * tuple
4149 : : */
2071 pg@bowt.ie 4150 [ - + ]: 89568834 : if (BTreeTupleIsPivot(itup))
2414 pg@bowt.ie 4151 :UBC 0 : return false;
4152 : :
4153 : : /*
4154 : : * Leaf tuples that are not the page high key (non-pivot tuples)
4155 : : * should never be truncated. (Note that tupnatts must have been
4156 : : * inferred, even with a posting list tuple, because only pivot
4157 : : * tuples store tupnatts directly.)
4158 : : */
2414 pg@bowt.ie 4159 :CBC 89568834 : return tupnatts == natts;
4160 : : }
4161 : : else
4162 : : {
4163 : : /*
4164 : : * Rightmost page doesn't contain a page high key, so tuple was
4165 : : * checked above as ordinary leaf tuple
4166 : : */
2749 teodor@sigaev.ru 4167 [ - + ]: 7793089 : Assert(!P_RIGHTMOST(opaque));
4168 : :
4169 : : /*
4170 : : * !heapkeyspace high key tuple contains only key attributes. Note
4171 : : * that tupnatts will only have been explicitly represented in
4172 : : * !heapkeyspace indexes that happen to have non-key attributes.
4173 : : */
2414 pg@bowt.ie 4174 [ - + ]: 7793089 : if (!heapkeyspace)
2414 pg@bowt.ie 4175 :UBC 0 : return tupnatts == nkeyatts;
4176 : :
4177 : : /* Use generic heapkeyspace pivot tuple handling */
4178 : : }
4179 : : }
4180 : : else /* !P_ISLEAF(opaque) */
4181 : : {
2749 teodor@sigaev.ru 4182 [ + + + + ]:CBC 34945871 : if (offnum == P_FIRSTDATAKEY(opaque))
4183 : : {
4184 : : /*
4185 : : * The first tuple on any internal page (possibly the first after
4186 : : * its high key) is its negative infinity tuple. Negative
4187 : : * infinity tuples are always truncated to zero attributes. They
4188 : : * are a particular kind of pivot tuple.
4189 : : */
2414 pg@bowt.ie 4190 [ + - ]: 1541473 : if (heapkeyspace)
4191 : 1541473 : return tupnatts == 0;
4192 : :
4193 : : /*
4194 : : * The number of attributes won't be explicitly represented if the
4195 : : * negative infinity tuple was generated during a page split that
4196 : : * occurred with a version of Postgres before v11. There must be
4197 : : * a problem when there is an explicit representation that is
4198 : : * non-zero, or when there is no explicit representation and the
4199 : : * tuple is evidently not a pre-pg_upgrade tuple.
4200 : : *
4201 : : * Prior to v11, downlinks always had P_HIKEY as their offset.
4202 : : * Accept that as an alternative indication of a valid
4203 : : * !heapkeyspace negative infinity tuple.
4204 : : */
2414 pg@bowt.ie 4205 [ # # # # ]:UBC 0 : return tupnatts == 0 ||
2071 4206 : 0 : ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY;
4207 : : }
4208 : : else
4209 : : {
4210 : : /*
4211 : : * !heapkeyspace downlink tuple with separator key contains only
4212 : : * key attributes. Note that tupnatts will only have been
4213 : : * explicitly represented in !heapkeyspace indexes that happen to
4214 : : * have non-key attributes.
4215 : : */
2414 pg@bowt.ie 4216 [ - + ]:CBC 33404398 : if (!heapkeyspace)
2414 pg@bowt.ie 4217 :UBC 0 : return tupnatts == nkeyatts;
4218 : :
4219 : : /* Use generic heapkeyspace pivot tuple handling */
4220 : : }
4221 : : }
4222 : :
4223 : : /* Handle heapkeyspace pivot tuples (excluding minus infinity items) */
2414 pg@bowt.ie 4224 [ - + ]:CBC 41197487 : Assert(heapkeyspace);
4225 : :
4226 : : /*
4227 : : * Explicit representation of the number of attributes is mandatory with
4228 : : * heapkeyspace index pivot tuples, regardless of whether or not there are
4229 : : * non-key attributes.
4230 : : */
2071 4231 [ - + ]: 41197487 : if (!BTreeTupleIsPivot(itup))
2071 pg@bowt.ie 4232 :UBC 0 : return false;
4233 : :
4234 : : /* Pivot tuple should not use posting list representation (redundant) */
2071 pg@bowt.ie 4235 [ - + ]:CBC 41197487 : if (BTreeTupleIsPosting(itup))
2414 pg@bowt.ie 4236 :UBC 0 : return false;
4237 : :
4238 : : /*
4239 : : * Heap TID is a tiebreaker key attribute, so it cannot be untruncated
4240 : : * when any other key attribute is truncated
4241 : : */
2414 pg@bowt.ie 4242 [ + + - + ]:CBC 41197487 : if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts)
2414 pg@bowt.ie 4243 :UBC 0 : return false;
4244 : :
4245 : : /*
4246 : : * Pivot tuple must have at least one untruncated key attribute (minus
4247 : : * infinity pivot tuples are the only exception). Pivot tuples can never
4248 : : * represent that there is a value present for a key attribute that
4249 : : * exceeds pg_index.indnkeyatts for the index.
4250 : : */
2414 pg@bowt.ie 4251 [ + - + - ]:CBC 41197487 : return tupnatts > 0 && tupnatts <= nkeyatts;
4252 : : }
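
Several of the checks above depend on how pivot tuples encode their attribute count: the count plus status bits (such as the BT_PIVOT_HEAP_TID_ATTR flag tested earlier) are packed into the otherwise unused item pointer offset field. The standalone sketch below only illustrates that packing technique; the mask values and names are hypothetical and are not necessarily the bit assignments nbtree.h actually uses.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: low 12 bits hold the attribute count, one high bit
     * flags "pivot tuple carries a heap TID". */
    #define TOY_NATTS_MASK      0x0FFF
    #define TOY_HEAP_TID_FLAG   0x1000

    static uint16_t
    toy_pack(uint16_t natts, bool has_heap_tid)
    {
        return (uint16_t) ((natts & TOY_NATTS_MASK) |
                           (has_heap_tid ? TOY_HEAP_TID_FLAG : 0));
    }

    int
    main(void)
    {
        uint16_t    offset = toy_pack(3, true);

        printf("natts = %u, heap TID flag = %u\n",
               (unsigned) (offset & TOY_NATTS_MASK),
               (unsigned) ((offset & TOY_HEAP_TID_FLAG) != 0));
        return 0;
    }
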
4253 : :
4254 : : /*
4256 : : * _bt_check_third_page() -- check whether tuple fits on a btree page at all.
4257 : : *
4258 : : * We actually need to be able to fit three items on every page, so restrict
4259 : : * any one item to 1/3 the per-page available space. Note that itemsz should
4260 : : * not include the ItemId overhead.
4261 : : *
4262 : : * It might be useful to apply TOAST methods rather than throw an error here.
4263 : : * Using out of line storage would break assumptions made by suffix truncation
4264 : : * and by contrib/amcheck, though.
4265 : : */
4266 : : void
4267 : 132 : _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
4268 : : Page page, IndexTuple newtup)
4269 : : {
4270 : : Size itemsz;
4271 : : BTPageOpaque opaque;
4272 : :
4273 : 132 : itemsz = MAXALIGN(IndexTupleSize(newtup));
4274 : :
4275 : : /* Double check item size against limit */
231 4276 [ - + ]: 132 : if (itemsz <= BTMaxItemSize)
2414 pg@bowt.ie 4277 :UBC 0 : return;
4278 : :
4279 : : /*
4280 : : * Tuple is probably too large to fit on page, but it's possible that the
4281 : : * index uses version 2 or version 3, or that page is an internal page, in
4282 : : * which case a slightly higher limit applies.
4283 : : */
231 pg@bowt.ie 4284 [ + - + - ]:CBC 132 : if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid)
2414 4285 : 132 : return;
4286 : :
4287 : : /*
4288 : : * Internal page insertions cannot fail here, because that would mean that
4289 : : * an earlier leaf level insertion that should have failed didn't
4290 : : */
1306 michael@paquier.xyz 4291 :UBC 0 : opaque = BTPageGetOpaque(page);
2414 pg@bowt.ie 4292 [ # # ]: 0 : if (!P_ISLEAF(opaque))
4293 [ # # ]: 0 : elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"",
4294 : : itemsz, RelationGetRelationName(rel));
4295 : :
4296 [ # # # # : 0 : ereport(ERROR,
# # ]
4297 : : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
4298 : : errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
4299 : : itemsz,
4300 : : needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
4301 : : needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid,
4302 : : RelationGetRelationName(rel)),
4303 : : errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
4304 : : ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)),
4305 : : ItemPointerGetOffsetNumber(BTreeTupleGetHeapTID(newtup)),
4306 : : RelationGetRelationName(heap)),
4307 : : errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
4308 : : "Consider a function index of an MD5 hash of the value, "
4309 : : "or use full text indexing."),
4310 : : errtableconstraint(heap, RelationGetRelationName(rel))));
4311 : : }
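
The "1/3 of a page" ceiling can be estimated with rough arithmetic. The sketch below is a back-of-the-envelope calculation, not the BTMaxItemSize macro: it assumes the default 8192-byte block size and approximate sizes for the page header and btree special space, and it ignores line-pointer, alignment, and heap-TID overhead, so the real limit is somewhat lower.

    #include <stdio.h>

    int
    main(void)
    {
        /* Assumed values for a default build; not taken from the headers. */
        int         blcksz = 8192;      /* default BLCKSZ */
        int         page_header = 24;   /* approximate page header size */
        int         special = 16;       /* approximate btree special space */
        int         usable = blcksz - page_header - special;

        /* three items must always fit, hence the 1/3 restriction */
        printf("approximate per-item ceiling: %d bytes\n", usable / 3);
        return 0;
    }
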
4312 : :
4313 : : /*
4314 : : * Are all attributes in rel "equality is image equality" attributes?
4315 : : *
4316 : : * We use each attribute's BTEQUALIMAGE_PROC opclass procedure. If any
4317 : : * opclass either lacks a BTEQUALIMAGE_PROC procedure or returns false, we
4318 : : * return false; otherwise we return true.
4319 : : *
4320 : : * Returned boolean value is stored in index metapage during index builds.
4321 : : * Deduplication can only be used when we return true.
4322 : : */
4323 : : bool
2071 pg@bowt.ie 4324 :CBC 29269 : _bt_allequalimage(Relation rel, bool debugmessage)
4325 : : {
4326 : 29269 : bool allequalimage = true;
4327 : :
4328 : : /* INCLUDE indexes can never support deduplication */
4329 : 29269 : if (IndexRelationGetNumberOfAttributes(rel) !=
4330 [ + + ]: 29269 : IndexRelationGetNumberOfKeyAttributes(rel))
4331 : 136 : return false;
4332 : :
4333 [ + + ]: 76815 : for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(rel); i++)
4334 : : {
4335 : 47940 : Oid opfamily = rel->rd_opfamily[i];
4336 : 47940 : Oid opcintype = rel->rd_opcintype[i];
4337 : 47940 : Oid collation = rel->rd_indcollation[i];
4338 : : Oid equalimageproc;
4339 : :
4340 : 47940 : equalimageproc = get_opfamily_proc(opfamily, opcintype, opcintype,
4341 : : BTEQUALIMAGE_PROC);
4342 : :
4343 : : /*
4344 : : * If there is no BTEQUALIMAGE_PROC then deduplication is assumed to
4345 : : * be unsafe. Otherwise, actually call proc and see what it says.
4346 : : */
4347 [ + + ]: 47940 : if (!OidIsValid(equalimageproc) ||
4348 [ + + ]: 47704 : !DatumGetBool(OidFunctionCall1Coll(equalimageproc, collation,
4349 : : ObjectIdGetDatum(opcintype))))
4350 : : {
4351 : 258 : allequalimage = false;
4352 : 258 : break;
4353 : : }
4354 : : }
4355 : :
4356 [ + + ]: 29133 : if (debugmessage)
4357 : : {
4358 [ + + ]: 25095 : if (allequalimage)
4359 [ + + ]: 24837 : elog(DEBUG1, "index \"%s\" can safely use deduplication",
4360 : : RelationGetRelationName(rel));
4361 : : else
4362 [ - + ]: 258 : elog(DEBUG1, "index \"%s\" cannot use deduplication",
4363 : : RelationGetRelationName(rel));
4364 : : }
4365 : :
4366 : 29133 : return allequalimage;
4367 : : }
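
For an opclass whose equality really is image equality, the BTEQUALIMAGE_PROC support function can be trivial: as the OidFunctionCall1Coll() call above shows, it receives the opclass input type OID and returns a boolean. The extension-style sketch below is hypothetical (the function name is made up) and is loosely modeled on the in-core generic support function that simply reports "safe".

    #include "postgres.h"
    #include "fmgr.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(my_equalimage);

    /*
     * Hypothetical BTEQUALIMAGE_PROC: report that datum equality implies
     * bitwise image equality for this opclass, making deduplication safe.
     */
    Datum
    my_equalimage(PG_FUNCTION_ARGS)
    {
        Oid         opcintype = PG_GETARG_OID(0);

        (void) opcintype;           /* the type OID is not needed here */
        PG_RETURN_BOOL(true);
    }
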
|