Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * execPartition.c
4 : : * Support routines for partitioning.
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : * IDENTIFICATION
10 : : * src/backend/executor/execPartition.c
11 : : *
12 : : *-------------------------------------------------------------------------
13 : : */
14 : : #include "postgres.h"
15 : :
16 : : #include "access/table.h"
17 : : #include "access/tableam.h"
18 : : #include "catalog/index.h"
19 : : #include "catalog/partition.h"
20 : : #include "executor/execPartition.h"
21 : : #include "executor/executor.h"
22 : : #include "executor/nodeModifyTable.h"
23 : : #include "foreign/fdwapi.h"
24 : : #include "mb/pg_wchar.h"
25 : : #include "miscadmin.h"
26 : : #include "partitioning/partbounds.h"
27 : : #include "partitioning/partdesc.h"
28 : : #include "partitioning/partprune.h"
29 : : #include "rewrite/rewriteManip.h"
30 : : #include "utils/acl.h"
31 : : #include "utils/lsyscache.h"
32 : : #include "utils/partcache.h"
33 : : #include "utils/rls.h"
34 : : #include "utils/ruleutils.h"
35 : :
36 : :
37 : : /*-----------------------
38 : : * PartitionTupleRouting - Encapsulates all information required to
39 : : * route a tuple inserted into a partitioned table to one of its leaf
40 : : * partitions.
41 : : *
42 : : * partition_root
43 : : * The partitioned table that's the target of the command.
44 : : *
45 : : * partition_dispatch_info
46 : : * Array of 'max_dispatch' elements containing a pointer to a
47 : : * PartitionDispatch object for every partitioned table touched by tuple
48 : : * routing. The entry for the target partitioned table is *always*
49 : : * present in the 0th element of this array. See comment for
50 : : * PartitionDispatchData->indexes for details on how this array is
51 : : * indexed.
52 : : *
53 : : * nonleaf_partitions
54 : : * Array of 'max_dispatch' elements containing pointers to fake
55 : : * ResultRelInfo objects for nonleaf partitions, useful for checking
56 : : * the partition constraint.
57 : : *
58 : : * num_dispatch
59 : : * The current number of items stored in the 'partition_dispatch_info'
60 : : * array. Also serves as the index of the next free array element for
61 : : * new PartitionDispatch objects that need to be stored.
62 : : *
63 : : * max_dispatch
64 : : * The current allocated size of the 'partition_dispatch_info' array.
65 : : *
66 : : * partitions
67 : : * Array of 'max_partitions' elements containing a pointer to a
68 : : * ResultRelInfo for every leaf partition touched by tuple routing.
69 : : * Some of these are pointers to ResultRelInfos which are borrowed out of
70 : : * the owning ModifyTableState node. The remainder have been built
71 : : * especially for tuple routing. See comment for
72 : : * PartitionDispatchData->indexes for details on how this array is
73 : : * indexed.
74 : : *
75 : : * is_borrowed_rel
76 : : * Array of 'max_partitions' booleans recording whether a given entry
77 : : * in 'partitions' is a ResultRelInfo pointer borrowed from the owning
78 : : * ModifyTableState node, rather than being built here.
79 : : *
80 : : * num_partitions
81 : : * The current number of items stored in the 'partitions' array. Also
82 : : * serves as the index of the next free array element for new
83 : : * ResultRelInfo objects that need to be stored.
84 : : *
85 : : * max_partitions
86 : : * The current allocated size of the 'partitions' array.
87 : : *
88 : : * memcxt
89 : : * Memory context used to allocate subsidiary structs.
90 : : *-----------------------
91 : : */
92 : : struct PartitionTupleRouting
93 : : {
94 : : Relation partition_root;
95 : : PartitionDispatch *partition_dispatch_info;
96 : : ResultRelInfo **nonleaf_partitions;
97 : : int num_dispatch;
98 : : int max_dispatch;
99 : : ResultRelInfo **partitions;
100 : : bool *is_borrowed_rel;
101 : : int num_partitions;
102 : : int max_partitions;
103 : : MemoryContext memcxt;
104 : : };
105 : :
106 : : /*-----------------------
107 : : * PartitionDispatch - information about one partitioned table in a partition
108 : : * hierarchy required to route a tuple to any of its partitions. A
109 : : * PartitionDispatch is always encapsulated inside a PartitionTupleRouting
110 : : * struct and stored inside its 'partition_dispatch_info' array.
111 : : *
112 : : * reldesc
113 : : * Relation descriptor of the table
114 : : *
115 : : * key
116 : : * Partition key information of the table
117 : : *
118 : : * keystate
119 : : * Execution state required for expressions in the partition key
120 : : *
121 : : * partdesc
122 : : * Partition descriptor of the table
123 : : *
124 : : * tupslot
125 : : * A standalone TupleTableSlot initialized with this table's tuple
126 : : * descriptor, or NULL if no tuple conversion from the parent is
127 : : * required.
128 : : *
129 : : * tupmap
130 : : * TupleConversionMap to convert from the parent's rowtype to this table's
131 : : * rowtype (when extracting the partition key of a tuple just before
132 : : * routing it through this table). A NULL value is stored if no tuple
133 : : * conversion is required.
134 : : *
135 : : * indexes
136 : : * Array of partdesc->nparts elements. For leaf partitions the index
137 : : * corresponds to the partition's ResultRelInfo in the encapsulating
138 : : * PartitionTupleRouting's 'partitions' array. For partitions that
139 : : * are themselves partitioned, the index corresponds to the partition's
140 : : * PartitionDispatch in the 'partition_dispatch_info' array. -1 means
141 : : * we've not yet allocated anything in PartitionTupleRouting for it.
142 : : *-----------------------
143 : : */
144 : : typedef struct PartitionDispatchData
145 : : {
146 : : Relation reldesc;
147 : : PartitionKey key;
148 : : List *keystate; /* list of ExprState */
149 : : PartitionDesc partdesc;
150 : : TupleTableSlot *tupslot;
151 : : AttrMap *tupmap;
152 : : int indexes[FLEXIBLE_ARRAY_MEMBER];
153 : : } PartitionDispatchData;
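: :
: : /*
: :  * Worked example (illustrative editor's sketch, not part of the original
: :  * file): take a root table "p" with a leaf partition "p1" and a
: :  * sub-partitioned partition "p2" that has a single leaf "p21". After one
: :  * tuple has been routed to p1 and another to p21, the arrays could look
: :  * like this:
: :  *
: :  *     partition_dispatch_info[0] = dispatch for p   (indexes = {0, 1})
: :  *     partition_dispatch_info[1] = dispatch for p2  (indexes = {1})
: :  *     partitions[0] = ResultRelInfo for p1
: :  *     partitions[1] = ResultRelInfo for p21
: :  *
: :  * In p's dispatch, indexes[0] = 0 points into 'partitions' because p1
: :  * is a leaf, while indexes[1] = 1 points into 'partition_dispatch_info'
: :  * because p2 is itself partitioned; -1 would mean that nothing has been
: :  * set up for that partition yet.
: :  */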
154 : :
155 : :
156 : : static ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate,
157 : : EState *estate, PartitionTupleRouting *proute,
158 : : PartitionDispatch dispatch,
159 : : ResultRelInfo *rootResultRelInfo,
160 : : int partidx);
161 : : static void ExecInitRoutingInfo(ModifyTableState *mtstate,
162 : : EState *estate,
163 : : PartitionTupleRouting *proute,
164 : : PartitionDispatch dispatch,
165 : : ResultRelInfo *partRelInfo,
166 : : int partidx,
167 : : bool is_borrowed_rel);
168 : : static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate,
169 : : PartitionTupleRouting *proute,
170 : : Oid partoid, PartitionDispatch parent_pd,
171 : : int partidx, ResultRelInfo *rootResultRelInfo);
172 : : static void FormPartitionKeyDatum(PartitionDispatch pd,
173 : : TupleTableSlot *slot,
174 : : EState *estate,
175 : : Datum *values,
176 : : bool *isnull);
177 : : static int get_partition_for_tuple(PartitionDispatch pd, const Datum *values,
178 : : const bool *isnull);
179 : : static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
180 : : const Datum *values,
181 : : const bool *isnull,
182 : : int maxfieldlen);
183 : : static List *adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri);
184 : : static List *adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap);
185 : : static PartitionPruneState *CreatePartitionPruneState(EState *estate,
186 : : PartitionPruneInfo *pruneinfo,
187 : : Bitmapset **all_leafpart_rtis);
188 : : static void InitPartitionPruneContext(PartitionPruneContext *context,
189 : : List *pruning_steps,
190 : : PartitionDesc partdesc,
191 : : PartitionKey partkey,
192 : : PlanState *planstate,
193 : : ExprContext *econtext);
194 : : static void InitExecPartitionPruneContexts(PartitionPruneState *prunestate,
195 : : PlanState *parent_plan,
196 : : Bitmapset *initially_valid_subplans,
197 : : int n_total_subplans);
198 : : static void find_matching_subplans_recurse(PartitionPruningData *prunedata,
199 : : PartitionedRelPruningData *pprune,
200 : : bool initial_prune,
201 : : Bitmapset **validsubplans,
202 : : Bitmapset **validsubplan_rtis);
203 : :
204 : :
205 : : /*
206 : : * ExecSetupPartitionTupleRouting - sets up information needed during
207 : : * tuple routing for partitioned tables, encapsulates it in
208 : : * PartitionTupleRouting, and returns it.
209 : : *
210 : : * Callers must use the returned PartitionTupleRouting during calls to
211 : : * ExecFindPartition(). The actual ResultRelInfo for a partition is only
212 : : * allocated when the partition is found for the first time.
213 : : *
214 : : * The current memory context is used to allocate this struct and all
215 : : * subsidiary structs that will be allocated from it later on. Typically
216 : : * it should be estate->es_query_cxt.
217 : : */
218 : : PartitionTupleRouting *
1715 tgl@sss.pgh.pa.us 219 :CBC 2818 : ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
220 : : {
221 : : PartitionTupleRouting *proute;
222 : :
223 : : /*
224 : : * Here we attempt to expend as little effort as possible in setting up
225 : : * the PartitionTupleRouting. Each partition's ResultRelInfo is built on
226 : : * demand, only when we actually need to route a tuple to that partition.
227 : : * The reason for this is that a common case is for INSERT to insert a
228 : : * single tuple into a partitioned table and this must be fast.
229 : : */
5 michael@paquier.xyz 230 :GNC 2818 : proute = palloc0_object(PartitionTupleRouting);
2587 alvherre@alvh.no-ip. 231 :CBC 2818 : proute->partition_root = rel;
232 : 2818 : proute->memcxt = CurrentMemoryContext;
233 : : /* Rest of members initialized by zeroing */
234 : :
235 : : /*
236 : : * Initialize this table's PartitionDispatch object. We pass the
237 : : * parent as NULL, since we don't need to care about any parent of
238 : : * the target partitioned table.
239 : : */
2476 rhaas@postgresql.org 240 : 2818 : ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel),
241 : : NULL, 0, NULL);
242 : :
2903 243 : 2818 : return proute;
244 : : }
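: :
: : /*
: :  * Minimal usage sketch (editor's illustration; the caller-side variable
: :  * names are hypothetical). This is roughly the pattern that ExecInsert()
: :  * and COPY FROM follow when the target table is partitioned:
: :  *
: :  *     proute = ExecSetupPartitionTupleRouting(estate, rootRel);
: :  *     for each input tuple stored in 'slot':
: :  *         rri = ExecFindPartition(mtstate, rootResultRelInfo,
: :  *                                 proute, slot, estate);
: :  *         ... insert the tuple using 'rri' ...
: :  *     ExecCleanupTupleRouting(mtstate, proute);
: :  */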
245 : :
246 : : /*
247 : : * ExecFindPartition -- Return the ResultRelInfo for the leaf partition that
248 : : * the tuple contained in *slot should belong to.
249 : : *
250 : : * If the partition's ResultRelInfo does not yet exist in 'proute' then we set
251 : : * one up or reuse one from mtstate's resultRelInfo array. When reusing a
252 : : * ResultRelInfo from the mtstate we verify that the relation is a valid
253 : : * target for INSERTs and initialize tuple routing information.
254 : : *
255 : : * rootResultRelInfo is the relation named in the query.
256 : : *
257 : : * estate must be non-NULL; we'll need it to compute any expressions in the
258 : : * partition keys. Also, its per-tuple contexts are used as evaluation
259 : : * scratch space.
260 : : *
261 : : * If no leaf partition is found, this routine errors out with the appropriate
262 : : * error message. An error may also be raised if the found target partition
263 : : * is not a valid target for an INSERT.
264 : : */
265 : : ResultRelInfo *
2587 alvherre@alvh.no-ip. 266 : 516638 : ExecFindPartition(ModifyTableState *mtstate,
267 : : ResultRelInfo *rootResultRelInfo,
268 : : PartitionTupleRouting *proute,
269 : : TupleTableSlot *slot, EState *estate)
270 : : {
271 : 516638 : PartitionDispatch *pd = proute->partition_dispatch_info;
272 : : Datum values[PARTITION_MAX_KEYS];
273 : : bool isnull[PARTITION_MAX_KEYS];
274 : : Relation rel;
275 : : PartitionDispatch dispatch;
276 : : PartitionDesc partdesc;
2953 rhaas@postgresql.org 277 [ + + ]: 516638 : ExprContext *ecxt = GetPerTupleExprContext(estate);
1925 alvherre@alvh.no-ip. 278 : 516638 : TupleTableSlot *ecxt_scantuple_saved = ecxt->ecxt_scantuple;
279 : 516638 : TupleTableSlot *rootslot = slot;
2694 280 : 516638 : TupleTableSlot *myslot = NULL;
281 : : MemoryContext oldcxt;
1925 282 : 516638 : ResultRelInfo *rri = NULL;
283 : :
284 : : /* use per-tuple context here to avoid leaking memory */
2694 285 [ + - ]: 516638 : oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
286 : :
287 : : /*
288 : : * First check the root table's partition constraint, if any. No point in
289 : : * routing the tuple if it doesn't belong in the root table itself.
290 : : */
1917 tgl@sss.pgh.pa.us 291 [ + + ]: 516638 : if (rootResultRelInfo->ri_RelationDesc->rd_rel->relispartition)
2587 alvherre@alvh.no-ip. 292 : 2251 : ExecPartitionCheck(rootResultRelInfo, slot, estate, true);
293 : :
294 : : /* start with the root partitioned table */
2694 295 : 516622 : dispatch = pd[0];
1925 296 [ + + ]: 1091420 : while (dispatch != NULL)
297 : : {
2587 298 : 574894 : int partidx = -1;
299 : : bool is_leaf;
300 : :
301 [ - + ]: 574894 : CHECK_FOR_INTERRUPTS();
302 : :
2694 303 : 574894 : rel = dispatch->reldesc;
2587 304 : 574894 : partdesc = dispatch->partdesc;
305 : :
306 : : /*
307 : : * Extract partition key from tuple. Expression evaluation machinery
308 : : * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
309 : : * point to the correct tuple slot. The slot might have changed from
310 : : * what was used for the parent table if the table of the current
311 : : * partitioning level has a different tuple descriptor from the parent.
312 : : * So update ecxt_scantuple accordingly.
313 : : */
2953 rhaas@postgresql.org 314 : 574894 : ecxt->ecxt_scantuple = slot;
2694 alvherre@alvh.no-ip. 315 : 574894 : FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);
316 : :
317 : : /*
318 : : * If this partitioned table has no partitions or no partition for
319 : : * these values, error out.
320 : : */
2587 321 [ + + + + ]: 1149761 : if (partdesc->nparts == 0 ||
1349 322 : 574873 : (partidx = get_partition_for_tuple(dispatch, values, isnull)) < 0)
323 : : {
324 : : char *val_desc;
325 : :
2587 326 : 77 : val_desc = ExecBuildSlotPartitionKeyDescription(rel,
327 : : values, isnull, 64);
328 [ - + ]: 77 : Assert(OidIsValid(RelationGetRelid(rel)));
329 [ + - + + ]: 77 : ereport(ERROR,
330 : : (errcode(ERRCODE_CHECK_VIOLATION),
331 : : errmsg("no partition of relation \"%s\" found for row",
332 : : RelationGetRelationName(rel)),
333 : : val_desc ?
334 : : errdetail("Partition key of the failing row contains %s.",
335 : : val_desc) : 0,
336 : : errtable(rel)));
337 : : }
338 : :
1884 heikki.linnakangas@i 339 : 574811 : is_leaf = partdesc->is_leaf[partidx];
340 [ + + ]: 574811 : if (is_leaf)
341 : : {
342 : : /*
343 : : * We've reached the leaf -- hurray, we're done. Look to see if
344 : : * we've already got a ResultRelInfo for this partition.
345 : : */
2587 alvherre@alvh.no-ip. 346 [ + + ]: 516538 : if (likely(dispatch->indexes[partidx] >= 0))
347 : : {
348 : : /* ResultRelInfo already built */
349 [ - + ]: 512652 : Assert(dispatch->indexes[partidx] < proute->num_partitions);
350 : 512652 : rri = proute->partitions[dispatch->indexes[partidx]];
351 : : }
352 : : else
353 : : {
354 : : /*
355 : : * If the partition is known in the owning ModifyTableState
356 : : * node, we can re-use that ResultRelInfo instead of creating
357 : : * a new one with ExecInitPartitionInfo().
358 : : */
1715 tgl@sss.pgh.pa.us 359 : 3886 : rri = ExecLookupResultRelByOid(mtstate,
360 : 3886 : partdesc->oids[partidx],
361 : : true, false);
362 [ + + ]: 3886 : if (rri)
363 : : {
103 dean.a.rasheed@gmail 364 : 254 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
365 : :
366 : : /* Verify this ResultRelInfo allows INSERTs */
367 [ + - ]: 254 : CheckValidResultRel(rri, CMD_INSERT,
368 : : node ? node->onConflictAction : ONCONFLICT_NONE,
369 : : NIL);
370 : :
371 : : /*
372 : : * Initialize information needed to insert this and
373 : : * subsequent tuples routed to this partition.
374 : : */
1715 tgl@sss.pgh.pa.us 375 : 254 : ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
376 : : rri, partidx, true);
377 : : }
378 : : else
379 : : {
380 : : /* We need to create a new one. */
2587 alvherre@alvh.no-ip. 381 : 3632 : rri = ExecInitPartitionInfo(mtstate, estate, proute,
382 : : dispatch,
383 : : rootResultRelInfo, partidx);
384 : : }
385 : : }
1925 386 [ - + ]: 516526 : Assert(rri != NULL);
387 : :
388 : : /* Signal to terminate the loop */
389 : 516526 : dispatch = NULL;
390 : : }
391 : : else
392 : : {
393 : : /*
394 : : * Partition is a sub-partitioned table; get the PartitionDispatch
395 : : */
2587 396 [ + + ]: 58273 : if (likely(dispatch->indexes[partidx] >= 0))
397 : : {
398 : : /* Already built. */
399 [ - + ]: 57673 : Assert(dispatch->indexes[partidx] < proute->num_dispatch);
400 : :
1925 401 : 57673 : rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
402 : :
403 : : /*
404 : : * Move down to the next partition level and search again
405 : : * until we find a leaf partition that matches this tuple
406 : : */
2587 407 : 57673 : dispatch = pd[dispatch->indexes[partidx]];
408 : : }
409 : : else
410 : : {
411 : : /* Not yet built. Do that now. */
412 : : PartitionDispatch subdispatch;
413 : :
414 : : /*
415 : : * Create the new PartitionDispatch. We pass the current one
416 : : * in as the parent PartitionDispatch
417 : : */
1772 heikki.linnakangas@i 418 : 600 : subdispatch = ExecInitPartitionDispatchInfo(estate,
419 : : proute,
2587 alvherre@alvh.no-ip. 420 : 600 : partdesc->oids[partidx],
421 : : dispatch, partidx,
422 : : mtstate->rootResultRelInfo);
423 [ + - - + ]: 600 : Assert(dispatch->indexes[partidx] >= 0 &&
424 : : dispatch->indexes[partidx] < proute->num_dispatch);
425 : :
1925 426 : 600 : rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
2587 427 : 600 : dispatch = subdispatch;
428 : : }
429 : :
430 : : /*
431 : : * Convert the tuple to the new parent's layout, if different from
432 : : * the previous parent.
433 : : */
1925 434 [ + + ]: 58273 : if (dispatch->tupslot)
435 : : {
436 : 30858 : AttrMap *map = dispatch->tupmap;
437 : 30858 : TupleTableSlot *tempslot = myslot;
438 : :
439 : 30858 : myslot = dispatch->tupslot;
440 : 30858 : slot = execute_attr_map_slot(map, slot, myslot);
441 : :
442 [ + + ]: 30858 : if (tempslot != NULL)
443 : 147 : ExecClearTuple(tempslot);
444 : : }
445 : : }
446 : :
447 : : /*
448 : : * If this partition is the default one, we must check its partition
449 : : * constraint now, which may have changed concurrently due to
450 : : * partitions being added to the parent.
451 : : *
452 : : * (We do this here, and do not rely on ExecInsert doing it, because
453 : : * we don't want to miss doing it for non-leaf partitions.)
454 : : */
455 [ + + ]: 574799 : if (partidx == partdesc->boundinfo->default_index)
456 : : {
457 : : /*
458 : : * The tuple must match the partition's layout for the constraint
459 : : * expression to be evaluated successfully. If the partition is
460 : : * sub-partitioned, that would already be the case due to the code
461 : : * above, but for a leaf partition the tuple still matches the
462 : : * parent's layout.
463 : : *
464 : : * Note that we have a map to convert from root to current
465 : : * partition, but not from immediate parent to current partition.
466 : : * So if we have to convert, do it from the root slot; if not, use
467 : : * the root slot as-is.
468 : : */
1884 heikki.linnakangas@i 469 [ + + ]: 416 : if (is_leaf)
470 : : {
1110 alvherre@alvh.no-ip. 471 : 394 : TupleConversionMap *map = ExecGetRootToChildMap(rri, estate);
472 : :
1925 473 [ + + ]: 394 : if (map)
474 : 81 : slot = execute_attr_map_slot(map->attrMap, rootslot,
475 : : rri->ri_PartitionTupleSlot);
476 : : else
477 : 313 : slot = rootslot;
478 : : }
479 : :
480 : 416 : ExecPartitionCheck(rri, slot, estate, true);
481 : : }
482 : : }
483 : :
484 : : /* Release the tuple in the lowest parent's dedicated slot. */
485 [ + + ]: 516526 : if (myslot != NULL)
486 : 30692 : ExecClearTuple(myslot);
487 : : /* and restore ecxt's scantuple */
488 : 516526 : ecxt->ecxt_scantuple = ecxt_scantuple_saved;
489 : 516526 : MemoryContextSwitchTo(oldcxt);
490 : :
491 : 516526 : return rri;
492 : : }
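: :
: : /*
: :  * Routing trace (illustrative editor's sketch, reusing the example
: :  * hierarchy from the PartitionDispatchData comment above): to route a
: :  * tuple that belongs in leaf "p21", the loop above first evaluates p's
: :  * partition key, finds that the matching partition "p2" is itself
: :  * partitioned, and follows the downlink in pd[0]->indexes to p2's
: :  * PartitionDispatch, converting the tuple to p2's rowtype if it
: :  * differs. It then evaluates p2's key, lands on leaf "p21", and returns
: :  * its ResultRelInfo, building or borrowing one first if this is the
: :  * first tuple routed there.
: :  */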
493 : :
494 : : /*
495 : : * IsIndexCompatibleAsArbiter
496 : : * Return true if two indexes are identical for INSERT ON CONFLICT
497 : : * purposes.
498 : : *
499 : : * Only indexes of the same relation are supported.
500 : : */
501 : : static bool
14 alvherre@kurilemu.de 502 :GNC 6 : IsIndexCompatibleAsArbiter(Relation arbiterIndexRelation,
503 : : IndexInfo *arbiterIndexInfo,
504 : : Relation indexRelation,
505 : : IndexInfo *indexInfo)
506 : : {
507 [ - + ]: 6 : Assert(arbiterIndexRelation->rd_index->indrelid == indexRelation->rd_index->indrelid);
508 : :
509 : : /* must match whether they're unique */
510 [ - + ]: 6 : if (arbiterIndexInfo->ii_Unique != indexInfo->ii_Unique)
14 alvherre@kurilemu.de 511 :UNC 0 : return false;
512 : :
513 : : /* No support currently for comparing exclusion indexes. */
14 alvherre@kurilemu.de 514 [ + - ]:GNC 6 : if (arbiterIndexInfo->ii_ExclusionOps != NULL ||
515 [ - + ]: 6 : indexInfo->ii_ExclusionOps != NULL)
14 alvherre@kurilemu.de 516 :UNC 0 : return false;
517 : :
518 : : /* the "nulls not distinct" criterion must match */
14 alvherre@kurilemu.de 519 :GNC 6 : if (arbiterIndexInfo->ii_NullsNotDistinct !=
520 [ - + ]: 6 : indexInfo->ii_NullsNotDistinct)
14 alvherre@kurilemu.de 521 :UNC 0 : return false;
522 : :
523 : : /* number of key attributes must match */
14 alvherre@kurilemu.de 524 :GNC 6 : if (arbiterIndexInfo->ii_NumIndexKeyAttrs !=
525 [ - + ]: 6 : indexInfo->ii_NumIndexKeyAttrs)
14 alvherre@kurilemu.de 526 :UNC 0 : return false;
527 : :
14 alvherre@kurilemu.de 528 [ + - ]:GNC 6 : for (int i = 0; i < arbiterIndexInfo->ii_NumIndexKeyAttrs; i++)
529 : : {
530 : 6 : if (arbiterIndexRelation->rd_indcollation[i] !=
531 [ + - ]: 6 : indexRelation->rd_indcollation[i])
532 : 6 : return false;
533 : :
14 alvherre@kurilemu.de 534 :UNC 0 : if (arbiterIndexRelation->rd_opfamily[i] !=
535 [ # # ]: 0 : indexRelation->rd_opfamily[i])
536 : 0 : return false;
537 : :
538 : 0 : if (arbiterIndexRelation->rd_index->indkey.values[i] !=
539 [ # # ]: 0 : indexRelation->rd_index->indkey.values[i])
540 : 0 : return false;
541 : : }
542 : :
543 [ # # ]: 0 : if (list_difference(RelationGetIndexExpressions(arbiterIndexRelation),
544 : 0 : RelationGetIndexExpressions(indexRelation)) != NIL)
545 : 0 : return false;
546 : :
547 [ # # ]: 0 : if (list_difference(RelationGetIndexPredicate(arbiterIndexRelation),
548 : 0 : RelationGetIndexPredicate(indexRelation)) != NIL)
549 : 0 : return false;
550 : 0 : return true;
551 : : }
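: :
: : /*
: :  * Illustrative note (editor's sketch): REINDEX CONCURRENTLY transiently
: :  * creates a sibling index (named with a "_ccnew" suffix) that is
: :  * identical to the index being rebuilt, so by the criteria above such a
: :  * pair compares as compatible and both can serve as arbiters while the
: :  * swap is in progress.
: :  */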
552 : :
553 : : /*
554 : : * ExecInitPartitionInfo
555 : : * Lock the partition and initialize ResultRelInfo. Also set up other
556 : : * information for the partition and store it in the next empty slot in
557 : : * the proute->partitions array.
558 : : *
559 : : * Returns the ResultRelInfo
560 : : */
561 : : static ResultRelInfo *
2587 alvherre@alvh.no-ip. 562 :CBC 3632 : ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
563 : : PartitionTupleRouting *proute,
564 : : PartitionDispatch dispatch,
565 : : ResultRelInfo *rootResultRelInfo,
566 : : int partidx)
567 : : {
2811 rhaas@postgresql.org 568 : 3632 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
1727 alvherre@alvh.no-ip. 569 : 3632 : Oid partOid = dispatch->partdesc->oids[partidx];
570 : : Relation partrel;
1772 heikki.linnakangas@i 571 : 3632 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
2801 alvherre@alvh.no-ip. 572 : 3632 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
573 : : ResultRelInfo *leaf_part_rri;
574 : : MemoryContext oldcxt;
2190 michael@paquier.xyz 575 : 3632 : AttrMap *part_attmap = NULL;
576 : : bool found_whole_row;
577 : :
2587 alvherre@alvh.no-ip. 578 : 3632 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
579 : :
1727 580 : 3632 : partrel = table_open(partOid, RowExclusiveLock);
581 : :
2822 582 : 3632 : leaf_part_rri = makeNode(ResultRelInfo);
2854 rhaas@postgresql.org 583 : 3632 : InitResultRelInfo(leaf_part_rri,
584 : : partrel,
585 : : 0,
586 : : rootResultRelInfo,
587 : : estate->es_instrument);
588 : :
589 : : /*
590 : : * Verify result relation is a valid target for an INSERT. An UPDATE of a
591 : : * partition-key becomes a DELETE+INSERT operation, so this check is still
592 : : * required when the operation is CMD_UPDATE.
593 : : */
103 dean.a.rasheed@gmail 594 [ + + ]: 3632 : CheckValidResultRel(leaf_part_rri, CMD_INSERT,
595 : : node ? node->onConflictAction : ONCONFLICT_NONE, NIL);
596 : :
597 : : /*
598 : : * Open partition indices. The user may have asked to check for conflicts
599 : : * within this leaf partition and do "nothing" instead of throwing an
600 : : * error. Be prepared in that case by initializing the index information
601 : : * needed by ExecInsert() to perform speculative insertions.
602 : : */
2854 rhaas@postgresql.org 603 [ + + ]: 3626 : if (partrel->rd_rel->relhasindex &&
604 [ + - ]: 1038 : leaf_part_rri->ri_IndexRelationDescs == NULL)
605 : 1038 : ExecOpenIndices(leaf_part_rri,
2829 alvherre@alvh.no-ip. 606 [ + + ]: 1973 : (node != NULL &&
607 [ + + ]: 935 : node->onConflictAction != ONCONFLICT_NONE));
608 : :
609 : : /*
610 : : * Build WITH CHECK OPTION constraints for the partition. Note that we
611 : : * didn't build the withCheckOptionList for partitions within the planner,
612 : : * but simple translation of varattnos will suffice. This only occurs for
613 : : * the INSERT case or in the case of UPDATE/MERGE tuple routing where we
614 : : * didn't find a result rel to reuse.
615 : : */
2854 rhaas@postgresql.org 616 [ + + + + ]: 3626 : if (node && node->withCheckOptionLists != NIL)
617 : : {
618 : : List *wcoList;
619 : 48 : List *wcoExprs = NIL;
620 : : ListCell *ll;
621 : :
622 : : /*
623 : : * In the case of INSERT on a partitioned table, there is only one
624 : : * plan. Likewise, there is only one WCO list, not one per partition.
625 : : * For UPDATE/MERGE, there are as many WCO lists as there are plans.
626 : : */
627 [ + + + - : 48 : Assert((node->operation == CMD_INSERT &&
- + + + -
+ + - -
+ ]
628 : : list_length(node->withCheckOptionLists) == 1 &&
629 : : list_length(node->resultRelations) == 1) ||
630 : : (node->operation == CMD_UPDATE &&
631 : : list_length(node->withCheckOptionLists) ==
632 : : list_length(node->resultRelations)) ||
633 : : (node->operation == CMD_MERGE &&
634 : : list_length(node->withCheckOptionLists) ==
635 : : list_length(node->resultRelations)));
636 : :
637 : : /*
638 : : * Use the WCO list of the first plan as a reference to calculate
639 : : * attno's for the WCO list of this partition. In the INSERT case,
640 : : * that refers to the root partitioned table, whereas in the UPDATE
641 : : * tuple routing case, that refers to the first partition in the
642 : : * mtstate->resultRelInfo array. In any case, both that relation and
643 : : * this partition should have the same columns, so we should be able
644 : : * to map attributes successfully.
645 : : */
646 : 48 : wcoList = linitial(node->withCheckOptionLists);
647 : :
648 : : /*
649 : : * Convert Vars in it to contain this partition's attribute numbers.
650 : : */
651 : : part_attmap =
2190 michael@paquier.xyz 652 : 48 : build_attrmap_by_name(RelationGetDescr(partrel),
653 : : RelationGetDescr(firstResultRel),
654 : : false);
655 : : wcoList = (List *)
2798 alvherre@alvh.no-ip. 656 : 48 : map_variable_attnos((Node *) wcoList,
657 : : firstVarno, 0,
658 : : part_attmap,
659 : 48 : RelationGetForm(partrel)->reltype,
660 : : &found_whole_row);
661 : : /* We ignore the value of found_whole_row. */
662 : :
2854 rhaas@postgresql.org 663 [ + - + + : 135 : foreach(ll, wcoList)
+ + ]
664 : : {
1611 peter@eisentraut.org 665 : 87 : WithCheckOption *wco = lfirst_node(WithCheckOption, ll);
2854 rhaas@postgresql.org 666 : 87 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
667 : : &mtstate->ps);
668 : :
669 : 87 : wcoExprs = lappend(wcoExprs, wcoExpr);
670 : : }
671 : :
672 : 48 : leaf_part_rri->ri_WithCheckOptions = wcoList;
673 : 48 : leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
674 : : }
675 : :
676 : : /*
677 : : * Build the RETURNING projection for the partition. Note that we didn't
678 : : * build the returningList for partitions within the planner, but simple
679 : : * translation of varattnos will suffice. This only occurs for the INSERT
680 : : * case or in the case of UPDATE/MERGE tuple routing where we didn't find
681 : : * a result rel to reuse.
682 : : */
683 [ + + + + ]: 3626 : if (node && node->returningLists != NIL)
684 : : {
685 : : TupleTableSlot *slot;
686 : : ExprContext *econtext;
687 : : List *returningList;
688 : :
689 : : /* See the comment above for WCO lists. */
690 [ + + + - : 106 : Assert((node->operation == CMD_INSERT &&
- + + + -
+ + - -
+ ]
691 : : list_length(node->returningLists) == 1 &&
692 : : list_length(node->resultRelations) == 1) ||
693 : : (node->operation == CMD_UPDATE &&
694 : : list_length(node->returningLists) ==
695 : : list_length(node->resultRelations)) ||
696 : : (node->operation == CMD_MERGE &&
697 : : list_length(node->returningLists) ==
698 : : list_length(node->resultRelations)));
699 : :
700 : : /*
701 : : * Use the RETURNING list of the first plan as a reference to
702 : : * calculate attno's for the RETURNING list of this partition. See
703 : : * the comment above for WCO lists for more details on why this is
704 : : * okay.
705 : : */
706 : 106 : returningList = linitial(node->returningLists);
707 : :
708 : : /*
709 : : * Convert Vars in it to contain this partition's attribute numbers.
710 : : */
2190 michael@paquier.xyz 711 [ + - ]: 106 : if (part_attmap == NULL)
712 : : part_attmap =
713 : 106 : build_attrmap_by_name(RelationGetDescr(partrel),
714 : : RelationGetDescr(firstResultRel),
715 : : false);
716 : : returningList = (List *)
2798 alvherre@alvh.no-ip. 717 : 106 : map_variable_attnos((Node *) returningList,
718 : : firstVarno, 0,
719 : : part_attmap,
720 : 106 : RelationGetForm(partrel)->reltype,
721 : : &found_whole_row);
722 : : /* We ignore the value of found_whole_row. */
723 : :
2811 rhaas@postgresql.org 724 : 106 : leaf_part_rri->ri_returningList = returningList;
725 : :
726 : : /*
727 : : * Initialize the projection itself.
728 : : *
729 : : * Use the slot and the expression context that would have been set up
730 : : * in ExecInitModifyTable() for projection's output.
731 : : */
2854 732 [ - + ]: 106 : Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
733 : 106 : slot = mtstate->ps.ps_ResultTupleSlot;
734 [ - + ]: 106 : Assert(mtstate->ps.ps_ExprContext != NULL);
735 : 106 : econtext = mtstate->ps.ps_ExprContext;
736 : 106 : leaf_part_rri->ri_projectReturning =
737 : 106 : ExecBuildProjectionInfo(returningList, econtext, slot,
738 : : &mtstate->ps, RelationGetDescr(partrel));
739 : : }
740 : :
741 : : /* Set up information needed for routing tuples to the partition. */
2587 alvherre@alvh.no-ip. 742 : 3626 : ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
743 : : leaf_part_rri, partidx, false);
744 : :
745 : : /*
746 : : * If there is an ON CONFLICT clause, initialize state for it.
747 : : */
2822 748 [ + + + + ]: 3626 : if (node && node->onConflictAction != ONCONFLICT_NONE)
749 : : {
750 : 117 : TupleDesc partrelDesc = RelationGetDescr(partrel);
751 : 117 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
752 : 117 : List *arbiterIndexes = NIL;
14 alvherre@kurilemu.de 753 :GNC 117 : int additional_arbiters = 0;
754 : :
755 : : /*
756 : : * If there is a list of arbiter indexes, map it to a list of indexes
757 : : * in the partition. We also add any indexes that are identical to
758 : : * one of those arbiters, to cover the case where an arbiter is
759 : : * concurrently being reindexed.
760 : : */
1217 tgl@sss.pgh.pa.us 761 [ + + ]:CBC 117 : if (rootResultRelInfo->ri_onConflictArbiterIndexes != NIL)
762 : : {
14 alvherre@kurilemu.de 763 :GNC 89 : List *unparented_idxs = NIL,
764 : 89 : *arbiters_listidxs = NIL;
765 : :
766 [ + + ]: 184 : for (int listidx = 0; listidx < leaf_part_rri->ri_NumIndices; listidx++)
767 : : {
768 : : Oid indexoid;
769 : : List *ancestors;
770 : :
771 : : /*
772 : : * If one of this index's ancestors is in the root's arbiter
773 : : * list, then use this index as arbiter for this partition.
774 : : * Otherwise, if this index has no parent, track it for later,
775 : : * in case REINDEX CONCURRENTLY is working on one of the
776 : : * arbiters.
777 : : *
778 : : * XXX get_partition_ancestors is slow: it scans pg_inherits
779 : : * each time. Consider a syscache or some other way to cache?
780 : : */
781 : 95 : indexoid = RelationGetRelid(leaf_part_rri->ri_IndexRelationDescs[listidx]);
782 : 95 : ancestors = get_partition_ancestors(indexoid);
783 [ + + ]: 95 : if (ancestors != NIL)
784 : : {
785 [ + - + - : 178 : foreach_oid(parent_idx, rootResultRelInfo->ri_onConflictArbiterIndexes)
+ + ]
786 : : {
787 [ + - ]: 89 : if (list_member_oid(ancestors, parent_idx))
788 : : {
789 : 89 : arbiterIndexes = lappend_oid(arbiterIndexes, indexoid);
790 : 89 : arbiters_listidxs = lappend_int(arbiters_listidxs, listidx);
791 : 89 : break;
792 : : }
793 : : }
794 : : }
795 : : else
796 : 6 : unparented_idxs = lappend_int(unparented_idxs, listidx);
2822 alvherre@alvh.no-ip. 797 :CBC 95 : list_free(ancestors);
798 : : }
799 : :
800 : : /*
801 : : * If we found any indexes with no ancestors, it's possible that
802 : : * some arbiter index is undergoing concurrent reindex. Match all
803 : : * unparented indexes against the arbiters, and add the matching
804 : : * ones as "additional arbiters".
805 : : *
806 : : * This is critical so that all concurrent transactions use the
807 : : * same set as arbiters during REINDEX CONCURRENTLY, to avoid
808 : : * spurious "duplicate key" errors.
809 : : */
14 alvherre@kurilemu.de 810 [ + + + - ]:GNC 89 : if (unparented_idxs && arbiterIndexes)
811 : : {
812 [ + - + + : 18 : foreach_int(unparented_i, unparented_idxs)
+ + ]
813 : : {
814 : : Relation unparented_rel;
815 : : IndexInfo *unparented_ii;
816 : :
817 : 6 : unparented_rel = leaf_part_rri->ri_IndexRelationDescs[unparented_i];
818 : 6 : unparented_ii = leaf_part_rri->ri_IndexRelationInfo[unparented_i];
819 : :
820 [ - + ]: 6 : Assert(!list_member_oid(arbiterIndexes,
821 : : unparented_rel->rd_index->indexrelid));
822 : :
823 : : /* Ignore indexes not ready */
824 [ - + ]: 6 : if (!unparented_ii->ii_ReadyForInserts)
14 alvherre@kurilemu.de 825 :UNC 0 : continue;
826 : :
14 alvherre@kurilemu.de 827 [ + - + + :GNC 18 : foreach_int(arbiter_i, arbiters_listidxs)
+ + ]
828 : : {
829 : : Relation arbiter_rel;
830 : : IndexInfo *arbiter_ii;
831 : :
832 : 6 : arbiter_rel = leaf_part_rri->ri_IndexRelationDescs[arbiter_i];
833 : 6 : arbiter_ii = leaf_part_rri->ri_IndexRelationInfo[arbiter_i];
834 : :
835 : : /*
836 : : * If the non-ancestor index is compatible with the
837 : : * arbiter, use the non-ancestor as arbiter too.
838 : : */
839 [ - + ]: 6 : if (IsIndexCompatibleAsArbiter(arbiter_rel,
840 : : arbiter_ii,
841 : : unparented_rel,
842 : : unparented_ii))
843 : : {
14 alvherre@kurilemu.de 844 :UNC 0 : arbiterIndexes = lappend_oid(arbiterIndexes,
845 : 0 : unparented_rel->rd_index->indexrelid);
846 : 0 : additional_arbiters++;
847 : 0 : break;
848 : : }
849 : : }
850 : : }
851 : : }
14 alvherre@kurilemu.de 852 :GNC 89 : list_free(unparented_idxs);
853 : 89 : list_free(arbiters_listidxs);
854 : : }
855 : :
856 : : /*
857 : : * We expect to find as many arbiter indexes on this partition as the
858 : : * root has, plus however many "additional arbiters" (to wit: those
859 : : * being concurrently rebuilt) we found.
860 : : */
2587 alvherre@alvh.no-ip. 861 [ - + ]:CBC 117 : if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
14 alvherre@kurilemu.de 862 [ - + ]:GNC 117 : list_length(arbiterIndexes) - additional_arbiters)
2822 alvherre@alvh.no-ip. 863 [ # # ]:UBC 0 : elog(ERROR, "invalid arbiter index list");
2822 alvherre@alvh.no-ip. 864 :CBC 117 : leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;
865 : :
866 : : /*
867 : : * In the DO UPDATE case, we have some more state to initialize.
868 : : */
869 [ + + ]: 117 : if (node->onConflictAction == ONCONFLICT_UPDATE)
870 : : {
1681 tgl@sss.pgh.pa.us 871 : 83 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
872 : : TupleConversionMap *map;
873 : :
1110 alvherre@alvh.no-ip. 874 : 83 : map = ExecGetRootToChildMap(leaf_part_rri, estate);
875 : :
2822 876 [ - + ]: 83 : Assert(node->onConflictSet != NIL);
2587 877 [ - + ]: 83 : Assert(rootResultRelInfo->ri_onConflict != NULL);
878 : :
1681 tgl@sss.pgh.pa.us 879 : 83 : leaf_part_rri->ri_onConflict = onconfl;
880 : :
881 : : /*
882 : : * Need a separate existing slot for each partition, as the
883 : : * partition could be of a different AM, even if the tuple
884 : : * descriptors match.
885 : : */
886 : 83 : onconfl->oc_Existing =
2472 andres@anarazel.de 887 : 83 : table_slot_create(leaf_part_rri->ri_RelationDesc,
888 : 83 : &mtstate->ps.state->es_tupleTable);
889 : :
890 : : /*
891 : : * If the partition's tuple descriptor matches exactly the root
892 : : * parent (the common case), we can re-use most of the parent's ON
893 : : * CONFLICT SET state, skipping a bunch of work. Otherwise, we
894 : : * need to create state specific to this partition.
895 : : */
2822 alvherre@alvh.no-ip. 896 [ + + ]: 83 : if (map == NULL)
897 : : {
898 : : /*
899 : : * It's safe to reuse these from the partition root, as we
900 : : * only process one tuple at a time (therefore we won't
901 : : * overwrite needed data in slots), and the results of
902 : : * projections are independent of the underlying storage.
903 : : * Projections and where clauses themselves don't store state
904 : : * / are independent of the underlying storage.
905 : : */
1681 tgl@sss.pgh.pa.us 906 : 45 : onconfl->oc_ProjSlot =
2477 andres@anarazel.de 907 : 45 : rootResultRelInfo->ri_onConflict->oc_ProjSlot;
1681 tgl@sss.pgh.pa.us 908 : 45 : onconfl->oc_ProjInfo =
2477 andres@anarazel.de 909 : 45 : rootResultRelInfo->ri_onConflict->oc_ProjInfo;
1681 tgl@sss.pgh.pa.us 910 : 45 : onconfl->oc_WhereClause =
2477 andres@anarazel.de 911 : 45 : rootResultRelInfo->ri_onConflict->oc_WhereClause;
912 : : }
913 : : else
914 : : {
915 : : List *onconflset;
916 : : List *onconflcols;
917 : :
918 : : /*
919 : : * Translate expressions in onConflictSet to account for
920 : : * different attribute numbers. For that, map partition
921 : : * varattnos twice: first to catch the EXCLUDED
922 : : * pseudo-relation (INNER_VAR), and second to handle the main
923 : : * target relation (firstVarno).
924 : : */
1681 tgl@sss.pgh.pa.us 925 : 38 : onconflset = copyObject(node->onConflictSet);
2190 michael@paquier.xyz 926 [ + + ]: 38 : if (part_attmap == NULL)
927 : : part_attmap =
928 : 35 : build_attrmap_by_name(RelationGetDescr(partrel),
929 : : RelationGetDescr(firstResultRel),
930 : : false);
931 : : onconflset = (List *)
2798 alvherre@alvh.no-ip. 932 : 38 : map_variable_attnos((Node *) onconflset,
933 : : INNER_VAR, 0,
934 : : part_attmap,
935 : 38 : RelationGetForm(partrel)->reltype,
936 : : &found_whole_row);
937 : : /* We ignore the value of found_whole_row. */
938 : : onconflset = (List *)
939 : 38 : map_variable_attnos((Node *) onconflset,
940 : : firstVarno, 0,
941 : : part_attmap,
942 : 38 : RelationGetForm(partrel)->reltype,
943 : : &found_whole_row);
944 : : /* We ignore the value of found_whole_row. */
945 : :
946 : : /* Finally, adjust the target colnos to match the partition. */
1681 tgl@sss.pgh.pa.us 947 : 38 : onconflcols = adjust_partition_colnos(node->onConflictCols,
948 : : leaf_part_rri);
949 : :
950 : : /* create the tuple slot for the UPDATE SET projection */
951 : 38 : onconfl->oc_ProjSlot =
952 : 38 : table_slot_create(partrel,
953 : 38 : &mtstate->ps.state->es_tupleTable);
954 : :
955 : : /* build UPDATE SET projection state */
956 : 38 : onconfl->oc_ProjInfo =
957 : 38 : ExecBuildUpdateProjection(onconflset,
958 : : true,
959 : : onconflcols,
960 : : partrelDesc,
961 : : econtext,
962 : : onconfl->oc_ProjSlot,
963 : : &mtstate->ps);
964 : :
965 : : /*
966 : : * If there is a WHERE clause, initialize state where it will
967 : : * be evaluated, mapping the attribute numbers appropriately.
968 : : * As with onConflictSet, we need to map partition varattnos
969 : : * to the partition's tupdesc.
970 : : */
2822 alvherre@alvh.no-ip. 971 [ + + ]: 38 : if (node->onConflictWhere)
972 : : {
973 : : List *clause;
974 : :
975 : 15 : clause = copyObject((List *) node->onConflictWhere);
976 : : clause = (List *)
2798 977 : 15 : map_variable_attnos((Node *) clause,
978 : : INNER_VAR, 0,
979 : : part_attmap,
980 : 15 : RelationGetForm(partrel)->reltype,
981 : : &found_whole_row);
982 : : /* We ignore the value of found_whole_row. */
983 : : clause = (List *)
984 : 15 : map_variable_attnos((Node *) clause,
985 : : firstVarno, 0,
986 : : part_attmap,
987 : 15 : RelationGetForm(partrel)->reltype,
988 : : &found_whole_row);
989 : : /* We ignore the value of found_whole_row. */
1681 tgl@sss.pgh.pa.us 990 : 15 : onconfl->oc_WhereClause =
14 peter@eisentraut.org 991 :GNC 15 : ExecInitQual(clause, &mtstate->ps);
992 : : }
993 : : }
994 : : }
995 : : }
996 : :
997 : : /*
998 : : * Since we've just initialized this ResultRelInfo, it's not in any list
999 : : * attached to the estate as yet. Add it, so that it can be found later.
1000 : : *
1001 : : * Note that the entries in this list appear in no predetermined order,
1002 : : * because partition result rels are initialized as and when they're
1003 : : * needed.
1004 : : */
2587 alvherre@alvh.no-ip. 1005 :CBC 3626 : MemoryContextSwitchTo(estate->es_query_cxt);
1006 : 3626 : estate->es_tuple_routing_result_relations =
1007 : 3626 : lappend(estate->es_tuple_routing_result_relations,
1008 : : leaf_part_rri);
1009 : :
1010 : : /*
1011 : : * Initialize information about this partition that's needed to handle
1012 : : * MERGE. We take the "first" result relation's mergeActionList as
1013 : : * reference and make a copy for this relation, converting stuff that
1014 : : * references attribute numbers to match this relation's.
1015 : : *
1016 : : * This duplicates much of the logic in ExecInitMerge(), so if something
1017 : : * changes there, look here too.
1018 : : */
1359 1019 [ + + + + ]: 3626 : if (node && node->operation == CMD_MERGE)
1020 : : {
1021 : 12 : List *firstMergeActionList = linitial(node->mergeActionLists);
1022 : : ListCell *lc;
1023 : 12 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
1024 : : Node *joinCondition;
1025 : :
1026 [ + + ]: 12 : if (part_attmap == NULL)
1027 : : part_attmap =
1028 : 6 : build_attrmap_by_name(RelationGetDescr(partrel),
1029 : : RelationGetDescr(firstResultRel),
1030 : : false);
1031 : :
1032 [ + - ]: 12 : if (unlikely(!leaf_part_rri->ri_projectNewInfoValid))
1033 : 12 : ExecInitMergeTupleSlots(mtstate, leaf_part_rri);
1034 : :
1035 : : /* Initialize state for join condition checking. */
1036 : : joinCondition =
626 dean.a.rasheed@gmail 1037 : 12 : map_variable_attnos(linitial(node->mergeJoinConditions),
1038 : : firstVarno, 0,
1039 : : part_attmap,
1040 : 12 : RelationGetForm(partrel)->reltype,
1041 : : &found_whole_row);
1042 : : /* We ignore the value of found_whole_row. */
1043 : 12 : leaf_part_rri->ri_MergeJoinCondition =
1044 : 12 : ExecInitQual((List *) joinCondition, &mtstate->ps);
1045 : :
1359 alvherre@alvh.no-ip. 1046 [ + - + + : 30 : foreach(lc, firstMergeActionList)
+ + ]
1047 : : {
1048 : : /* Make a copy for this relation to be safe. */
1049 : 18 : MergeAction *action = copyObject(lfirst(lc));
1050 : : MergeActionState *action_state;
1051 : :
1052 : : /* Generate the action's state for this relation */
1053 : 18 : action_state = makeNode(MergeActionState);
1054 : 18 : action_state->mas_action = action;
1055 : :
1056 : : /* And put the action in the appropriate list */
626 dean.a.rasheed@gmail 1057 : 36 : leaf_part_rri->ri_MergeActions[action->matchKind] =
1058 : 18 : lappend(leaf_part_rri->ri_MergeActions[action->matchKind],
1059 : : action_state);
1060 : :
1359 alvherre@alvh.no-ip. 1061 [ + + + - ]: 18 : switch (action->commandType)
1062 : : {
1063 : 6 : case CMD_INSERT:
1064 : :
1065 : : /*
1066 : : * ExecCheckPlanOutput() was already done on the targetlist
1067 : : * when the "first" result relation was initialized, and it
1068 : : * is the same for all result relations.
1069 : : */
1070 : 6 : action_state->mas_proj =
1071 : 6 : ExecBuildProjectionInfo(action->targetList, econtext,
1072 : : leaf_part_rri->ri_newTupleSlot,
1073 : : &mtstate->ps,
1074 : : RelationGetDescr(partrel));
1075 : 6 : break;
1076 : 9 : case CMD_UPDATE:
1077 : :
1078 : : /*
1079 : : * Convert updateColnos from "first" result relation
1080 : : * attribute numbers to this result rel's.
1081 : : */
1082 [ + - ]: 9 : if (part_attmap)
1083 : 9 : action->updateColnos =
1084 : 9 : adjust_partition_colnos_using_map(action->updateColnos,
1085 : : part_attmap);
1086 : 9 : action_state->mas_proj =
1087 : 9 : ExecBuildUpdateProjection(action->targetList,
1088 : : true,
1089 : : action->updateColnos,
1090 : 9 : RelationGetDescr(leaf_part_rri->ri_RelationDesc),
1091 : : econtext,
1092 : : leaf_part_rri->ri_newTupleSlot,
1093 : : NULL);
1094 : 9 : break;
1095 : 3 : case CMD_DELETE:
1096 : : case CMD_NOTHING:
1097 : : /* Nothing to do */
1098 : 3 : break;
1099 : :
1359 alvherre@alvh.no-ip. 1100 :UBC 0 : default:
1101 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
1102 : : }
1103 : :
1104 : : /* found_whole_row intentionally ignored. */
1359 alvherre@alvh.no-ip. 1105 :CBC 18 : action->qual =
1106 : 18 : map_variable_attnos(action->qual,
1107 : : firstVarno, 0,
1108 : : part_attmap,
1109 : 18 : RelationGetForm(partrel)->reltype,
1110 : : &found_whole_row);
1111 : 18 : action_state->mas_whenqual =
1112 : 18 : ExecInitQual((List *) action->qual, &mtstate->ps);
1113 : : }
1114 : : }
2587 1115 : 3626 : MemoryContextSwitchTo(oldcxt);
1116 : :
2854 rhaas@postgresql.org 1117 : 3626 : return leaf_part_rri;
1118 : : }
1119 : :
1120 : : /*
1121 : : * ExecInitRoutingInfo
1122 : : * Set up information needed for translating tuples between root
1123 : : * partitioned table format and partition format, and keep track of it
1124 : : * in PartitionTupleRouting.
1125 : : */
1126 : : static void
2811 1127 : 3880 : ExecInitRoutingInfo(ModifyTableState *mtstate,
1128 : : EState *estate,
1129 : : PartitionTupleRouting *proute,
1130 : : PartitionDispatch dispatch,
1131 : : ResultRelInfo *partRelInfo,
1132 : : int partidx,
1133 : : bool is_borrowed_rel)
1134 : : {
1135 : : MemoryContext oldcxt;
1136 : : int rri_index;
1137 : :
2587 alvherre@alvh.no-ip. 1138 : 3880 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
1139 : :
1140 : : /*
1141 : : * Set up tuple conversion between root parent and the partition if the
1142 : : * two have different rowtypes. If conversion is indeed required, also
1143 : : * initialize a slot dedicated to storing this partition's converted
1144 : : * tuples. Various operations that are applied to tuples after routing,
1145 : : * such as checking constraints, will refer to this slot.
1146 : : */
1110 1147 [ + + ]: 3880 : if (ExecGetRootToChildMap(partRelInfo, estate) != NULL)
1148 : : {
2632 andres@anarazel.de 1149 : 674 : Relation partrel = partRelInfo->ri_RelationDesc;
1150 : :
1151 : : /*
1152 : : * This pins the partition's TupleDesc, which will be released at the
1153 : : * end of the command.
1154 : : */
1884 heikki.linnakangas@i 1155 : 674 : partRelInfo->ri_PartitionTupleSlot =
2472 andres@anarazel.de 1156 : 674 : table_slot_create(partrel, &estate->es_tupleTable);
1157 : : }
1158 : : else
1884 heikki.linnakangas@i 1159 : 3206 : partRelInfo->ri_PartitionTupleSlot = NULL;
1160 : :
1161 : : /*
1162 : : * If the partition is a foreign table, let the FDW init itself for
1163 : : * routing tuples to the partition.
1164 : : */
2811 rhaas@postgresql.org 1165 [ + + ]: 3880 : if (partRelInfo->ri_FdwRoutine != NULL &&
1166 [ + - ]: 46 : partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
1167 : 46 : partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
1168 : :
1169 : : /*
1170 : : * Determine if the FDW supports batch insert and determine the batch size
1171 : : * (a FDW may support batching, but it may be disabled for the
1172 : : * server/table or for this particular query).
1173 : : *
1174 : : * If the FDW does not support batching, we set the batch size to 1.
1175 : : */
1092 efujita@postgresql.o 1176 [ + + ]: 3874 : if (partRelInfo->ri_FdwRoutine != NULL &&
1791 tomas.vondra@postgre 1177 [ + - ]: 40 : partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
1178 [ + - ]: 40 : partRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
1179 : 40 : partRelInfo->ri_BatchSize =
1180 : 40 : partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(partRelInfo);
1181 : : else
1182 : 3834 : partRelInfo->ri_BatchSize = 1;
1183 : :
1184 [ - + ]: 3874 : Assert(partRelInfo->ri_BatchSize >= 1);
1185 : :
2448 andres@anarazel.de 1186 : 3874 : partRelInfo->ri_CopyMultiInsertBuffer = NULL;
1187 : :
1188 : : /*
1189 : : * Keep track of it in the PartitionTupleRouting->partitions array.
1190 : : */
2587 alvherre@alvh.no-ip. 1191 [ - + ]: 3874 : Assert(dispatch->indexes[partidx] == -1);
1192 : :
1193 : 3874 : rri_index = proute->num_partitions++;
1194 : :
1195 : : /* Allocate or enlarge the array, as needed */
1196 [ + + ]: 3874 : if (proute->num_partitions >= proute->max_partitions)
1197 : : {
1198 [ + + ]: 2661 : if (proute->max_partitions == 0)
1199 : : {
1200 : 2655 : proute->max_partitions = 8;
5 michael@paquier.xyz 1201 :GNC 2655 : proute->partitions = palloc_array(ResultRelInfo *, proute->max_partitions);
1202 : 2655 : proute->is_borrowed_rel = palloc_array(bool, proute->max_partitions);
1203 : : }
1204 : : else
1205 : : {
2587 alvherre@alvh.no-ip. 1206 :CBC 6 : proute->max_partitions *= 2;
1207 : 6 : proute->partitions = (ResultRelInfo **)
1208 : 6 : repalloc(proute->partitions, sizeof(ResultRelInfo *) *
1209 : 6 : proute->max_partitions);
1715 tgl@sss.pgh.pa.us 1210 : 6 : proute->is_borrowed_rel = (bool *)
1211 : 6 : repalloc(proute->is_borrowed_rel, sizeof(bool) *
1212 : 6 : proute->max_partitions);
1213 : : }
1214 : : }
1215 : :
2587 alvherre@alvh.no-ip. 1216 : 3874 : proute->partitions[rri_index] = partRelInfo;
1715 tgl@sss.pgh.pa.us 1217 : 3874 : proute->is_borrowed_rel[rri_index] = is_borrowed_rel;
2587 alvherre@alvh.no-ip. 1218 : 3874 : dispatch->indexes[partidx] = rri_index;
1219 : :
1220 : 3874 : MemoryContextSwitchTo(oldcxt);
2811 rhaas@postgresql.org 1221 : 3874 : }
1222 : :
1223 : : /*
1224 : : * ExecInitPartitionDispatchInfo
1225 : : * Lock the partitioned table (if not locked already) and initialize
1226 : : * PartitionDispatch for a partitioned table and store it in the next
1227 : : * available slot in the proute->partition_dispatch_info array. Also,
1228 : : * record the index into this array in the parent_pd->indexes[] array in
1229 : : * the partidx element so that we can properly retrieve the newly created
1230 : : * PartitionDispatch later.
1231 : : */
1232 : : static PartitionDispatch
2476 1233 : 3418 : ExecInitPartitionDispatchInfo(EState *estate,
1234 : : PartitionTupleRouting *proute, Oid partoid,
1235 : : PartitionDispatch parent_pd, int partidx,
1236 : : ResultRelInfo *rootResultRelInfo)
1237 : : {
1238 : : Relation rel;
1239 : : PartitionDesc partdesc;
1240 : : PartitionDispatch pd;
1241 : : int dispatchidx;
1242 : : MemoryContext oldcxt;
1243 : :
1244 : : /*
1245 : : * For data modification, it is better that the executor does not include
1246 : : * partitions being detached, except when running in snapshot-isolation
1247 : : * mode. This means that a read-committed transaction immediately gets a
1248 : : * "no partition for tuple" error when a tuple is inserted into a
1249 : : * partition that's being detached concurrently, but a transaction in
1250 : : * repeatable-read mode can still use such a partition.
1251 : : */
1252 [ + + ]: 3418 : if (estate->es_partition_directory == NULL)
1253 : 2800 : estate->es_partition_directory =
1727 alvherre@alvh.no-ip. 1254 : 2800 : CreatePartitionDirectory(estate->es_query_cxt,
1255 : : !IsolationUsesXactSnapshot());
1256 : :
2587 1257 : 3418 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
1258 : :
1259 : : /*
1260 : : * Only sub-partitioned tables need to be locked here. The root
1261 : : * partitioned table will already have been locked as it's referenced in
1262 : : * the query's rtable.
1263 : : */
1264 [ + + ]: 3418 : if (partoid != RelationGetRelid(proute->partition_root))
2490 rhaas@postgresql.org 1265 : 600 : rel = table_open(partoid, RowExclusiveLock);
1266 : : else
2587 alvherre@alvh.no-ip. 1267 : 2818 : rel = proute->partition_root;
2476 rhaas@postgresql.org 1268 : 3418 : partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);
1269 : :
2587 alvherre@alvh.no-ip. 1270 : 3418 : pd = (PartitionDispatch) palloc(offsetof(PartitionDispatchData, indexes) +
1271 : 3418 : partdesc->nparts * sizeof(int));
1272 : 3418 : pd->reldesc = rel;
1273 : 3418 : pd->key = RelationGetPartitionKey(rel);
1274 : 3418 : pd->keystate = NIL;
1275 : 3418 : pd->partdesc = partdesc;
1276 [ + + ]: 3418 : if (parent_pd != NULL)
1277 : : {
1278 : 600 : TupleDesc tupdesc = RelationGetDescr(rel);
1279 : :
1280 : : /*
1281 : : * For sub-partitioned tables where the column order differs from its
1282 : : * direct parent partitioned table, we must store a tuple table slot
1283 : : * initialized with its tuple descriptor and a tuple conversion map to
1284 : : * convert a tuple from its parent's rowtype to its own. This is to
1285 : : * make sure that we are looking at the correct row using the correct
1286 : : * tuple descriptor when computing its partition key for tuple
1287 : : * routing.
1288 : : */
2190 michael@paquier.xyz 1289 : 600 : pd->tupmap = build_attrmap_by_name_if_req(RelationGetDescr(parent_pd->reldesc),
1290 : : tupdesc,
1291 : : false);
2587 alvherre@alvh.no-ip. 1292 : 600 : pd->tupslot = pd->tupmap ?
2482 andres@anarazel.de 1293 [ + + ]: 600 : MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual) : NULL;
1294 : : }
1295 : : else
1296 : : {
1297 : : /* Not required for the root partitioned table */
2587 alvherre@alvh.no-ip. 1298 : 2818 : pd->tupmap = NULL;
1299 : 2818 : pd->tupslot = NULL;
1300 : : }
1301 : :
1302 : : /*
1303 : : * Initialize with -1 to signify that the corresponding partition's
1304 : : * ResultRelInfo or PartitionDispatch has not been created yet.
1305 : : */
1306 : 3418 : memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
1307 : :
1308 : : /* Track in PartitionTupleRouting for later use */
1309 : 3418 : dispatchidx = proute->num_dispatch++;
1310 : :
1311 : : /* Allocate or enlarge the array, as needed */
1312 [ + + ]: 3418 : if (proute->num_dispatch >= proute->max_dispatch)
1313 : : {
1314 [ + - ]: 2818 : if (proute->max_dispatch == 0)
1315 : : {
1316 : 2818 : proute->max_dispatch = 4;
5 michael@paquier.xyz 1317 :GNC 2818 : proute->partition_dispatch_info = palloc_array(PartitionDispatch, proute->max_dispatch);
1318 : 2818 : proute->nonleaf_partitions = palloc_array(ResultRelInfo *, proute->max_dispatch);
1319 : : }
1320 : : else
1321 : : {
2587 alvherre@alvh.no-ip. 1322 :UBC 0 : proute->max_dispatch *= 2;
1323 : 0 : proute->partition_dispatch_info = (PartitionDispatch *)
1324 : 0 : repalloc(proute->partition_dispatch_info,
1325 : 0 : sizeof(PartitionDispatch) * proute->max_dispatch);
1925 1326 : 0 : proute->nonleaf_partitions = (ResultRelInfo **)
1327 : 0 : repalloc(proute->nonleaf_partitions,
1328 : 0 : sizeof(ResultRelInfo *) * proute->max_dispatch);
1329 : : }
1330 : : }
2587 alvherre@alvh.no-ip. 1331 :CBC 3418 : proute->partition_dispatch_info[dispatchidx] = pd;
1332 : :
1333 : : /*
1334 : : * If setting up a PartitionDispatch for a sub-partitioned table, we may
1335 : : * also need a minimally valid ResultRelInfo for checking the partition
1336 : : * constraint later; set that up now.
1337 : : */
1925 1338 [ + + ]: 3418 : if (parent_pd)
1339 : : {
1340 : 600 : ResultRelInfo *rri = makeNode(ResultRelInfo);
1341 : :
1772 heikki.linnakangas@i 1342 : 600 : InitResultRelInfo(rri, rel, 0, rootResultRelInfo, 0);
1925 alvherre@alvh.no-ip. 1343 : 600 : proute->nonleaf_partitions[dispatchidx] = rri;
1344 : : }
1345 : : else
1346 : 2818 : proute->nonleaf_partitions[dispatchidx] = NULL;
1347 : :
1348 : : /*
1349 : : * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
1350 : : * install a downlink in the parent to allow quick descent.
1351 : : */
2587 1352 [ + + ]: 3418 : if (parent_pd)
1353 : : {
1354 [ - + ]: 600 : Assert(parent_pd->indexes[partidx] == -1);
1355 : 600 : parent_pd->indexes[partidx] = dispatchidx;
1356 : : }
1357 : :
1358 : 3418 : MemoryContextSwitchTo(oldcxt);
1359 : :
1360 : 3418 : return pd;
1361 : : }
1362 : :
1363 : : /*
1364 : : * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple
1365 : : * routing.
1366 : : *
1367 : : * Close all the partitioned tables, leaf partitions, and their indexes.
1368 : : */
1369 : : void
2811 rhaas@postgresql.org 1370 : 2401 : ExecCleanupTupleRouting(ModifyTableState *mtstate,
1371 : : PartitionTupleRouting *proute)
1372 : : {
1373 : : int i;
1374 : :
1375 : : /*
1376 : : * Remember, proute->partition_dispatch_info[0] corresponds to the root
1377 : : * partitioned table, which we must not try to close, because it is the
1378 : : * main target table of the query that will be closed by callers such as
1379 : : * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root
1380 : : * partitioned table.
1381 : : */
2903 1382 [ + + ]: 2889 : for (i = 1; i < proute->num_dispatch; i++)
1383 : : {
1384 : 488 : PartitionDispatch pd = proute->partition_dispatch_info[i];
1385 : :
2521 andres@anarazel.de 1386 : 488 : table_close(pd->reldesc, NoLock);
1387 : :
2587 alvherre@alvh.no-ip. 1388 [ + + ]: 488 : if (pd->tupslot)
1389 : 227 : ExecDropSingleTupleTableSlot(pd->tupslot);
1390 : : }
1391 : :
2903 rhaas@postgresql.org 1392 [ + + ]: 5978 : for (i = 0; i < proute->num_partitions; i++)
1393 : : {
1394 : 3577 : ResultRelInfo *resultRelInfo = proute->partitions[i];
1395 : :
1396 : : /* Allow any FDWs to shut down */
2437 efujita@postgresql.o 1397 [ + + ]: 3577 : if (resultRelInfo->ri_FdwRoutine != NULL &&
1398 [ + - ]: 34 : resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
1399 : 34 : resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
1400 : : resultRelInfo);
1401 : :
1402 : : /*
1403 : : * Close it if it's not one of the result relations borrowed from the
1404 : : * owning ModifyTableState; those will be closed by ExecEndPlan().
1405 : : */
1715 tgl@sss.pgh.pa.us 1406 [ + + ]: 3577 : if (proute->is_borrowed_rel[i])
1407 : 230 : continue;
1408 : :
2903 rhaas@postgresql.org 1409 : 3347 : ExecCloseIndices(resultRelInfo);
2521 andres@anarazel.de 1410 : 3347 : table_close(resultRelInfo->ri_RelationDesc, NoLock);
1411 : : }
2953 rhaas@postgresql.org 1412 : 2401 : }
1413 : :
1414 : : /* ----------------
1415 : : * FormPartitionKeyDatum
1416 : : * Construct values[] and isnull[] arrays for the partition key
1417 : : * of a tuple.
1418 : : *
1419 : : * pd Partition dispatch object of the partitioned table
1420 : : * slot Heap tuple from which to extract partition key
1421 : : * estate executor state for evaluating any partition key
1422 : : * expressions (must be non-NULL)
1423 : : * values Array of partition key Datums (output area)
1424 : : * isnull Array of is-null indicators (output area)
1425 : : *
1426 : : * The ecxt_scantuple slot of estate's per-tuple expr context must point
1427 : : * to the heap tuple passed in.
1428 : : * ----------------
1429 : : */
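: : /*
: :  * Illustration (hypothetical key): for PARTITION BY RANGE (a, (a + b)),
: :  * partattrs would be {attnum of a, 0}; the first key value is fetched
: :  * with slot_getattr(), while the zero entry means the second value comes
: :  * from evaluating the stored expression (a + b), so the loop below fills
: :  * values[] and isnull[] from both sources.
: :  */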
1430 : : static void
1431 : 574894 : FormPartitionKeyDatum(PartitionDispatch pd,
1432 : : TupleTableSlot *slot,
1433 : : EState *estate,
1434 : : Datum *values,
1435 : : bool *isnull)
1436 : : {
1437 : : ListCell *partexpr_item;
1438 : : int i;
1439 : :
1440 [ + + + + ]: 574894 : if (pd->key->partexprs != NIL && pd->keystate == NIL)
1441 : : {
1442 : : /* Check caller has set up context correctly */
1443 [ + - + - : 273 : Assert(estate != NULL &&
- + ]
1444 : : GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
1445 : :
1446 : : /* First time through, set up expression evaluation state */
1447 : 273 : pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
1448 : : }
1449 : :
1450 : 574894 : partexpr_item = list_head(pd->keystate);
1451 [ + + ]: 1161326 : for (i = 0; i < pd->key->partnatts; i++)
1452 : : {
1453 : 586432 : AttrNumber keycol = pd->key->partattrs[i];
1454 : : Datum datum;
1455 : : bool isNull;
1456 : :
1457 [ + + ]: 586432 : if (keycol != 0)
1458 : : {
1459 : : /* Plain column; get the value directly from the heap tuple */
1460 : 542614 : datum = slot_getattr(slot, keycol, &isNull);
1461 : : }
1462 : : else
1463 : : {
1464 : : /* Expression; need to evaluate it */
1465 [ - + ]: 43818 : if (partexpr_item == NULL)
2953 rhaas@postgresql.org 1466 [ # # ]:UBC 0 : elog(ERROR, "wrong number of partition key expressions");
2953 rhaas@postgresql.org 1467 :CBC 43818 : datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item),
1468 [ + - ]: 43818 : GetPerTupleExprContext(estate),
1469 : : &isNull);
2346 tgl@sss.pgh.pa.us 1470 : 43818 : partexpr_item = lnext(pd->keystate, partexpr_item);
1471 : : }
2953 rhaas@postgresql.org 1472 : 586432 : values[i] = datum;
1473 : 586432 : isnull[i] = isNull;
1474 : : }
1475 : :
1476 [ - + ]: 574894 : if (partexpr_item != NULL)
2953 rhaas@postgresql.org 1477 [ # # ]:UBC 0 : elog(ERROR, "wrong number of partition key expressions");
2953 rhaas@postgresql.org 1478 :CBC 574894 : }
1479 : :
1480 : : /*
1481 : : * The number of times the same partition must be found in a row before we
1482 : : * switch from a binary search for the given values to just checking if the
1483 : : * values belong to the last found partition. This must be above 0.
1484 : : */
1485 : : #define PARTITION_CACHED_FIND_THRESHOLD 16
1486 : :
1487 : : /*
1488 : : * get_partition_for_tuple
1489 : : * Finds the partition of the given relation that accepts the partition
1490 : : * key specified in 'values' and 'isnull'.
1491 : : *
1492 : : * Calling this function can be quite expensive when LIST and RANGE
1493 : : * partitioned tables have many partitions. This is due to the binary search
1494 : : * that's done to find the correct partition. Many of the use cases for LIST
1495 : : * and RANGE partitioned tables make it likely that the same partition is
1496 : : * found in subsequent ExecFindPartition() calls. This is especially true for
1497 : : * cases such as RANGE partitioned tables on a TIMESTAMP column where the
1498 : : * partition key is the current time. When asked to find a partition for a
1499 : : * RANGE or LIST partitioned table, we record the partition index and datum
1500 : : * offset we've found for the given 'values' in the PartitionDesc (which is
1501 : : * stored in relcache), and if we keep finding the same partition
1502 : : * PARTITION_CACHED_FIND_THRESHOLD times in a row, then we'll enable caching
1503 : : * logic and instead of performing a binary search to find the correct
1504 : : * partition, we'll just double-check that 'values' still belong to the last
1505 : : * found partition, and if so, we'll return that partition index, thus
1506 : : * skipping the need for the binary search. If we fail to match the last
1507 : : * partition when double checking, then we fall back on doing a binary search.
1508 : : * In this case, unless we find 'values' belong to the DEFAULT partition,
1509 : : * we'll reset the number of times we've hit the same partition so that we
1510 : : * don't attempt to use the cache again until we've found that partition at
1511 : : * least PARTITION_CACHED_FIND_THRESHOLD times in a row.
1512 : : *
1513 : : * For cases where the partition changes on each lookup, the amount of
1514 : : * additional work required just amounts to recording the last found partition
1515 : : * and bound offset then resetting the found counter. This is cheap and does
1516 : : * not appear to cause any meaningful slowdowns for such cases.
1517 : : *
1518 : : * No caching of partitions is done when the last found partition is the
1519 : : * DEFAULT or NULL partition. For the case of the DEFAULT partition, there
1520 : : * is no bound offset storing the matching datum, so we cannot confirm the
1521 : : * indexes match. For the NULL partition, the check is so cheap that
1522 : : * there's no sense in caching.
1523 : : *
1524 : : * Return value is the index of the partition (>= 0 and < partdesc->nparts)
1525 : : * if one is found, or -1 otherwise.
1526 : : */
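: : /*
: :  * Illustration (hypothetical schema): for
: :  *     CREATE TABLE log (ts timestamptz) PARTITION BY RANGE (ts);
: :  * rows that arrive in ts order keep routing to the same partition, so
: :  * after PARTITION_CACHED_FIND_THRESHOLD consecutive hits the cached
: :  * lookup path below avoids the binary search on most calls.
: :  */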
1527 : : static int
46 peter@eisentraut.org 1528 :GNC 574873 : get_partition_for_tuple(PartitionDispatch pd, const Datum *values, const bool *isnull)
1529 : : {
1232 drowley@postgresql.o 1530 :CBC 574873 : int bound_offset = -1;
2803 alvherre@alvh.no-ip. 1531 : 574873 : int part_index = -1;
1349 1532 : 574873 : PartitionKey key = pd->key;
1533 : 574873 : PartitionDesc partdesc = pd->partdesc;
2743 tgl@sss.pgh.pa.us 1534 : 574873 : PartitionBoundInfo boundinfo = partdesc->boundinfo;
1535 : :
1536 : : /*
1537 : : * In the switch statement below, when we perform a cached lookup for
1538 : : * RANGE and LIST partitioned tables, if we find that the last found
1539 : : * partition matches the 'values', we return the partition index right
1540 : : * away. We do this instead of breaking out of the switch as we don't
1541 : : * want to execute the code about the DEFAULT partition or do any updates
1542 : : * for any of the cache-related fields. That would be a waste of effort
1543 : : * as we already know it's not the DEFAULT partition and have no need to
1544 : : * increment the number of times we found the same partition any higher
1545 : : * than PARTITION_CACHED_FIND_THRESHOLD.
1546 : : */
1547 : :
1548 : : /* Route as appropriate based on partitioning strategy. */
2803 alvherre@alvh.no-ip. 1549 [ + + + - ]: 574873 : switch (key->strategy)
1550 : : {
1551 : 105369 : case PARTITION_STRATEGY_HASH:
1552 : : {
1553 : : uint64 rowHash;
1554 : :
1555 : : /* hash partitioning is too cheap to bother caching */
2743 tgl@sss.pgh.pa.us 1556 : 105369 : rowHash = compute_partition_hash_value(key->partnatts,
1557 : : key->partsupfunc,
2461 peter@eisentraut.org 1558 : 105369 : key->partcollation,
1559 : : values, isnull);
1560 : :
1561 : : /*
1562 : : * HASH partitions can't have a DEFAULT partition and we don't
1563 : : * do any caching work for them, so just return the part index
1564 : : */
1232 drowley@postgresql.o 1565 : 105363 : return boundinfo->indexes[rowHash % boundinfo->nindexes];
1566 : : }
1567 : :
2803 alvherre@alvh.no-ip. 1568 : 85599 : case PARTITION_STRATEGY_LIST:
1569 [ + + ]: 85599 : if (isnull[0])
1570 : : {
1571 : : /* this is far too cheap to bother doing any caching */
2743 tgl@sss.pgh.pa.us 1572 [ + + ]: 66 : if (partition_bound_accepts_nulls(boundinfo))
1573 : : {
1574 : : /*
1575 : : * When there is a NULL partition we just return that
1576 : : * directly. We don't have a bound_offset so it's not
1577 : : * valid to drop into the code after the switch which
1578 : : * checks and updates the cache fields. We perhaps should
1579 : : * be invalidating the details of the last cached
1580 : : * partition but there's no real need to. Keeping those
1581 : : * fields set gives a chance of matching the cached
1582 : : * partition on the next lookup.
1583 : : */
1232 drowley@postgresql.o 1584 : 51 : return boundinfo->null_index;
1585 : : }
1586 : : }
1587 : : else
1588 : : {
1589 : : bool equal;
1590 : :
1591 [ + + ]: 85533 : if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1592 : : {
1593 : 11946 : int last_datum_offset = partdesc->last_found_datum_index;
1594 : 11946 : Datum lastDatum = boundinfo->datums[last_datum_offset][0];
1595 : : int32 cmpval;
1596 : :
1597 : : /* does the last found datum index match this datum? */
1598 : 11946 : cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0],
1599 : 11946 : key->partcollation[0],
1600 : : lastDatum,
1601 : : values[0]));
1602 : :
1603 [ + + ]: 11946 : if (cmpval == 0)
1604 : 11769 : return boundinfo->indexes[last_datum_offset];
1605 : :
1606 : : /* fall-through and do a manual lookup */
1607 : : }
1608 : :
2803 alvherre@alvh.no-ip. 1609 : 73764 : bound_offset = partition_list_bsearch(key->partsupfunc,
1610 : : key->partcollation,
1611 : : boundinfo,
1612 : : values[0], &equal);
1613 [ + + + + ]: 73764 : if (bound_offset >= 0 && equal)
2743 tgl@sss.pgh.pa.us 1614 : 73564 : part_index = boundinfo->indexes[bound_offset];
1615 : : }
2803 alvherre@alvh.no-ip. 1616 : 73779 : break;
1617 : :
1618 : 383905 : case PARTITION_STRATEGY_RANGE:
1619 : : {
1620 : 383905 : bool equal = false,
1621 : 383905 : range_partkey_has_null = false;
1622 : : int i;
1623 : :
1624 : : /*
1625 : : * No range includes NULL, so this will be accepted by the
1626 : : * default partition if there is one, and otherwise rejected.
1627 : : */
1628 [ + + ]: 779138 : for (i = 0; i < key->partnatts; i++)
1629 : : {
1630 [ + + ]: 395260 : if (isnull[i])
1631 : : {
1632 : 27 : range_partkey_has_null = true;
1633 : 27 : break;
1634 : : }
1635 : : }
1636 : :
1637 : : /* NULLs belong in the DEFAULT partition */
1232 drowley@postgresql.o 1638 [ + + ]: 383905 : if (range_partkey_has_null)
1639 : 27 : break;
1640 : :
1641 [ + + ]: 383878 : if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1642 : : {
1643 : 124827 : int last_datum_offset = partdesc->last_found_datum_index;
1644 : 124827 : Datum *lastDatums = boundinfo->datums[last_datum_offset];
1645 : 124827 : PartitionRangeDatumKind *kind = boundinfo->kind[last_datum_offset];
1646 : : int32 cmpval;
1647 : :
1648 : : * check if the value is >= the lower bound
1649 : 124827 : cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1650 : : key->partcollation,
1651 : : lastDatums,
1652 : : kind,
1653 : : values,
1654 : 124827 : key->partnatts);
1655 : :
1656 : : /*
1657 : : * If it's equal to the lower bound then no need to check
1658 : : * the upper bound.
1659 : : */
1660 [ + + ]: 124827 : if (cmpval == 0)
1661 : 124672 : return boundinfo->indexes[last_datum_offset + 1];
1662 : :
1663 [ + + + - ]: 121878 : if (cmpval < 0 && last_datum_offset + 1 < boundinfo->ndatums)
1664 : : {
1665 : : /* check if the value is below the upper bound */
1666 : 121848 : lastDatums = boundinfo->datums[last_datum_offset + 1];
1667 : 121848 : kind = boundinfo->kind[last_datum_offset + 1];
1668 : 121848 : cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1669 : : key->partcollation,
1670 : : lastDatums,
1671 : : kind,
1672 : : values,
1673 : 121848 : key->partnatts);
1674 : :
1675 [ + + ]: 121848 : if (cmpval > 0)
1676 : 121723 : return boundinfo->indexes[last_datum_offset + 1];
1677 : : }
1678 : : /* fall-through and do a manual lookup */
1679 : : }
1680 : :
1681 : 259206 : bound_offset = partition_range_datum_bsearch(key->partsupfunc,
1682 : : key->partcollation,
1683 : : boundinfo,
1684 : 259206 : key->partnatts,
1685 : : values,
1686 : : &equal);
1687 : :
1688 : : /*
1689 : : * The bound at bound_offset is less than or equal to the
1690 : : * tuple value, so the bound at offset+1 is the upper bound of
1691 : : * the partition we're looking for, if one actually
1692 : : * exists.
1693 : : */
1694 : 259206 : part_index = boundinfo->indexes[bound_offset + 1];
1695 : : }
2803 alvherre@alvh.no-ip. 1696 : 259206 : break;
1697 : :
2803 alvherre@alvh.no-ip. 1698 :UBC 0 : default:
1699 [ # # ]: 0 : elog(ERROR, "unexpected partition strategy: %d",
1700 : : (int) key->strategy);
1701 : : }
1702 : :
1703 : : /*
1704 : : * part_index < 0 means we failed to find a partition of this parent. Use
1705 : : * the default partition, if there is one.
1706 : : */
2803 alvherre@alvh.no-ip. 1707 [ + + ]:CBC 333012 : if (part_index < 0)
1708 : : {
1709 : : /*
1710 : : * No need to reset the cache fields here. The next set of values
1711 : : * might end up belonging to the cached partition, so leaving the
1712 : : * cache alone improves the chances of a cache hit on the next lookup.
1713 : : */
1232 drowley@postgresql.o 1714 : 472 : return boundinfo->default_index;
1715 : : }
1716 : :
1717 : : /* we should only make it here when the code above set bound_offset */
1718 [ - + ]: 332540 : Assert(bound_offset >= 0);
1719 : :
1720 : : /*
1721 : : * Attend to the cache fields. If the bound_offset matches the last
1722 : : * cached bound offset then we've found the same partition as last time,
1723 : : * so bump the count by one. If all goes well, we'll eventually reach
1724 : : * PARTITION_CACHED_FIND_THRESHOLD and try the cache path next time
1725 : : * around. Otherwise, we'll reset the cache count back to 1 to mark that
1726 : : * we've found this partition for the first time.
1727 : : */
1728 [ + + ]: 332540 : if (bound_offset == partdesc->last_found_datum_index)
1729 : 230814 : partdesc->last_found_count++;
1730 : : else
1731 : : {
1732 : 101726 : partdesc->last_found_count = 1;
1733 : 101726 : partdesc->last_found_part_index = part_index;
1734 : 101726 : partdesc->last_found_datum_index = bound_offset;
1735 : : }
1736 : :
2803 alvherre@alvh.no-ip. 1737 : 332540 : return part_index;
1738 : : }
1739 : :
1740 : : /*
1741 : : * ExecBuildSlotPartitionKeyDescription
1742 : : *
1743 : : * This works very much like BuildIndexValueDescription() and is currently
1744 : : * used for building error messages when ExecFindPartition() fails to find
1745 : : * a partition for a row.
1746 : : */
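: : /*
: :  * For a two-column partition key (a, b) and a failing row with a = 42
: :  * and b NULL, the description built below would read
: :  *     (a, b) = (42, null)
: :  * (illustrative values).
: :  */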
1747 : : static char *
2953 rhaas@postgresql.org 1748 : 77 : ExecBuildSlotPartitionKeyDescription(Relation rel,
1749 : : const Datum *values,
1750 : : const bool *isnull,
1751 : : int maxfieldlen)
1752 : : {
1753 : : StringInfoData buf;
1754 : 77 : PartitionKey key = RelationGetPartitionKey(rel);
1755 : 77 : int partnatts = get_partition_natts(key);
1756 : : int i;
1757 : 77 : Oid relid = RelationGetRelid(rel);
1758 : : AclResult aclresult;
1759 : :
1760 [ - + ]: 77 : if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
2953 rhaas@postgresql.org 1761 :UBC 0 : return NULL;
1762 : :
1763 : : /* If the user has table-level access, just go build the description. */
2953 rhaas@postgresql.org 1764 :CBC 77 : aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
1765 [ + + ]: 77 : if (aclresult != ACLCHECK_OK)
1766 : : {
1767 : : /*
1768 : : * Step through the columns of the partition key and make sure the
1769 : : * user has SELECT rights on all of them.
1770 : : */
1771 [ + + ]: 12 : for (i = 0; i < partnatts; i++)
1772 : : {
1773 : 9 : AttrNumber attnum = get_partition_col_attnum(key, i);
1774 : :
1775 : : /*
1776 : : * If this partition key column is an expression, we return no
1777 : : * detail rather than trying to figure out which column(s) the
1778 : : * expression includes and whether the user has SELECT rights on them.
1779 : : */
1780 [ + + + + ]: 15 : if (attnum == InvalidAttrNumber ||
1781 : 6 : pg_attribute_aclcheck(relid, attnum, GetUserId(),
1782 : : ACL_SELECT) != ACLCHECK_OK)
1783 : 6 : return NULL;
1784 : : }
1785 : : }
1786 : :
1787 : 71 : initStringInfo(&buf);
1788 : 71 : appendStringInfo(&buf, "(%s) = (",
1789 : : pg_get_partkeydef_columns(relid, true));
1790 : :
1791 [ + + ]: 169 : for (i = 0; i < partnatts; i++)
1792 : : {
1793 : : char *val;
1794 : : int vallen;
1795 : :
1796 [ + + ]: 98 : if (isnull[i])
1797 : 15 : val = "null";
1798 : : else
1799 : : {
1800 : : Oid foutoid;
1801 : : bool typisvarlena;
1802 : :
1803 : 83 : getTypeOutputInfo(get_partition_col_typid(key, i),
1804 : : &foutoid, &typisvarlena);
1805 : 83 : val = OidOutputFunctionCall(foutoid, values[i]);
1806 : : }
1807 : :
1808 [ + + ]: 98 : if (i > 0)
1809 : 27 : appendStringInfoString(&buf, ", ");
1810 : :
1811 : : /* truncate if needed */
1812 : 98 : vallen = strlen(val);
1813 [ + - ]: 98 : if (vallen <= maxfieldlen)
2338 drowley@postgresql.o 1814 : 98 : appendBinaryStringInfo(&buf, val, vallen);
1815 : : else
1816 : : {
2953 rhaas@postgresql.org 1817 :UBC 0 : vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1818 : 0 : appendBinaryStringInfo(&buf, val, vallen);
1819 : 0 : appendStringInfoString(&buf, "...");
1820 : : }
1821 : : }
1822 : :
2953 rhaas@postgresql.org 1823 :CBC 71 : appendStringInfoChar(&buf, ')');
1824 : :
1825 : 71 : return buf.data;
1826 : : }
1827 : :
1828 : : /*
1829 : : * adjust_partition_colnos
1830 : : * Adjust the list of UPDATE target column numbers to account for
1831 : : * attribute differences between the parent and the partition.
1832 : : *
1833 : : * Note: mustn't be called if no adjustment is required.
1834 : : */
1835 : : static List *
1681 tgl@sss.pgh.pa.us 1836 : 38 : adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri)
1837 : : {
1838 : 38 : TupleConversionMap *map = ExecGetChildToRootMap(leaf_part_rri);
1839 : :
1344 alvherre@alvh.no-ip. 1840 [ - + ]: 38 : Assert(map != NULL);
1841 : :
1359 1842 : 38 : return adjust_partition_colnos_using_map(colnos, map->attrMap);
1843 : : }
1844 : :
1845 : : /*
1846 : : * adjust_partition_colnos_using_map
1847 : : * Like adjust_partition_colnos, but uses a caller-supplied map instead
1848 : : * of assuming that the map is from the "root" result relation.
1849 : : *
1850 : : * Note: mustn't be called if no adjustment is required.
1851 : : */
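: : /*
: :  * Worked example (hypothetical attribute layout): if the partition has
: :  * its columns in reverse order relative to the parent, attrMap->attnums
: :  * might be {3, 2, 1}; parent colnos (1, 3) would then be translated to
: :  * (3, 1) by the loop below.
: :  */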
1852 : : static List *
1853 : 47 : adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
1854 : : {
1855 : 47 : List *new_colnos = NIL;
1856 : : ListCell *lc;
1857 : :
1858 [ - + ]: 47 : Assert(attrMap != NULL); /* else we shouldn't be here */
1859 : :
1681 tgl@sss.pgh.pa.us 1860 [ + - + + : 116 : foreach(lc, colnos)
+ + ]
1861 : : {
1862 : 69 : AttrNumber parentattrno = lfirst_int(lc);
1863 : :
1864 [ + - ]: 69 : if (parentattrno <= 0 ||
1865 [ + - ]: 69 : parentattrno > attrMap->maplen ||
1866 [ - + ]: 69 : attrMap->attnums[parentattrno - 1] == 0)
1681 tgl@sss.pgh.pa.us 1867 [ # # ]:UBC 0 : elog(ERROR, "unexpected attno %d in target column list",
1868 : : parentattrno);
1681 tgl@sss.pgh.pa.us 1869 :CBC 69 : new_colnos = lappend_int(new_colnos,
1870 : 69 : attrMap->attnums[parentattrno - 1]);
1871 : : }
1872 : :
1873 : 47 : return new_colnos;
1874 : : }
1875 : :
1876 : : /*-------------------------------------------------------------------------
1877 : : * Run-Time Partition Pruning Support.
1878 : : *
1879 : : * The following series of functions exist to support the removal of unneeded
1880 : : * subplans for queries against partitioned tables. The supporting functions
1881 : : * here are designed to work with any plan type which supports an arbitrary
1882 : : * number of subplans, e.g. Append, MergeAppend.
1883 : : *
1884 : : * When pruning involves comparison of a partition key to a constant, it's
1885 : : * done by the planner. However, if we have a comparison to a non-constant
1886 : : * but not volatile expression, that presents an opportunity for run-time
1887 : : * pruning by the executor, allowing irrelevant partitions to be skipped
1888 : : * dynamically.
1889 : : *
1890 : : * We must distinguish expressions containing PARAM_EXEC Params from
1891 : : * expressions that don't contain those. Even though a PARAM_EXEC Param is
1892 : : * considered to be a stable expression, it can change value from one plan
1893 : : * node scan to the next during query execution. Stable comparison
1894 : : * expressions that don't involve such Params allow partition pruning to be
1895 : : * done once during executor startup. Expressions that do involve such Params
1896 : : * require us to prune separately for each scan of the parent plan node.
1897 : : *
1898 : : * Note that pruning away unneeded subplans during executor startup has the
1899 : : * added benefit of not having to initialize the unneeded subplans at all.
1900 : : *
1901 : : *
1902 : : * Functions:
1903 : : *
1904 : : * ExecDoInitialPruning:
1905 : : * Perform runtime "initial" pruning, if necessary, to determine the set
1906 : : * of child subnodes that need to be initialized during ExecInitNode() for
1907 : : * all plan nodes that contain a PartitionPruneInfo.
1908 : : *
1909 : : * ExecInitPartitionExecPruning:
1910 : : * Updates the PartitionPruneState found at the given part_prune_index in
1911 : : * EState.es_part_prune_states for use during "exec" pruning if required.
1912 : : * Also returns the set of subplans to initialize that would be stored at
1913 : : * part_prune_index in EState.es_part_prune_results by
1914 : : * ExecDoInitialPruning(). Maps in PartitionPruneState are updated to
1915 : : * account for initial pruning possibly having eliminated some of the
1916 : : * subplans.
1917 : : *
1918 : : * ExecFindMatchingSubPlans:
1919 : : * Returns indexes of matching subplans after evaluating the expressions
1920 : : * that are safe to evaluate at a given point. This function is first
1921 : : * called during ExecDoInitialPruning() to find the initially matching
1922 : : * subplans based on performing the initial pruning steps and then must be
1923 : : * called again each time the value of a Param listed in
1924 : : * PartitionPruneState's 'execparamids' changes.
1925 : : *-------------------------------------------------------------------------
1926 : : */
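: : /*
: :  * Illustration (hypothetical queries): in
: :  *     SELECT * FROM parted WHERE key = $1;
: :  * $1 is an EXTERN Param, fixed for the whole execution, so pruning can be
: :  * done once at executor startup. By contrast, in a nestloop join such as
: :  *     SELECT * FROM other o JOIN parted p ON p.key = o.x;
: :  * the comparison value is a PARAM_EXEC Param that changes on every rescan
: :  * of the parted side, so pruning must be repeated for each scan.
: :  */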
1927 : :
1928 : :
1929 : : /*
1930 : : * ExecDoInitialPruning
1931 : : * Perform runtime "initial" pruning, if necessary, to determine the set
1932 : : * of child subnodes that need to be initialized during ExecInitNode() for
1933 : : * plan nodes that support partition pruning.
1934 : : *
1935 : : * This function iterates over each PartitionPruneInfo entry in
1936 : : * estate->es_part_prune_infos. For each entry, it creates a PartitionPruneState
1937 : : * and adds it to es_part_prune_states. ExecInitPartitionExecPruning() accesses
1938 : : * these states through their corresponding indexes in es_part_prune_states and
1939 : : * assigns each state to the parent node's PlanState, where it will be used
1940 : : * for "exec" pruning.
1941 : : *
1942 : : * If initial pruning steps exist for a PartitionPruneInfo entry, this function
1943 : : * executes those pruning steps and stores the result as a bitmapset of valid
1944 : : * child subplans, identifying which subplans should be initialized for
1945 : : * execution. The results are saved in estate->es_part_prune_results.
1946 : : *
1947 : : * If no initial pruning is performed for a given PartitionPruneInfo, a NULL
1948 : : * entry is still added to es_part_prune_results to maintain alignment with
1949 : : * es_part_prune_infos. This ensures that ExecInitPartitionExecPruning() can
1950 : : * use the same index to retrieve the pruning results.
1951 : : */
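: : /*
: :  * Sketch of the invariant maintained below (illustrative):
: :  *     es_part_prune_infos   = [pi0,  pi1,  pi2 ]
: :  *     es_part_prune_states  = [ps0,  ps1,  ps2 ]
: :  *     es_part_prune_results = [bms0, NULL, bms2]
: :  * i.e. the three lists stay parallel, with NULL results for entries that
: :  * needed no initial pruning, so one part_prune_index addresses all three.
: :  */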
1952 : : void
319 amitlan@postgresql.o 1953 : 288701 : ExecDoInitialPruning(EState *estate)
1954 : : {
1955 : : ListCell *lc;
1956 : :
1957 [ + + + + : 289102 : foreach(lc, estate->es_part_prune_infos)
+ + ]
1958 : : {
1959 : 401 : PartitionPruneInfo *pruneinfo = lfirst_node(PartitionPruneInfo, lc);
1960 : : PartitionPruneState *prunestate;
1961 : 401 : Bitmapset *validsubplans = NULL;
312 1962 : 401 : Bitmapset *all_leafpart_rtis = NULL;
1963 : 401 : Bitmapset *validsubplan_rtis = NULL;
1964 : :
1965 : : /* Create and save the PartitionPruneState. */
1966 : 401 : prunestate = CreatePartitionPruneState(estate, pruneinfo,
1967 : : &all_leafpart_rtis);
319 1968 : 401 : estate->es_part_prune_states = lappend(estate->es_part_prune_states,
1969 : : prunestate);
1970 : :
1971 : : /*
1972 : : * Perform initial pruning steps, if any, and save the result
1973 : : * bitmapset or NULL as described in the header comment.
1974 : : */
1975 [ + + ]: 401 : if (prunestate->do_initial_prune)
312 1976 : 224 : validsubplans = ExecFindMatchingSubPlans(prunestate, true,
1977 : : &validsubplan_rtis);
1978 : : else
1979 : 177 : validsubplan_rtis = all_leafpart_rtis;
1980 : :
1981 : 401 : estate->es_unpruned_relids = bms_add_members(estate->es_unpruned_relids,
1982 : : validsubplan_rtis);
319 1983 : 401 : estate->es_part_prune_results = lappend(estate->es_part_prune_results,
1984 : : validsubplans);
1985 : : }
1986 : 288701 : }
1987 : :
1988 : : /*
1989 : : * ExecInitPartitionExecPruning
1990 : : * Initialize the data structures needed for runtime "exec" partition
1991 : : * pruning and return the result of initial pruning, if available.
1992 : : *
1993 : : * 'relids' identifies the relation to which both the parent plan and the
1994 : : * PartitionPruneInfo given by 'part_prune_index' belong.
1995 : : *
1996 : : * On return, *initially_valid_subplans is assigned the set of indexes of
1997 : : * child subplans that must be initialized along with the parent plan node.
1998 : : * Initial pruning would have been performed by ExecDoInitialPruning(), if
1999 : : * necessary, and the bitmapset of surviving subplans' indexes would have
2000 : : * been stored as the part_prune_index'th element of
2001 : : * EState.es_part_prune_results.
2002 : : *
2003 : : * If subplans were indeed pruned during initial pruning, the subplan_map
2004 : : * arrays in the returned PartitionPruneState are re-sequenced to exclude those
2005 : : * subplans, but only if the maps will be needed for subsequent execution
2006 : : * pruning passes.
2007 : : */
2008 : : PartitionPruneState *
2009 : 403 : ExecInitPartitionExecPruning(PlanState *planstate,
2010 : : int n_total_subplans,
2011 : : int part_prune_index,
2012 : : Bitmapset *relids,
2013 : : Bitmapset **initially_valid_subplans)
2014 : : {
2015 : : PartitionPruneState *prunestate;
1351 alvherre@alvh.no-ip. 2016 : 403 : EState *estate = planstate->state;
2017 : : PartitionPruneInfo *pruneinfo;
2018 : :
2019 : : /* Obtain the pruneinfo we need. */
320 amitlan@postgresql.o 2020 : 403 : pruneinfo = list_nth_node(PartitionPruneInfo, estate->es_part_prune_infos,
2021 : : part_prune_index);
2022 : :
2023 : : /* Its relids better match the plan node's or the planner messed up. */
2024 [ - + ]: 403 : if (!bms_equal(relids, pruneinfo->relids))
320 amitlan@postgresql.o 2025 [ # # ]:UBC 0 : elog(ERROR, "wrong pruneinfo with relids=%s found at part_prune_index=%d contained in plan node with relids=%s",
2026 : : bmsToString(pruneinfo->relids), part_prune_index,
2027 : : bmsToString(relids));
2028 : :
2029 : : /*
2030 : : * The PartitionPruneState would have been created by
2031 : : * ExecDoInitialPruning() and stored as the part_prune_index'th element of
2032 : : * EState.es_part_prune_states.
2033 : : */
319 amitlan@postgresql.o 2034 :CBC 403 : prunestate = list_nth(estate->es_part_prune_states, part_prune_index);
2035 [ - + ]: 403 : Assert(prunestate != NULL);
2036 : :
2037 : : /* Use the result of initial pruning done by ExecDoInitialPruning(). */
1351 alvherre@alvh.no-ip. 2038 [ + + ]: 403 : if (prunestate->do_initial_prune)
319 amitlan@postgresql.o 2039 : 225 : *initially_valid_subplans = list_nth_node(Bitmapset,
2040 : : estate->es_part_prune_results,
2041 : : part_prune_index);
2042 : : else
2043 : : {
2044 : : /* No pruning, so we'll need to initialize all subplans */
1351 alvherre@alvh.no-ip. 2045 [ - + ]: 178 : Assert(n_total_subplans > 0);
2046 : 178 : *initially_valid_subplans = bms_add_range(NULL, 0,
2047 : : n_total_subplans - 1);
2048 : : }
2049 : :
2050 : : /*
2051 : : * The exec pruning state must also be initialized, if needed, before it
2052 : : * can be used for pruning during execution.
2053 : : *
2054 : : * This also re-sequences subplan indexes contained in prunestate to
2055 : : * account for any that were removed due to initial pruning; refer to the
2056 : : * condition in InitExecPartitionPruneContexts() that is used to determine
2057 : : * whether to do this. If no exec pruning needs to be done, we would thus
2058 : : * leave the maps to be in an invalid state, but that's ok since that data
2059 : : * won't be consulted again (cf initial Assert in
2060 : : * ExecFindMatchingSubPlans).
2061 : : */
319 amitlan@postgresql.o 2062 [ + + ]: 403 : if (prunestate->do_exec_prune)
2063 : 199 : InitExecPartitionPruneContexts(prunestate, planstate,
2064 : : *initially_valid_subplans,
2065 : : n_total_subplans);
2066 : :
1351 alvherre@alvh.no-ip. 2067 : 403 : return prunestate;
2068 : : }
2069 : :
2070 : : /*
2071 : : * CreatePartitionPruneState
2072 : : * Build the data structure required for calling ExecFindMatchingSubPlans
2073 : : *
2074 : : * This includes PartitionPruneContexts (stored in each
2075 : : * PartitionedRelPruningData corresponding to a PartitionedRelPruneInfo),
2076 : : * which hold the ExprStates needed to evaluate pruning expressions, and
2077 : : * mapping arrays to convert partition indexes from the pruning logic
2078 : : * into subplan indexes in the parent plan node's list of child subplans.
2079 : : *
2080 : : * 'pruneinfo' is a PartitionPruneInfo as generated by
2081 : : * make_partition_pruneinfo. Here we build a PartitionPruneState containing a
2082 : : * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of
2083 : : * pruneinfo->prune_infos), each of which contains a PartitionedRelPruningData
2084 : : * for each PartitionedRelPruneInfo appearing in that sublist. This two-level
2085 : : * system is needed to keep from confusing the different hierarchies when a
2086 : : * UNION ALL contains multiple partitioned tables as children. The data
2087 : : * stored in each PartitionedRelPruningData can be re-used each time we
2088 : : * re-evaluate which partitions match the pruning steps provided in each
2089 : : * PartitionedRelPruneInfo.
2090 : : *
2091 : : * Note that only the PartitionPruneContexts for initial pruning are
2092 : : * initialized here. Those required for exec pruning are initialized later in
2093 : : * ExecInitPartitionExecPruning(), as they depend on the availability of the
2094 : : * parent plan node's PlanState.
2095 : : *
2096 : : * If initial pruning steps are to be skipped (e.g., during EXPLAIN
2097 : : * (GENERIC_PLAN)), *all_leafpart_rtis will be populated with the RT indexes of
2098 : : * all leaf partitions whose scanning subnode is included in the parent plan
2099 : : * node's list of child plans. The caller must add these RT indexes to
2100 : : * estate->es_unpruned_relids.
2101 : : */
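: : /*
: :  * Shape of the result (illustrative): for a UNION ALL over two
: :  * partitioned tables, one of which has a sub-partitioned child, we build
: :  *     PartitionPruneState
: :  *       partprunedata[0] -> PartitionPruningData     (first hierarchy)
: :  *                             partrelprunedata[0..1]
: :  *       partprunedata[1] -> PartitionPruningData     (second hierarchy)
: :  *                             partrelprunedata[0]
: :  * with one PartitionedRelPruningData per PartitionedRelPruneInfo.
: :  */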
2102 : : static PartitionPruneState *
312 amitlan@postgresql.o 2103 : 401 : CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo,
2104 : : Bitmapset **all_leafpart_rtis)
2105 : : {
2106 : : PartitionPruneState *prunestate;
2107 : : int n_part_hierarchies;
2108 : : ListCell *lc;
2109 : : int i;
2110 : :
2111 : : /*
2112 : : * Expression context that will be used by partkey_datum_from_expr() to
2113 : : * evaluate expressions for comparison against partition bounds.
2114 : : */
319 2115 : 401 : ExprContext *econtext = CreateExprContext(estate);
2116 : :
2117 : : /* For data reading, executor always includes detached partitions */
2476 rhaas@postgresql.org 2118 [ + + ]: 401 : if (estate->es_partition_directory == NULL)
2119 : 377 : estate->es_partition_directory =
1699 alvherre@alvh.no-ip. 2120 : 377 : CreatePartitionDirectory(estate->es_query_cxt, false);
2121 : :
1351 2122 : 401 : n_part_hierarchies = list_length(pruneinfo->prune_infos);
2694 tgl@sss.pgh.pa.us 2123 [ - + ]: 401 : Assert(n_part_hierarchies > 0);
2124 : :
2125 : : /*
2126 : : * Allocate the data structure
2127 : : */
2128 : : prunestate = (PartitionPruneState *)
2129 : 401 : palloc(offsetof(PartitionPruneState, partprunedata) +
2130 : : sizeof(PartitionPruningData *) * n_part_hierarchies);
2131 : :
2132 : : /* Save ExprContext for use during InitExecPartitionPruneContexts(). */
319 amitlan@postgresql.o 2133 : 401 : prunestate->econtext = econtext;
2694 tgl@sss.pgh.pa.us 2134 : 401 : prunestate->execparamids = NULL;
2135 : : /* other_subplans can change at runtime, so we need our own copy */
1351 alvherre@alvh.no-ip. 2136 : 401 : prunestate->other_subplans = bms_copy(pruneinfo->other_subplans);
2746 tgl@sss.pgh.pa.us 2137 : 401 : prunestate->do_initial_prune = false; /* may be set below */
2138 : 401 : prunestate->do_exec_prune = false; /* may be set below */
2694 2139 : 401 : prunestate->num_partprunedata = n_part_hierarchies;
2140 : :
2141 : : /*
2142 : : * Create a short-term memory context which we'll use when making calls to
2143 : : * the partition pruning functions. This avoids possible memory leaks,
2144 : : * since the pruning functions call comparison functions that aren't under
2145 : : * our control.
2146 : : */
2810 alvherre@alvh.no-ip. 2147 : 401 : prunestate->prune_context =
2148 : 401 : AllocSetContextCreate(CurrentMemoryContext,
2149 : : "Partition Prune",
2150 : : ALLOCSET_DEFAULT_SIZES);
2151 : :
2152 : 401 : i = 0;
1351 2153 [ + - + + : 814 : foreach(lc, pruneinfo->prune_infos)
+ + ]
2154 : : {
2694 tgl@sss.pgh.pa.us 2155 : 413 : List *partrelpruneinfos = lfirst_node(List, lc);
2156 : 413 : int npartrelpruneinfos = list_length(partrelpruneinfos);
2157 : : PartitionPruningData *prunedata;
2158 : : ListCell *lc2;
2159 : : int j;
2160 : :
2161 : : prunedata = (PartitionPruningData *)
2162 : 413 : palloc(offsetof(PartitionPruningData, partrelprunedata) +
2163 : 413 : npartrelpruneinfos * sizeof(PartitionedRelPruningData));
2164 : 413 : prunestate->partprunedata[i] = prunedata;
2165 : 413 : prunedata->num_partrelprunedata = npartrelpruneinfos;
2166 : :
2167 : 413 : j = 0;
2168 [ + - + + : 1231 : foreach(lc2, partrelpruneinfos)
+ + ]
2169 : : {
2170 : 818 : PartitionedRelPruneInfo *pinfo = lfirst_node(PartitionedRelPruneInfo, lc2);
2171 : 818 : PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2172 : : Relation partrel;
2173 : : PartitionDesc partdesc;
2174 : : PartitionKey partkey;
2175 : :
2176 : : /*
2177 : : * We can rely on the copies of the partitioned table's partition
2178 : : * key and partition descriptor appearing in its relcache entry,
2179 : : * because that entry will be held open and locked for the
2180 : : * duration of this executor run.
2181 : : */
272 amitlan@postgresql.o 2182 : 818 : partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex, false);
2183 : :
2184 : : /* Remember for InitExecPartitionPruneContexts(). */
319 2185 : 818 : pprune->partrel = partrel;
2186 : :
2630 tgl@sss.pgh.pa.us 2187 : 818 : partkey = RelationGetPartitionKey(partrel);
2476 rhaas@postgresql.org 2188 : 818 : partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
2189 : : partrel);
2190 : :
2191 : : /*
2192 : : * Initialize the subplan_map and subpart_map.
2193 : : *
2194 : : * The set of partitions that exist now might not be the same as the
2195 : : * set that existed when the plan was made. The normal case is that
2196 : : * it is; optimize for that case with a quick comparison, and just
2197 : : * copy the subplan_map, making subpart_map and leafpart_rti_map
2198 : : * point to the ones in the PruneInfo.
2199 : : *
2200 : : * For the case where they aren't identical, we could have more
2201 : : * partitions on either side; or even exactly the same number of
2202 : : * them on both but the set of OIDs doesn't match fully. Handle
2203 : : * this by creating new subplan_map and subpart_map arrays that
2204 : : * correspond to the ones in the PruneInfo where the new
2205 : : * partition descriptor's OIDs match. Any that don't match can be
2206 : : * set to -1, as if they were pruned. By construction, both
2207 : : * arrays are in partition bounds order.
2208 : : */
2405 tgl@sss.pgh.pa.us 2209 : 818 : pprune->nparts = partdesc->nparts;
5 michael@paquier.xyz 2210 :GNC 818 : pprune->subplan_map = palloc_array(int, partdesc->nparts);
2211 : :
538 alvherre@alvh.no-ip. 2212 [ + + ]:CBC 818 : if (partdesc->nparts == pinfo->nparts &&
2213 : 817 : memcmp(partdesc->oids, pinfo->relid_map,
2214 [ + + ]: 817 : sizeof(int) * partdesc->nparts) == 0)
2215 : : {
2476 rhaas@postgresql.org 2216 : 756 : pprune->subpart_map = pinfo->subpart_map;
312 amitlan@postgresql.o 2217 : 756 : pprune->leafpart_rti_map = pinfo->leafpart_rti_map;
2476 rhaas@postgresql.org 2218 : 756 : memcpy(pprune->subplan_map, pinfo->subplan_map,
2219 : 756 : sizeof(int) * pinfo->nparts);
2220 : : }
2221 : : else
2222 : : {
2400 tgl@sss.pgh.pa.us 2223 : 62 : int pd_idx = 0;
2224 : : int pp_idx;
2225 : :
2226 : : /*
2227 : : * When the partition arrays are not identical, there could be
2228 : : * some new ones but it's also possible that one was removed;
2229 : : * we cope with both situations by walking the arrays and
2230 : : * discarding those that don't match.
2231 : : *
2232 : : * If the number of partitions on both sides match, it's still
2233 : : * possible that one partition has been detached and another
2234 : : * attached. Cope with that by creating a map that skips any
2235 : : * mismatches.
2236 : : */
5 michael@paquier.xyz 2237 :GNC 62 : pprune->subpart_map = palloc_array(int, partdesc->nparts);
2238 : 62 : pprune->leafpart_rti_map = palloc_array(int, partdesc->nparts);
2239 : :
1959 tgl@sss.pgh.pa.us 2240 [ + + ]:CBC 264 : for (pp_idx = 0; pp_idx < partdesc->nparts; pp_idx++)
2241 : : {
2242 : : /* Skip any InvalidOid relid_map entries */
2243 [ + + ]: 312 : while (pd_idx < pinfo->nparts &&
2244 [ + + ]: 252 : !OidIsValid(pinfo->relid_map[pd_idx]))
2245 : 110 : pd_idx++;
2246 : :
538 alvherre@alvh.no-ip. 2247 : 202 : recheck:
1959 tgl@sss.pgh.pa.us 2248 [ + + ]: 202 : if (pd_idx < pinfo->nparts &&
2249 [ + + ]: 142 : pinfo->relid_map[pd_idx] == partdesc->oids[pp_idx])
2250 : : {
2251 : : /* match... */
2476 rhaas@postgresql.org 2252 : 91 : pprune->subplan_map[pp_idx] =
2253 : 91 : pinfo->subplan_map[pd_idx];
2254 : 91 : pprune->subpart_map[pp_idx] =
1959 tgl@sss.pgh.pa.us 2255 : 91 : pinfo->subpart_map[pd_idx];
312 amitlan@postgresql.o 2256 : 91 : pprune->leafpart_rti_map[pp_idx] =
2257 : 91 : pinfo->leafpart_rti_map[pd_idx];
1959 tgl@sss.pgh.pa.us 2258 : 91 : pd_idx++;
538 alvherre@alvh.no-ip. 2259 : 91 : continue;
2260 : : }
2261 : :
2262 : : /*
2263 : : * There isn't an exact match in the corresponding
2264 : : * positions of both arrays. Peek ahead in
2265 : : * pinfo->relid_map to see if we have a match for the
2266 : : * current partition in partdesc. Normally if a match
2267 : : * exists it's just one element ahead, and it means the
2268 : : * planner saw one extra partition that we no longer see
2269 : : * now (its concurrent detach finished just in between);
2270 : : * so we skip that one by updating pd_idx to the new
2271 : : * location and jumping above. We can then continue to
2272 : : * match the rest of the elements after skipping the OID
2273 : : * with no match; no future matches are tried for the
2274 : : * element that was skipped, because we know the arrays to
2275 : : * be in the same order.
2276 : : *
2277 : : * If we don't see a match anywhere in the rest of the
2278 : : * pinfo->relid_map array, that means we see an element
2279 : : * now that the planner didn't see, so mark that one as
2280 : : * pruned and move on.
2281 : : */
2282 [ + + ]: 144 : for (int pd_idx2 = pd_idx + 1; pd_idx2 < pinfo->nparts; pd_idx2++)
2283 : : {
2284 [ - + ]: 33 : if (pd_idx2 >= pinfo->nparts)
538 alvherre@alvh.no-ip. 2285 :UBC 0 : break;
538 alvherre@alvh.no-ip. 2286 [ - + ]:CBC 33 : if (pinfo->relid_map[pd_idx2] == partdesc->oids[pp_idx])
2287 : : {
538 alvherre@alvh.no-ip. 2288 :UBC 0 : pd_idx = pd_idx2;
2289 : 0 : goto recheck;
2290 : : }
2291 : : }
2292 : :
538 alvherre@alvh.no-ip. 2293 :CBC 111 : pprune->subpart_map[pp_idx] = -1;
2294 : 111 : pprune->subplan_map[pp_idx] = -1;
312 amitlan@postgresql.o 2295 : 111 : pprune->leafpart_rti_map[pp_idx] = 0;
2296 : : }
2297 : : }
2298 : :
2299 : : /* present_parts is also subject to later modification */
2405 tgl@sss.pgh.pa.us 2300 : 818 : pprune->present_parts = bms_copy(pinfo->present_parts);
2301 : :
2302 : : /*
2303 : : * Only initial_context is initialized here. exec_context is
2304 : : * initialized during ExecInitPartitionExecPruning() when the
2305 : : * parent plan's PlanState is available.
2306 : : *
2307 : : * Note that we must skip execution-time (both "init" and "exec")
2308 : : * partition pruning in EXPLAIN (GENERIC_PLAN), since parameter
2309 : : * values may be missing.
2310 : : */
2311 : 818 : pprune->initial_pruning_steps = pinfo->initial_pruning_steps;
998 2312 [ + + ]: 818 : if (pinfo->initial_pruning_steps &&
2313 [ + + ]: 278 : !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2314 : : {
1351 alvherre@alvh.no-ip. 2315 : 275 : InitPartitionPruneContext(&pprune->initial_context,
2316 : : pprune->initial_pruning_steps,
2317 : : partdesc, partkey, NULL,
2318 : : econtext);
2319 : : /* Record whether initial pruning is needed at any level */
2405 tgl@sss.pgh.pa.us 2320 : 275 : prunestate->do_initial_prune = true;
2321 : : }
2322 : 818 : pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
998 2323 [ + + ]: 818 : if (pinfo->exec_pruning_steps &&
2324 [ + - ]: 255 : !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2325 : : {
2326 : : /* Record whether exec pruning is needed at any level */
2405 2327 : 255 : prunestate->do_exec_prune = true;
2328 : : }
2329 : :
2330 : : /*
2331 : : * Accumulate the IDs of all PARAM_EXEC Params affecting the
2332 : : * partitioning decisions at this plan node.
2333 : : */
2694 2334 : 1636 : prunestate->execparamids = bms_add_members(prunestate->execparamids,
2335 : 818 : pinfo->execparamids);
2336 : :
2337 : : /*
2338 : : * Return all leaf partition indexes if we're skipping pruning in
2339 : : * the EXPLAIN (GENERIC_PLAN) case.
2340 : : */
312 amitlan@postgresql.o 2341 [ + + + + ]: 818 : if (pinfo->initial_pruning_steps && !prunestate->do_initial_prune)
2342 : : {
2343 : 3 : int part_index = -1;
2344 : :
2345 : 9 : while ((part_index = bms_next_member(pprune->present_parts,
2346 [ + + ]: 9 : part_index)) >= 0)
2347 : : {
2348 : 6 : Index rtindex = pprune->leafpart_rti_map[part_index];
2349 : :
2350 [ + - ]: 6 : if (rtindex)
2351 : 6 : *all_leafpart_rtis = bms_add_member(*all_leafpart_rtis,
2352 : : rtindex);
2353 : : }
2354 : : }
2355 : :
2694 tgl@sss.pgh.pa.us 2356 : 818 : j++;
2357 : : }
2810 alvherre@alvh.no-ip. 2358 : 413 : i++;
2359 : : }
2360 : :
2361 : 401 : return prunestate;
2362 : : }
2363 : :
2364 : : /*
2365 : : * Initialize a PartitionPruneContext for the given list of pruning steps.
2366 : : */
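: : /*
: :  * Note (illustrative): stepcmpfuncs and exprstates are flat arrays with
: :  * one slot per (pruning step, partition key column) pair, addressed via
: :  * PruneCxtStateIdx(partnatts, step_id, keyno); e.g. with partnatts = 2,
: :  * step 3 / key 1 lands at slot 3 * 2 + 1 = 7.
: :  */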
2367 : : static void
1351 2368 : 531 : InitPartitionPruneContext(PartitionPruneContext *context,
2369 : : List *pruning_steps,
2370 : : PartitionDesc partdesc,
2371 : : PartitionKey partkey,
2372 : : PlanState *planstate,
2373 : : ExprContext *econtext)
2374 : : {
2375 : : int n_steps;
2376 : : int partnatts;
2377 : : ListCell *lc;
2378 : :
2405 tgl@sss.pgh.pa.us 2379 : 531 : n_steps = list_length(pruning_steps);
2380 : :
2381 : 531 : context->strategy = partkey->strategy;
2382 : 531 : context->partnatts = partnatts = partkey->partnatts;
2383 : 531 : context->nparts = partdesc->nparts;
2384 : 531 : context->boundinfo = partdesc->boundinfo;
2385 : 531 : context->partcollation = partkey->partcollation;
2386 : 531 : context->partsupfunc = partkey->partsupfunc;
2387 : :
2388 : : /* We'll look up type-specific support functions as needed */
5 michael@paquier.xyz 2389 :GNC 531 : context->stepcmpfuncs = palloc0_array(FmgrInfo, n_steps * partnatts);
2390 : :
2405 tgl@sss.pgh.pa.us 2391 :CBC 531 : context->ppccontext = CurrentMemoryContext;
2392 : 531 : context->planstate = planstate;
1351 alvherre@alvh.no-ip. 2393 : 531 : context->exprcontext = econtext;
2394 : :
2395 : : /* Initialize expression state for each expression we need */
5 michael@paquier.xyz 2396 :GNC 531 : context->exprstates = palloc0_array(ExprState *, n_steps * partnatts);
2405 tgl@sss.pgh.pa.us 2397 [ + - + + :CBC 1393 : foreach(lc, pruning_steps)
+ + ]
2398 : : {
2399 : 862 : PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc);
795 drowley@postgresql.o 2400 : 862 : ListCell *lc2 = list_head(step->exprs);
2401 : : int keyno;
2402 : :
2403 : : /* not needed for other step kinds */
2405 tgl@sss.pgh.pa.us 2404 [ + + ]: 862 : if (!IsA(step, PartitionPruneStepOp))
2405 : 143 : continue;
2406 : :
2407 [ - + ]: 719 : Assert(list_length(step->exprs) <= partnatts);
2408 : :
795 drowley@postgresql.o 2409 [ + + ]: 1513 : for (keyno = 0; keyno < partnatts; keyno++)
2410 : : {
2411 [ + + ]: 794 : if (bms_is_member(keyno, step->nullkeys))
2412 : 3 : continue;
2413 : :
2414 [ + + ]: 791 : if (lc2 != NULL)
2415 : : {
2416 : 743 : Expr *expr = lfirst(lc2);
2417 : :
2418 : : /* not needed for Consts */
2419 [ + + ]: 743 : if (!IsA(expr, Const))
2420 : : {
2421 : 696 : int stateidx = PruneCxtStateIdx(partnatts,
2422 : : step->step.step_id,
2423 : : keyno);
2424 : :
2425 : : /*
2426 : : * When planstate is NULL, pruning_steps is known not to
2427 : : * contain any expressions that depend on the parent plan.
2428 : : * Information of any available EXTERN parameters must be
2429 : : * passed explicitly in that case, which the caller must
2430 : : * have made available via econtext.
2431 : : */
2432 [ + + ]: 696 : if (planstate == NULL)
2433 : 407 : context->exprstates[stateidx] =
2434 : 407 : ExecInitExprWithParams(expr,
2435 : : econtext->ecxt_param_list_info);
2436 : : else
2437 : 289 : context->exprstates[stateidx] =
2438 : 289 : ExecInitExpr(expr, context->planstate);
2439 : : }
2440 : 743 : lc2 = lnext(step->exprs, lc2);
2441 : : }
2442 : : }
2443 : : }
2405 tgl@sss.pgh.pa.us 2444 : 531 : }
2445 : :
2446 : : /*
2447 : : * InitExecPartitionPruneContexts
2448 : : * Initialize exec pruning contexts deferred by CreatePartitionPruneState()
2449 : : *
2450 : : * This function finalizes exec pruning setup for a PartitionPruneState by
2451 : : * initializing contexts for pruning steps that require the parent plan's
2452 : : * PlanState. It iterates over PartitionPruningData entries and sets up the
2453 : : * necessary execution contexts for pruning during query execution.
2454 : : *
2455 : : * It also fixes the mapping of partition indexes to subplan indexes in
2456 : : * prunestate to account for the new list of subplans that survived
2457 : : * initial pruning.
2458 : : *
2459 : : * Current values of the indexes present in PartitionPruneState count all the
2460 : : * subplans that would be present before initial pruning was done. If initial
2461 : : * pruning got rid of some of the subplans, any subsequent pruning passes will
2462 : : * be looking at a different set of target subplans to choose from than those
2463 : : * in the pre-initial-pruning set, so the maps in PartitionPruneState
2464 : : * containing those indexes must be updated to reflect the new indexes of
2465 : : * subplans in the post-initial-pruning set.
2466 : : */
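: : /*
: :  * Worked example (illustrative): with n_total_subplans = 5 and subplans
: :  * {0, 2, 4} surviving initial pruning, the temporary 1-based map built
: :  * below is [1, 0, 2, 0, 3], so an old subplan_map entry of 2 becomes
: :  * 2 - 1 = 1 and pruned entries map to 0 - 1 = -1.
: :  */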
2467 : : static void
319 amitlan@postgresql.o 2468 : 199 : InitExecPartitionPruneContexts(PartitionPruneState *prunestate,
2469 : : PlanState *parent_plan,
2470 : : Bitmapset *initially_valid_subplans,
2471 : : int n_total_subplans)
2472 : : {
2473 : : EState *estate;
2474 : 199 : int *new_subplan_indexes = NULL;
2475 : : Bitmapset *new_other_subplans;
2476 : : int i;
2477 : : int newidx;
2478 : 199 : bool fix_subplan_map = false;
2479 : :
2480 [ - + ]: 199 : Assert(prunestate->do_exec_prune);
2481 [ - + ]: 199 : Assert(parent_plan != NULL);
2482 : 199 : estate = parent_plan->state;
2483 : :
2484 : : /*
2485 : : * No need to fix subplans maps if initial pruning didn't eliminate any
2486 : : * subplans.
2487 : : */
2488 [ + + ]: 199 : if (bms_num_members(initially_valid_subplans) < n_total_subplans)
2489 : : {
2490 : 24 : fix_subplan_map = true;
2491 : :
2492 : : /*
2493 : : * First we must build a temporary array which maps old subplan
2494 : : * indexes to new ones. For convenience of initialization, we use
2495 : : * 1-based indexes in this array and leave pruned items as 0.
2496 : : */
5 michael@paquier.xyz 2497 :GNC 24 : new_subplan_indexes = palloc0_array(int, n_total_subplans);
319 amitlan@postgresql.o 2498 :CBC 24 : newidx = 1;
2499 : 24 : i = -1;
2500 [ + + ]: 93 : while ((i = bms_next_member(initially_valid_subplans, i)) >= 0)
2501 : : {
2502 [ - + ]: 69 : Assert(i < n_total_subplans);
2503 : 69 : new_subplan_indexes[i] = newidx++;
2504 : : }
2505 : : }
2506 : :
2507 : : /*
2508 : : * Now we can update each PartitionedRelPruneInfo's subplan_map with new
2509 : : * subplan indexes. We must also recompute its present_parts bitmap.
2510 : : */
1351 alvherre@alvh.no-ip. 2511 [ + + ]: 410 : for (i = 0; i < prunestate->num_partprunedata; i++)
2512 : : {
2513 : 211 : PartitionPruningData *prunedata = prunestate->partprunedata[i];
2514 : : int j;
2515 : :
2516 : : /*
2517 : : * Within each hierarchy, we perform this loop in back-to-front order
2518 : : * so that we determine present_parts for the lowest-level partitioned
2519 : : * tables first. This way we can tell whether a sub-partitioned
2520 : : * table's partitions were entirely pruned so we can exclude it from
2521 : : * the current level's present_parts.
2522 : : */
2523 [ + + ]: 650 : for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
2524 : : {
2525 : 439 : PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2526 : 439 : int nparts = pprune->nparts;
2527 : : int k;
2528 : :
2529 : : /* Initialize PartitionPruneContext for exec pruning, if needed. */
319 amitlan@postgresql.o 2530 [ + + ]: 439 : if (pprune->exec_pruning_steps != NIL)
2531 : : {
2532 : : PartitionKey partkey;
2533 : : PartitionDesc partdesc;
2534 : :
2535 : : /*
2536 : : * See the comment in CreatePartitionPruneState() regarding
2537 : : * the usage of partdesc and partkey.
2538 : : */
2539 : 256 : partkey = RelationGetPartitionKey(pprune->partrel);
2540 : 256 : partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
2541 : : pprune->partrel);
2542 : :
2543 : 256 : InitPartitionPruneContext(&pprune->exec_context,
2544 : : pprune->exec_pruning_steps,
2545 : : partdesc, partkey, parent_plan,
2546 : : prunestate->econtext);
2547 : : }
2548 : :
2549 [ + + ]: 439 : if (!fix_subplan_map)
2550 : 343 : continue;
2551 : :
2552 : : /* We just rebuild present_parts from scratch */
1351 alvherre@alvh.no-ip. 2553 : 96 : bms_free(pprune->present_parts);
2554 : 96 : pprune->present_parts = NULL;
2555 : :
2556 [ + + ]: 354 : for (k = 0; k < nparts; k++)
2557 : : {
2558 : 258 : int oldidx = pprune->subplan_map[k];
2559 : : int subidx;
2560 : :
2561 : : /*
2562 : : * If this partition existed as a subplan then change the old
2563 : : * subplan index to the new subplan index. The new index may
2564 : : * become -1 if the partition was pruned above, or it may just
2565 : : * come earlier in the subplan list due to some subplans being
2566 : : * removed earlier in the list. If it's a subpartition, add
2567 : : * it to present_parts unless it's entirely pruned.
2568 : : */
2569 [ + + ]: 258 : if (oldidx >= 0)
2570 : : {
2571 [ - + ]: 198 : Assert(oldidx < n_total_subplans);
2572 : 198 : pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
2573 : :
2574 [ + + ]: 198 : if (new_subplan_indexes[oldidx] > 0)
2575 : 57 : pprune->present_parts =
2576 : 57 : bms_add_member(pprune->present_parts, k);
2577 : : }
2578 [ + - ]: 60 : else if ((subidx = pprune->subpart_map[k]) >= 0)
2579 : : {
2580 : : PartitionedRelPruningData *subprune;
2581 : :
2582 : 60 : subprune = &prunedata->partrelprunedata[subidx];
2583 : :
2584 [ + + ]: 60 : if (!bms_is_empty(subprune->present_parts))
2585 : 24 : pprune->present_parts =
2586 : 24 : bms_add_member(pprune->present_parts, k);
2587 : : }
2588 : : }
2589 : : }
2590 : : }
2591 : :
2592 : : /*
2593 : : * If we fixed subplan maps, we must also recompute the other_subplans
2594 : : * set, since indexes in it may change.
2595 : : */
319 amitlan@postgresql.o 2596 [ + + ]: 199 : if (fix_subplan_map)
2597 : : {
2598 : 24 : new_other_subplans = NULL;
2599 : 24 : i = -1;
2600 [ + + ]: 36 : while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
2601 : 12 : new_other_subplans = bms_add_member(new_other_subplans,
2602 : 12 : new_subplan_indexes[i] - 1);
2603 : :
2604 : 24 : bms_free(prunestate->other_subplans);
2605 : 24 : prunestate->other_subplans = new_other_subplans;
2606 : :
2607 : 24 : pfree(new_subplan_indexes);
2608 : : }
2810 alvherre@alvh.no-ip. 2609 : 199 : }
2610 : :
2611 : : /*
2612 : : * ExecFindMatchingSubPlans
2613 : : * Determine which subplans match the pruning steps detailed in
2614 : : * 'prunestate' for the current comparison expression values.
2615 : : *
2616 : : * Pass initial_prune as true if PARAM_EXEC Params cannot yet be evaluated. This
2617 : : * differentiates the initial executor-time pruning step from later
2618 : : * runtime pruning.
2619 : : *
2620 : : * The caller must pass a non-NULL validsubplan_rtis during initial pruning
2621 : : * to collect the RT indexes of leaf partitions whose subnodes will be
2622 : : * executed. These RT indexes are later added to EState.es_unpruned_relids.
2623 : : */
2624 : : Bitmapset *
1351 2625 : 1949 : ExecFindMatchingSubPlans(PartitionPruneState *prunestate,
2626 : : bool initial_prune,
2627 : : Bitmapset **validsubplan_rtis)
2628 : : {
2810 2629 : 1949 : Bitmapset *result = NULL;
2630 : : MemoryContext oldcontext;
2631 : : int i;
2632 : :
2633 : : /*
2634 : : * Either we're here on the initial prune done during pruning
2635 : : * initialization, or we're at a point where PARAM_EXEC Params can be
2636 : : * evaluated *and* there are steps in which to do so.
2637 : : */
1351 2638 [ + + - + ]: 1949 : Assert(initial_prune || prunestate->do_exec_prune);
312 amitlan@postgresql.o 2639 [ + + - + ]: 1949 : Assert(validsubplan_rtis != NULL || !initial_prune);
2640 : :
2641 : : /*
2642 : : * Switch to a temp context to avoid leaking memory in the executor's
2643 : : * query-lifespan memory context.
2644 : : */
2810 alvherre@alvh.no-ip. 2645 : 1949 : oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
2646 : :
2647 : : /*
2648 : : * For each hierarchy, run the pruning tests and add the indexes of
2649 : : * the surviving subplans to "result".
2650 : : */
2694 tgl@sss.pgh.pa.us 2651 [ + + ]: 3919 : for (i = 0; i < prunestate->num_partprunedata; i++)
2652 : : {
1351 alvherre@alvh.no-ip. 2653 : 1970 : PartitionPruningData *prunedata = prunestate->partprunedata[i];
2654 : : PartitionedRelPruningData *pprune;
2655 : :
2656 : : /*
2657 : : * We pass the zeroth item, belonging to the root table of the
2658 : : * hierarchy, and find_matching_subplans_recurse() takes care of
2659 : : * recursing to other (lower-level) parents as needed.
2660 : : */
2694 tgl@sss.pgh.pa.us 2661 : 1970 : pprune = &prunedata->partrelprunedata[0];
1351 alvherre@alvh.no-ip. 2662 : 1970 : find_matching_subplans_recurse(prunedata, pprune, initial_prune,
2663 : : &result, validsubplan_rtis);
2664 : :
2665 : : /*
2666 : : * Expression eval may have used space in ExprContext too. Avoid
2667 : : * accessing exec_context during initial pruning, as it is not valid
2668 : : * at that stage.
2669 : : */
319 amitlan@postgresql.o 2670 [ + + + + ]: 1970 : if (!initial_prune && pprune->exec_pruning_steps)
1351 alvherre@alvh.no-ip. 2671 : 1698 : ResetExprContext(pprune->exec_context.exprcontext);
2672 : : }
2673 : :
2674 : : /* Add in any subplans that partition pruning didn't account for */
2588 tgl@sss.pgh.pa.us 2675 : 1949 : result = bms_add_members(result, prunestate->other_subplans);
2676 : :
2810 alvherre@alvh.no-ip. 2677 : 1949 : MemoryContextSwitchTo(oldcontext);
2678 : :
2679 : : /* Copy result out of the temp context before we reset it */
2680 : 1949 : result = bms_copy(result);
312 amitlan@postgresql.o 2681 [ + + ]: 1949 : if (validsubplan_rtis)
2682 : 224 : *validsubplan_rtis = bms_copy(*validsubplan_rtis);
2683 : :
2810 alvherre@alvh.no-ip. 2684 : 1949 : MemoryContextReset(prunestate->prune_context);
2685 : :
2686 : 1949 : return result;
2687 : : }
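
Summarizing the contract of ExecFindMatchingSubPlans: each hierarchy
contributes the subplans its pruning steps could not eliminate, and
other_subplans is unioned in unconditionally at the end. A loose stand-in
sketch of that shape, using a uint64_t mask in place of a Bitmapset (the real
code uses bms_add_member/bms_add_members and a temporary memory context):

    #include <stdint.h>
    #include <stdio.h>

    /* Union the survivors of every hierarchy, then the never-prunable set. */
    static uint64_t
    find_matching_subplans_sketch(const uint64_t *per_hierarchy, int nhier,
                                  uint64_t other_subplans)
    {
        uint64_t result = 0;

        for (int i = 0; i < nhier; i++)
            result |= per_hierarchy[i];

        return result | other_subplans;
    }

    int
    main(void)
    {
        /* hierarchy 0 keeps subplans 0 and 2; hierarchy 1 keeps 3 and 4 */
        uint64_t hier[] = {0x05, 0x18};
        /* subplan 6 lies outside any prunable hierarchy */
        uint64_t valid = find_matching_subplans_sketch(hier, 2, 0x40);

        printf("valid subplans mask: 0x%llx\n", (unsigned long long) valid);
        return 0;
    }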
2688 : :
2689 : : /*
2690 : : * find_matching_subplans_recurse
2691 : : * Recursive worker function for ExecFindMatchingSubPlans
2692 : : *
2693 : : * Adds valid (non-prunable) subplan IDs to *validsubplans. If
2694 : : * validsubplan_rtis is non-NULL, it also adds the RT indexes of their
2695 : : * corresponding partitions, but only if they are leaf partitions.
2696 : : */
2697 : : static void
2694 tgl@sss.pgh.pa.us 2698 : 2177 : find_matching_subplans_recurse(PartitionPruningData *prunedata,
2699 : : PartitionedRelPruningData *pprune,
2700 : : bool initial_prune,
2701 : : Bitmapset **validsubplans,
2702 : : Bitmapset **validsubplan_rtis)
2703 : : {
2704 : : Bitmapset *partset;
2705 : : int i;
2706 : :
2707 : : /* Guard against stack overflow due to overly deep partition hierarchy. */
2810 alvherre@alvh.no-ip. 2708 : 2177 : check_stack_depth();
2709 : :
2710 : : /*
2711 : : * Prune as appropriate if we have pruning steps matching the current
2712 : : * execution context; otherwise just include all partitions at this
2713 : : * level.
2714 : : */
2405 tgl@sss.pgh.pa.us 2715 [ + + + + ]: 2177 : if (initial_prune && pprune->initial_pruning_steps)
2716 : 266 : partset = get_matching_partitions(&pprune->initial_context,
2717 : : pprune->initial_pruning_steps);
2718 [ + + + + ]: 1911 : else if (!initial_prune && pprune->exec_pruning_steps)
2719 : 1740 : partset = get_matching_partitions(&pprune->exec_context,
2720 : : pprune->exec_pruning_steps);
2721 : : else
2810 alvherre@alvh.no-ip. 2722 : 171 : partset = pprune->present_parts;
2723 : :
2724 : : /* Translate partset into subplan indexes */
2725 : 2177 : i = -1;
2726 [ + + ]: 3082 : while ((i = bms_next_member(partset, i)) >= 0)
2727 : : {
2746 tgl@sss.pgh.pa.us 2728 [ + + ]: 905 : if (pprune->subplan_map[i] >= 0)
2729 : : {
2810 alvherre@alvh.no-ip. 2730 : 1394 : *validsubplans = bms_add_member(*validsubplans,
2746 tgl@sss.pgh.pa.us 2731 : 697 : pprune->subplan_map[i]);
2732 : :
2733 : : /*
2734 : : * Only report leaf partitions. Non-leaf partitions may appear
2735 : : * here when they use an unflattened Append or MergeAppend.
2736 : : */
294 amitlan@postgresql.o 2737 [ + + + + ]: 697 : if (validsubplan_rtis && pprune->leafpart_rti_map[i])
312 2738 : 337 : *validsubplan_rtis = bms_add_member(*validsubplan_rtis,
2739 : 337 : pprune->leafpart_rti_map[i]);
2740 : : }
2741 : : else
2742 : : {
2810 alvherre@alvh.no-ip. 2743 : 208 : int partidx = pprune->subpart_map[i];
2744 : :
2746 tgl@sss.pgh.pa.us 2745 [ + + ]: 208 : if (partidx >= 0)
2694 2746 : 207 : find_matching_subplans_recurse(prunedata,
2747 : : &prunedata->partrelprunedata[partidx],
2748 : : initial_prune, validsubplans,
2749 : : validsubplan_rtis);
2750 : : else
2751 : : {
2752 : : /*
2753 : : * We get here if the planner already pruned all the sub-
2754 : : * partitions for this partition, so silently ignore it.
2755 : : * The end result is the same: all of its partitions would
2756 : : * have been pruned anyway; we just have no pruning steps
2757 : : * left to execute to verify that.
2758 : : */
2759 : : }
2760 : : }
2761 : : }
2810 alvherre@alvh.no-ip. 2762 : 2177 : }
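
The recursion above hinges on two per-partition maps: subplan_map[i] >= 0
resolves directly to a leaf subplan, while subpart_map[i] >= 0 names another
PartitionedRelPruningData to descend into; when both are negative, the planner
already pruned the whole subtree. A self-contained sketch of that dispatch,
with hypothetical types and data standing in for the PostgreSQL ones:

    #include <stdio.h>

    #define NPARTS 3

    typedef struct Level
    {
        int subplan_map[NPARTS]; /* >= 0: executor subplan index */
        int subpart_map[NPARTS]; /* >= 0: index of child Level; else -1 */
    } Level;

    static void
    collect_subplans(const Level *levels, int cur)
    {
        for (int i = 0; i < NPARTS; i++)
        {
            if (levels[cur].subplan_map[i] >= 0)
                printf("partition %d at level %d -> subplan %d\n",
                       i, cur, levels[cur].subplan_map[i]);
            else if (levels[cur].subpart_map[i] >= 0)
                collect_subplans(levels, levels[cur].subpart_map[i]);
            /* else: planner pruned this whole subtree; nothing to do */
        }
    }

    int
    main(void)
    {
        Level levels[2] = {
            /* level 0: partition 1 is itself partitioned (child level 1) */
            {{0, -1, 2}, {-1, 1, -1}},
            /* level 1: two leaves, plus one subtree the planner pruned */
            {{3, 4, -1}, {-1, -1, -1}},
        };

        collect_subplans(levels, 0);
        return 0;
    }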