/*-------------------------------------------------------------------------
 *
 * nodeModifyTable.c
 *	  routines to handle ModifyTable nodes.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeModifyTable.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *		ExecInitModifyTable - initialize the ModifyTable node
 *		ExecModifyTable - retrieve the next tuple from the node
 *		ExecEndModifyTable - shut down the ModifyTable node
 *		ExecReScanModifyTable - rescan the ModifyTable node
 *
 *	 NOTES
 *		The ModifyTable node receives input from its outerPlan, which is
 *		the data to insert for INSERT cases, the changed columns' new
 *		values plus row-locating info for UPDATE and MERGE cases, or just the
 *		row-locating info for DELETE cases.
 *
 *		The relation to modify can be an ordinary table, a foreign table, or a
 *		view.  If it's a view, either it has sufficient INSTEAD OF triggers or
 *		this node executes only MERGE ... DO NOTHING.  If the original MERGE
 *		targeted a view not in one of those two categories, earlier processing
 *		already pointed the ModifyTable result relation to an underlying
 *		relation of that other view.  This node does process
 *		ri_WithCheckOptions, which may have expressions from those other,
 *		automatically updatable views.
 *
 *		MERGE runs a join between the source relation and the target table.
 *		If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
 *		is an outer join that might output tuples without a matching target
 *		tuple.  In this case, any unmatched target tuples will have NULL
 *		row-locating info, and only INSERT can be run.  But for matched target
 *		tuples, the row-locating info is used to determine the tuple to UPDATE
 *		or DELETE.  When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
 *		SOURCE, all tuples produced by the join will include a matching target
 *		tuple, so all tuples contain row-locating info.
 *
 *		If the query specifies RETURNING, then the ModifyTable returns a
 *		RETURNING tuple after completing each row insert, update, or delete.
 *		It must be called again to continue the operation.  Without RETURNING,
 *		we just loop within the node until all the work is done, then
 *		return NULL.  This avoids useless call/return overhead.
 */
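/*
 * As a hypothetical illustration of the RETURNING protocol above, a query
 * such as
 *
 *		INSERT INTO foo VALUES (1), (2) RETURNING id;
 *
 * makes ExecModifyTable return one projected tuple per modified row, so the
 * caller must keep calling the node until it returns NULL; the same INSERT
 * without RETURNING is processed to completion within a single call.
 * (Table and column names here are illustrative only.)
 */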

#include "postgres.h"

#include "access/htup_details.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "commands/trigger.h"
#include "executor/execPartition.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "rewrite/rewriteHandler.h"
#include "rewrite/rewriteManip.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/injection_point.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"


typedef struct MTTargetRelLookup
{
    Oid         relationOid;    /* hash key, must be first */
    int         relationIndex;  /* rel's index in resultRelInfo[] array */
} MTTargetRelLookup;

/*
 * Context struct for a ModifyTable operation, containing basic execution
 * state and some output variables populated by ExecUpdateAct() and
 * ExecDeleteAct() to report the result of their actions to callers.
 */
typedef struct ModifyTableContext
{
    /* Operation state */
    ModifyTableState *mtstate;
    EPQState   *epqstate;
    EState     *estate;

    /*
     * Slot containing tuple obtained from ModifyTable's subplan.  Used to
     * access "junk" columns that are not going to be stored.
     */
    TupleTableSlot *planSlot;

    /*
     * Information about the changes that were made concurrently to a tuple
     * being updated or deleted
     */
    TM_FailureData tmfd;

    /*
     * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
     * clause that refers to OLD columns (converted to the root's tuple
     * descriptor).
     */
    TupleTableSlot *cpDeletedSlot;

    /*
     * The tuple projected by the INSERT's RETURNING clause, when doing a
     * cross-partition UPDATE
     */
    TupleTableSlot *cpUpdateReturningSlot;
} ModifyTableContext;

/*
 * Context struct containing output data specific to UPDATE operations.
 */
typedef struct UpdateContext
{
    bool        crossPartUpdate;    /* was it a cross-partition update? */
    TU_UpdateIndexes updateIndexes; /* Which index updates are required? */

    /*
     * Lock mode to acquire on the latest tuple version before performing
     * EvalPlanQual on it
     */
    LockTupleMode lockmode;
} UpdateContext;


static void ExecBatchInsert(ModifyTableState *mtstate,
                            ResultRelInfo *resultRelInfo,
                            TupleTableSlot **slots,
                            TupleTableSlot **planSlots,
                            int numSlots,
                            EState *estate,
                            bool canSetTag);
static void ExecPendingInserts(EState *estate);
static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
                                               ResultRelInfo *sourcePartInfo,
                                               ResultRelInfo *destPartInfo,
                                               ItemPointer tupleid,
                                               TupleTableSlot *oldslot,
                                               TupleTableSlot *newslot);
static bool ExecOnConflictUpdate(ModifyTableContext *context,
                                 ResultRelInfo *resultRelInfo,
                                 ItemPointer conflictTid,
                                 TupleTableSlot *excludedSlot,
                                 bool canSetTag,
                                 TupleTableSlot **returning);
static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
                                               EState *estate,
                                               PartitionTupleRouting *proute,
                                               ResultRelInfo *targetRelInfo,
                                               TupleTableSlot *slot,
                                               ResultRelInfo **partRelInfo);

static TupleTableSlot *ExecMerge(ModifyTableContext *context,
                                 ResultRelInfo *resultRelInfo,
                                 ItemPointer tupleid,
                                 HeapTuple oldtuple,
                                 bool canSetTag);
static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
                                        ResultRelInfo *resultRelInfo,
                                        ItemPointer tupleid,
                                        HeapTuple oldtuple,
                                        bool canSetTag,
                                        bool *matched);
static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
                                           ResultRelInfo *resultRelInfo,
                                           bool canSetTag);


/*
 * Verify that the tuples to be produced by INSERT match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 *
 * We used to use this for UPDATE as well, but now the equivalent checks
 * are done in ExecBuildUpdateProjection.
 */
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
    TupleDesc   resultDesc = RelationGetDescr(resultRel);
    int         attno = 0;
    ListCell   *lc;

    foreach(lc, targetList)
    {
        TargetEntry *tle = (TargetEntry *) lfirst(lc);
        Form_pg_attribute attr;

        Assert(!tle->resjunk);  /* caller removed junk items already */

        if (attno >= resultDesc->natts)
            ereport(ERROR,
                    (errcode(ERRCODE_DATATYPE_MISMATCH),
                     errmsg("table row type and query-specified row type do not match"),
                     errdetail("Query has too many columns.")));
        attr = TupleDescAttr(resultDesc, attno);
        attno++;

        /*
         * Special cases here should match planner's expand_insert_targetlist.
         */
        if (attr->attisdropped)
        {
            /*
             * For a dropped column, we can't check atttypid (it's likely 0).
             * In any case the planner has most likely inserted an INT4 null.
             * What we insist on is just *some* NULL constant.
             */
            if (!IsA(tle->expr, Const) ||
                !((Const *) tle->expr)->constisnull)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Query provides a value for a dropped column at ordinal position %d.",
                                   attno)));
        }
        else if (attr->attgenerated)
        {
            /*
             * For a generated column, the planner will have inserted a null
             * of the column's base type (to avoid possibly failing on domain
             * not-null constraints).  It doesn't seem worth insisting on that
             * exact type though, since a null value is type-independent.  As
             * above, just insist on *some* NULL constant.
             */
            if (!IsA(tle->expr, Const) ||
                !((Const *) tle->expr)->constisnull)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Query provides a value for a generated column at ordinal position %d.",
                                   attno)));
        }
        else
        {
            /* Normal case: demand type match */
            if (exprType((Node *) tle->expr) != attr->atttypid)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Table has type %s at ordinal position %d, but query expects %s.",
                                   format_type_be(attr->atttypid),
                                   attno,
                                   format_type_be(exprType((Node *) tle->expr)))));
        }
    }
    if (attno != resultDesc->natts)
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("table row type and query-specified row type do not match"),
                 errdetail("Query has too few columns.")));
}
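/*
 * Hypothetical illustration: if a cached INSERT plan were built against an
 * older rowtype of the target table (say, before a column was dropped and
 * re-added), its targetlist would no longer line up with the relation's
 * current tuple descriptor.  Plan invalidation should replan such cases,
 * but the checks above turn any residual mismatch into a clean error
 * instead of silently storing misaligned data.
 */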

/*
 * ExecProcessReturning --- evaluate a RETURNING list
 *
 * context: context for the ModifyTable operation
 * resultRelInfo: current result rel
 * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE)
 * oldSlot: slot holding old tuple deleted or updated
 * newSlot: slot holding new tuple inserted or updated
 * planSlot: slot holding tuple returned by top subplan node
 *
 * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
 * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
 * modify is disabled if the RETURNING list refers to any OLD/NEW values).
 *
 * Returns a slot holding the result tuple
 */
static TupleTableSlot *
ExecProcessReturning(ModifyTableContext *context,
                     ResultRelInfo *resultRelInfo,
                     CmdType cmdType,
                     TupleTableSlot *oldSlot,
                     TupleTableSlot *newSlot,
                     TupleTableSlot *planSlot)
{
    EState     *estate = context->estate;
    ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
    ExprContext *econtext = projectReturning->pi_exprContext;

    /* Make tuple and any needed join variables available to ExecProject */
    switch (cmdType)
    {
        case CMD_INSERT:
        case CMD_UPDATE:
            /* return new tuple by default */
            if (newSlot)
                econtext->ecxt_scantuple = newSlot;
            break;

        case CMD_DELETE:
            /* return old tuple by default */
            if (oldSlot)
                econtext->ecxt_scantuple = oldSlot;
            break;

        default:
            elog(ERROR, "unrecognized commandType: %d", (int) cmdType);
    }
    econtext->ecxt_outertuple = planSlot;

    /* Make old/new tuples available to ExecProject, if required */
    if (oldSlot)
        econtext->ecxt_oldtuple = oldSlot;
    else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
        econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
    else
        econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */

    if (newSlot)
        econtext->ecxt_newtuple = newSlot;
    else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
        econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
    else
        econtext->ecxt_newtuple = NULL; /* No references to NEW columns */

    /*
     * Tell ExecProject whether or not the OLD/NEW rows actually exist.  This
     * information is required to evaluate ReturningExpr nodes and also in
     * ExecEvalSysVar() and ExecEvalWholeRowVar().
     */
    if (oldSlot == NULL)
        projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
    else
        projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;

    if (newSlot == NULL)
        projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
    else
        projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;

    /* Compute the RETURNING expressions */
    return ExecProject(projectReturning);
}
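/*
 * Hypothetical illustration of the OLD/NEW handling above: for a query like
 *
 *		UPDATE accounts SET balance = balance + 100
 *			WHERE id = 1
 *			RETURNING old.balance, new.balance;
 *
 * both oldSlot and newSlot are supplied, whereas a plain INSERT supplies no
 * old tuple, so EEO_FLAG_OLD_IS_NULL is set and any OLD references evaluate
 * to NULLs (and symmetrically for DELETE and NEW).  Table and column names
 * are illustrative only.
 */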

/*
 * ExecCheckTupleVisible -- verify tuple is visible
 *
 * It would not be consistent with guarantees of the higher isolation levels to
 * proceed with avoiding insertion (taking speculative insertion's alternative
 * path) on the basis of another tuple that is not visible to MVCC snapshot.
 * Check for the need to raise a serialization failure, and do so as necessary.
 */
static void
ExecCheckTupleVisible(EState *estate,
                      Relation rel,
                      TupleTableSlot *slot)
{
    if (!IsolationUsesXactSnapshot())
        return;

    if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
    {
        Datum       xminDatum;
        TransactionId xmin;
        bool        isnull;

        xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
        Assert(!isnull);
        xmin = DatumGetTransactionId(xminDatum);

        /*
         * We should not raise a serialization failure if the conflict is
         * against a tuple inserted by our own transaction, even if it's not
         * visible to our snapshot.  (This would happen, for example, if
         * conflicting keys are proposed for insertion in a single command.)
         */
        if (!TransactionIdIsCurrentTransactionId(xmin))
            ereport(ERROR,
                    (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                     errmsg("could not serialize access due to concurrent update")));
    }
}
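/*
 * Hypothetical illustration: under REPEATABLE READ, if another transaction
 * commits a conflicting row after our snapshot was taken, an INSERT ... ON
 * CONFLICT DO NOTHING must not silently skip on the basis of a row our
 * snapshot cannot see; the check above raises a serialization failure
 * instead, so the client can retry the whole transaction.
 */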

/*
 * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
 */
static void
ExecCheckTIDVisible(EState *estate,
                    ResultRelInfo *relinfo,
                    ItemPointer tid,
                    TupleTableSlot *tempSlot)
{
    Relation    rel = relinfo->ri_RelationDesc;

    /* Redundantly check isolation level */
    if (!IsolationUsesXactSnapshot())
        return;

    if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
        elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
    ExecCheckTupleVisible(estate, rel, tempSlot);
    ExecClearTuple(tempSlot);
}

/*
 * Initialize generated columns handling for a tuple
 *
 * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
 * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
 * This is used only for stored generated columns.
 *
 * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
 * This is used by both stored and virtual generated columns.
 *
 * Note: usually, a given query would need only one of ri_GeneratedExprsI and
 * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
 * cross-partition UPDATEs, since a partition might be the target of both
 * UPDATE and INSERT actions.
 */
void
ExecInitGenerated(ResultRelInfo *resultRelInfo,
                  EState *estate,
                  CmdType cmdtype)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    int         natts = tupdesc->natts;
    ExprState **ri_GeneratedExprs;
    int         ri_NumGeneratedNeeded;
    Bitmapset  *updatedCols;
    MemoryContext oldContext;

    /* Nothing to do if no generated columns */
    if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored ||
                              tupdesc->constr->has_generated_virtual)))
        return;

    /*
     * In an UPDATE, we can skip computing any generated columns that do not
     * depend on any UPDATE target column.  But if there is a BEFORE ROW
     * UPDATE trigger, we cannot skip because the trigger might change more
     * columns.
     */
    if (cmdtype == CMD_UPDATE &&
        !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
        updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
    else
        updatedCols = NULL;

    /*
     * Make sure these data structures are built in the per-query memory
     * context so they'll survive throughout the query.
     */
    oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

    ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
    ri_NumGeneratedNeeded = 0;

    for (int i = 0; i < natts; i++)
    {
        char        attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;

        if (attgenerated)
        {
            Expr       *expr;

            /* Fetch the GENERATED AS expression tree */
            expr = (Expr *) build_column_default(rel, i + 1);
            if (expr == NULL)
                elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
                     i + 1, RelationGetRelationName(rel));

            /*
             * If it's an update with a known set of update target columns,
             * see if we can skip the computation.
             */
            if (updatedCols)
            {
                Bitmapset  *attrs_used = NULL;

                pull_varattnos((Node *) expr, 1, &attrs_used);

                if (!bms_overlap(updatedCols, attrs_used))
                    continue;   /* need not update this column */
            }

            /* No luck, so prepare the expression for execution */
            if (attgenerated == ATTRIBUTE_GENERATED_STORED)
            {
                ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
                ri_NumGeneratedNeeded++;
            }

            /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
            if (cmdtype == CMD_UPDATE)
                resultRelInfo->ri_extraUpdatedCols =
                    bms_add_member(resultRelInfo->ri_extraUpdatedCols,
                                   i + 1 - FirstLowInvalidHeapAttributeNumber);
        }
    }

    if (ri_NumGeneratedNeeded == 0)
    {
        /* didn't need it after all */
        pfree(ri_GeneratedExprs);
        ri_GeneratedExprs = NULL;
    }

    /* Save in appropriate set of fields */
    if (cmdtype == CMD_UPDATE)
    {
        /* Don't call twice */
        Assert(resultRelInfo->ri_GeneratedExprsU == NULL);

        resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
        resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;

        resultRelInfo->ri_extraUpdatedCols_valid = true;
    }
    else
    {
        /* Don't call twice */
        Assert(resultRelInfo->ri_GeneratedExprsI == NULL);

        resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
        resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
    }

    MemoryContextSwitchTo(oldContext);
}
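/*
 * Hypothetical illustration of the dependency check above: given
 *
 *		CREATE TABLE m (a int, b int GENERATED ALWAYS AS (a * 2) STORED);
 *
 * an UPDATE that sets a must recompute b, while an UPDATE touching only
 * other columns can skip it; bms_overlap() on the generation expression's
 * attributes versus the updated columns makes exactly that distinction,
 * unless a BEFORE ROW UPDATE trigger could widen the set of changed columns.
 */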

/*
 * Compute stored generated columns for a tuple
 */
void
ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
                           EState *estate, TupleTableSlot *slot,
                           CmdType cmdtype)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    int         natts = tupdesc->natts;
    ExprContext *econtext = GetPerTupleExprContext(estate);
    ExprState **ri_GeneratedExprs;
    MemoryContext oldContext;
    Datum      *values;
    bool       *nulls;

    /* We should not be called unless this is true */
    Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);

    /*
     * Initialize the expressions if we didn't already, and check whether we
     * can exit early because nothing needs to be computed.
     */
    if (cmdtype == CMD_UPDATE)
    {
        if (resultRelInfo->ri_GeneratedExprsU == NULL)
            ExecInitGenerated(resultRelInfo, estate, cmdtype);
        if (resultRelInfo->ri_NumGeneratedNeededU == 0)
            return;
        ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
    }
    else
    {
        if (resultRelInfo->ri_GeneratedExprsI == NULL)
            ExecInitGenerated(resultRelInfo, estate, cmdtype);
        /* Early exit is impossible given the prior Assert */
        Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
        ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
    }

    oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

    values = palloc_array(Datum, natts);
    nulls = palloc_array(bool, natts);

    slot_getallattrs(slot);
    memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);

    for (int i = 0; i < natts; i++)
    {
        CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);

        if (ri_GeneratedExprs[i])
        {
            Datum       val;
            bool        isnull;

            Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);

            econtext->ecxt_scantuple = slot;

            val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);

            /*
             * We must make a copy of val as we have no guarantees about where
             * memory for a pass-by-reference Datum is located.
             */
            if (!isnull)
                val = datumCopy(val, attr->attbyval, attr->attlen);

            values[i] = val;
            nulls[i] = isnull;
        }
        else
        {
            if (!nulls[i])
                values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
        }
    }

    ExecClearTuple(slot);
    memcpy(slot->tts_values, values, sizeof(*values) * natts);
    memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
    ExecStoreVirtualTuple(slot);
    ExecMaterializeSlot(slot);

    MemoryContextSwitchTo(oldContext);
}
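/*
 * Note on the datumCopy() calls above: an evaluated generation expression
 * may return a pass-by-reference Datum pointing into the very slot that is
 * cleared before the recomputed values are stored back, so each non-null
 * result (and each retained pass-by-reference column) is copied first;
 * ExecMaterializeSlot() then makes the slot own all of its memory.
 */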

/*
 * ExecInitInsertProjection
 *		Do one-time initialization of projection data for INSERT tuples.
 *
 * INSERT queries may need a projection to filter out junk attrs in the tlist.
 *
 * This is also a convenient place to verify that the
 * output of an INSERT matches the target table.
 */
static void
ExecInitInsertProjection(ModifyTableState *mtstate,
                         ResultRelInfo *resultRelInfo)
{
    ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
    Plan       *subplan = outerPlan(node);
    EState     *estate = mtstate->ps.state;
    List       *insertTargetList = NIL;
    bool        need_projection = false;
    ListCell   *l;

    /* Extract non-junk columns of the subplan's result tlist. */
    foreach(l, subplan->targetlist)
    {
        TargetEntry *tle = (TargetEntry *) lfirst(l);

        if (!tle->resjunk)
            insertTargetList = lappend(insertTargetList, tle);
        else
            need_projection = true;
    }

    /*
     * The junk-free list must produce a tuple suitable for the result
     * relation.
     */
    ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);

    /* We'll need a slot matching the table's format. */
    resultRelInfo->ri_newTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);

    /* Build ProjectionInfo if needed (it probably isn't). */
    if (need_projection)
    {
        TupleDesc   relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);

        /* need an expression context to do the projection */
        if (mtstate->ps.ps_ExprContext == NULL)
            ExecAssignExprContext(estate, &mtstate->ps);

        resultRelInfo->ri_projectNew =
            ExecBuildProjectionInfo(insertTargetList,
                                    mtstate->ps.ps_ExprContext,
                                    resultRelInfo->ri_newTupleSlot,
                                    &mtstate->ps,
                                    relDesc);
    }

    resultRelInfo->ri_projectNewInfoValid = true;
}

/*
 * ExecInitUpdateProjection
 *		Do one-time initialization of projection data for UPDATE tuples.
 *
 * UPDATE always needs a projection, because (1) there's always some junk
 * attrs, and (2) we may need to merge values of not-updated columns from
 * the old tuple into the final tuple.  In UPDATE, the tuple arriving from
 * the subplan contains only new values for the changed columns, plus row
 * identity info in the junk attrs.
 *
 * This is "one-time" for any given result rel, but we might touch more than
 * one result rel in the course of an inherited UPDATE, and each one needs
 * its own projection due to possible column order variation.
 *
 * This is also a convenient place to verify that the output of an UPDATE
 * matches the target table (ExecBuildUpdateProjection does that).
 */
static void
ExecInitUpdateProjection(ModifyTableState *mtstate,
                         ResultRelInfo *resultRelInfo)
{
    ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
    Plan       *subplan = outerPlan(node);
    EState     *estate = mtstate->ps.state;
    TupleDesc   relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
    int         whichrel;
    List       *updateColnos;

    /*
     * Usually, mt_lastResultIndex matches the target rel.  If it happens not
     * to, we can get the index the hard way with an integer division.
     */
    whichrel = mtstate->mt_lastResultIndex;
    if (resultRelInfo != mtstate->resultRelInfo + whichrel)
    {
        whichrel = resultRelInfo - mtstate->resultRelInfo;
        Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
    }

    updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);

    /*
     * For UPDATE, we use the old tuple to fill up missing values in the tuple
     * produced by the subplan to get the new tuple.  We need two slots, both
     * matching the table's desired format.
     */
    resultRelInfo->ri_oldTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);
    resultRelInfo->ri_newTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);

    /* need an expression context to do the projection */
    if (mtstate->ps.ps_ExprContext == NULL)
        ExecAssignExprContext(estate, &mtstate->ps);

    resultRelInfo->ri_projectNew =
        ExecBuildUpdateProjection(subplan->targetlist,
                                  false,    /* subplan did the evaluation */
                                  updateColnos,
                                  relDesc,
                                  mtstate->ps.ps_ExprContext,
                                  resultRelInfo->ri_newTupleSlot,
                                  &mtstate->ps);

    resultRelInfo->ri_projectNewInfoValid = true;
}
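/*
 * Hypothetical illustration: for "UPDATE t SET b = b + 1 WHERE c > 0", the
 * subplan emits just the new value of b plus junk row-identity columns
 * (e.g. "ctid" for a heap table); the projection built above merges that
 * output with the unchanged columns of the old tuple to form the complete
 * new tuple handed to the table AM.
 */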

/*
 * ExecGetInsertNewTuple
 *		This prepares a "new" tuple ready to be inserted into given result
 *		relation, by removing any junk columns of the plan's output tuple
 *		and (if necessary) coercing the tuple to the right tuple format.
 */
static TupleTableSlot *
ExecGetInsertNewTuple(ResultRelInfo *relinfo,
                      TupleTableSlot *planSlot)
{
    ProjectionInfo *newProj = relinfo->ri_projectNew;
    ExprContext *econtext;

    /*
     * If there's no projection to be done, just make sure the slot is of the
     * right type for the target rel.  If the planSlot is the right type we
     * can use it as-is, else copy the data into ri_newTupleSlot.
     */
    if (newProj == NULL)
    {
        if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
        {
            ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
            return relinfo->ri_newTupleSlot;
        }
        else
            return planSlot;
    }

    /*
     * Else project; since the projection output slot is ri_newTupleSlot, this
     * will also fix any slot-type problem.
     *
     * Note: currently, this is dead code, because INSERT cases don't receive
     * any junk columns so there's never a projection to be done.
     */
    econtext = newProj->pi_exprContext;
    econtext->ecxt_outertuple = planSlot;
    return ExecProject(newProj);
}
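/*
 * Note: the slot-type test above matters because the subplan's slot type
 * (often virtual, e.g. for a VALUES list) frequently differs from the table
 * AM's preferred slot type, making the ExecCopySlot() branch the common
 * case; the projection path is currently unreachable since INSERT
 * targetlists carry no junk columns.
 */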

/*
 * ExecGetUpdateNewTuple
 *		This prepares a "new" tuple by combining an UPDATE subplan's output
 *		tuple (which contains values of changed columns) with unchanged
 *		columns taken from the old tuple.
 *
 * The subplan tuple might also contain junk columns, which are ignored.
 * Note that the projection also ensures we have a slot of the right type.
 */
TupleTableSlot *
ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
                      TupleTableSlot *planSlot,
                      TupleTableSlot *oldSlot)
{
    ProjectionInfo *newProj = relinfo->ri_projectNew;
    ExprContext *econtext;

    /* Use a few extra Asserts to protect against outside callers */
    Assert(relinfo->ri_projectNewInfoValid);
    Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
    Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));

    econtext = newProj->pi_exprContext;
    econtext->ecxt_outertuple = planSlot;
    econtext->ecxt_scantuple = oldSlot;
    return ExecProject(newProj);
}

/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		For INSERT, we have to insert the tuple into the target relation
 *		(or partition thereof) and insert appropriate tuples into the index
 *		relations.
 *
 *		slot contains the new tuple value to be stored.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 *		*inserted_tuple is the tuple that's effectively inserted;
 *		*insert_destrel is the relation where it was inserted.
 *		These are only set on success.
 *
 *		This may change the currently active tuple conversion map in
 *		mtstate->mt_transition_capture, so the callers must take care to
 *		save the previous value to avoid losing track of it.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecInsert(ModifyTableContext *context,
           ResultRelInfo *resultRelInfo,
           TupleTableSlot *slot,
           bool canSetTag,
           TupleTableSlot **inserted_tuple,
           ResultRelInfo **insert_destrel)
{
    ModifyTableState *mtstate = context->mtstate;
    EState     *estate = context->estate;
    Relation    resultRelationDesc;
    List       *recheckIndexes = NIL;
    TupleTableSlot *planSlot = context->planSlot;
    TupleTableSlot *result = NULL;
    TransitionCaptureState *ar_insert_trig_tcs;
    ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
    OnConflictAction onconflict = node->onConflictAction;
    PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
    MemoryContext oldContext;

    /*
     * If the input result relation is a partitioned table, find the leaf
     * partition to insert the tuple into.
     */
    if (proute)
    {
        ResultRelInfo *partRelInfo;

        slot = ExecPrepareTupleRouting(mtstate, estate, proute,
                                       resultRelInfo, slot,
                                       &partRelInfo);
        resultRelInfo = partRelInfo;
    }

    ExecMaterializeSlot(slot);

    resultRelationDesc = resultRelInfo->ri_RelationDesc;

    /*
     * Open the table's indexes, if we have not done so already, so that we
     * can add new index entries for the inserted tuple.
     */
    if (resultRelationDesc->rd_rel->relhasindex &&
        resultRelInfo->ri_IndexRelationDescs == NULL)
        ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);

    /*
     * BEFORE ROW INSERT Triggers.
     *
     * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
     * INSERT ... ON CONFLICT statement.  We cannot check for constraint
     * violations before firing these triggers, because they can change the
     * values to insert.  Also, they can run arbitrary user-defined code with
     * side-effects that we can't cancel by just not inserting the tuple.
     */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->trig_insert_before_row)
    {
        /* Flush any pending inserts, so rows are visible to the triggers */
        if (estate->es_insert_pending_result_relations != NIL)
            ExecPendingInserts(estate);

        if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
            return NULL;        /* "do nothing" */
    }

    /* INSTEAD OF ROW INSERT Triggers */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
    {
        if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
            return NULL;        /* "do nothing" */
    }
    else if (resultRelInfo->ri_FdwRoutine)
    {
        /*
         * GENERATED expressions might reference the tableoid column, so
         * (re-)initialize tts_tableOid before evaluating them.
         */
        slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);

        /*
         * Compute stored generated columns
         */
        if (resultRelationDesc->rd_att->constr &&
            resultRelationDesc->rd_att->constr->has_generated_stored)
            ExecComputeStoredGenerated(resultRelInfo, estate, slot,
                                       CMD_INSERT);

        /*
         * If the FDW supports batching, and batching is requested, accumulate
         * rows and insert them in batches.  Otherwise use the per-row inserts.
         */
        if (resultRelInfo->ri_BatchSize > 1)
        {
            bool        flushed = false;

            /*
             * When we've reached the desired batch size, perform the
             * insertion.
             */
            if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
            {
                ExecBatchInsert(mtstate, resultRelInfo,
                                resultRelInfo->ri_Slots,
                                resultRelInfo->ri_PlanSlots,
                                resultRelInfo->ri_NumSlots,
                                estate, canSetTag);
                flushed = true;
            }

            oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

            if (resultRelInfo->ri_Slots == NULL)
            {
                resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
                resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
            }

            /*
             * Initialize the batch slots.  We don't know how many slots will
             * be needed, so we initialize them as the batch grows, and we
             * keep them across batches.  To mitigate an inefficiency in how
             * resource owner handles objects with many references (as with
             * many slots all referencing the same tuple descriptor) we copy
             * the appropriate tuple descriptor for each slot.
             */
            if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
            {
                TupleDesc   tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
                TupleDesc   plan_tdesc =
                    CreateTupleDescCopy(planSlot->tts_tupleDescriptor);

                resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
                    MakeSingleTupleTableSlot(tdesc, slot->tts_ops);

                resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
                    MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);

                /* remember how many batch slots we initialized */
                resultRelInfo->ri_NumSlotsInitialized++;
            }

            ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
                         slot);

            ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
                         planSlot);

            /*
             * If these are the first tuples stored in the buffers, add the
             * target rel and the mtstate to the
             * es_insert_pending_result_relations and
             * es_insert_pending_modifytables lists respectively, except in
             * the case where flushing was done above, in which case they
             * would already have been added to the lists, so no need to do
             * this.
             */
            if (resultRelInfo->ri_NumSlots == 0 && !flushed)
            {
                Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
                                        resultRelInfo));
                estate->es_insert_pending_result_relations =
                    lappend(estate->es_insert_pending_result_relations,
                            resultRelInfo);
                estate->es_insert_pending_modifytables =
                    lappend(estate->es_insert_pending_modifytables, mtstate);
            }
            Assert(list_member_ptr(estate->es_insert_pending_result_relations,
                                   resultRelInfo));

            resultRelInfo->ri_NumSlots++;

            MemoryContextSwitchTo(oldContext);

            return NULL;
        }

        /*
         * insert into foreign table: let the FDW do it
         */
        slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
                                                               resultRelInfo,
                                                               slot,
                                                               planSlot);

        if (slot == NULL)       /* "do nothing" */
            return NULL;

        /*
         * AFTER ROW Triggers or RETURNING expressions might reference the
         * tableoid column, so (re-)initialize tts_tableOid before evaluating
         * them.  (This covers the case where the FDW replaced the slot.)
         */
        slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
    }
    else
    {
        WCOKind     wco_kind;

        /*
         * Constraints and GENERATED expressions might reference the tableoid
         * column, so (re-)initialize tts_tableOid before evaluating them.
         */
        slot->tts_tableOid = RelationGetRelid(resultRelationDesc);

        /*
         * Compute stored generated columns
         */
        if (resultRelationDesc->rd_att->constr &&
            resultRelationDesc->rd_att->constr->has_generated_stored)
            ExecComputeStoredGenerated(resultRelInfo, estate, slot,
                                       CMD_INSERT);

        /*
         * Check any RLS WITH CHECK policies.
         *
         * Normally we should check INSERT policies.  But if the insert is the
         * result of a partition key update that moved the tuple to a new
         * partition, we should instead check UPDATE policies, because we are
         * executing policies defined on the target table, and not those
         * defined on the child partitions.
         *
         * If we're running MERGE, we refer to the action that we're executing
         * to know if we're doing an INSERT or UPDATE to a partition table.
         */
        if (mtstate->operation == CMD_UPDATE)
            wco_kind = WCO_RLS_UPDATE_CHECK;
        else if (mtstate->operation == CMD_MERGE)
            wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
                WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
        else
            wco_kind = WCO_RLS_INSERT_CHECK;

        /*
         * ExecWithCheckOptions() will skip any WCOs which are not of the kind
         * we are looking for at this point.
         */
        if (resultRelInfo->ri_WithCheckOptions != NIL)
            ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);

        /*
         * Check the constraints of the tuple.
         */
        if (resultRelationDesc->rd_att->constr)
            ExecConstraints(resultRelInfo, slot, estate);

        /*
         * Also check the tuple against the partition constraint, if there is
         * one; except that if we got here via tuple-routing, we don't need to
         * if there's no BR trigger defined on the partition.
         */
        if (resultRelationDesc->rd_rel->relispartition &&
            (resultRelInfo->ri_RootResultRelInfo == NULL ||
             (resultRelInfo->ri_TrigDesc &&
              resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
            ExecPartitionCheck(resultRelInfo, slot, estate, true);

        if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
        {
            /* Perform a speculative insertion. */
            uint32      specToken;
            ItemPointerData conflictTid;
            ItemPointerData invalidItemPtr;
            bool        specConflict;
            List       *arbiterIndexes;

            ItemPointerSetInvalid(&invalidItemPtr);
            arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;

            /*
             * Do a non-conclusive check for conflicts first.
             *
             * We're not holding any locks yet, so this doesn't guarantee that
             * the later insert won't conflict.  But it avoids leaving behind
             * a lot of canceled speculative insertions, if you run a lot of
             * INSERT ON CONFLICT statements that do conflict.
             *
             * We loop back here if we find a conflict below, either during
             * the pre-check, or when we re-check after inserting the tuple
             * speculatively.  Better allow interrupts in case some bug makes
             * this an infinite loop.
             */
    vlock:
            CHECK_FOR_INTERRUPTS();
            specConflict = false;
            if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
                                           &conflictTid, &invalidItemPtr,
                                           arbiterIndexes))
            {
                /* committed conflict tuple found */
                if (onconflict == ONCONFLICT_UPDATE)
                {
                    /*
                     * In case of ON CONFLICT DO UPDATE, execute the UPDATE
                     * part.  Be prepared to retry if the UPDATE fails because
                     * of another concurrent UPDATE/DELETE to the conflict
                     * tuple.
                     */
                    TupleTableSlot *returning = NULL;

                    if (ExecOnConflictUpdate(context, resultRelInfo,
                                             &conflictTid, slot, canSetTag,
                                             &returning))
                    {
                        InstrCountTuples2(&mtstate->ps, 1);
                        return returning;
                    }
                    else
                        goto vlock;
                }
                else
                {
                    /*
                     * In case of ON CONFLICT DO NOTHING, do nothing.  However,
                     * verify that the tuple is visible to the executor's MVCC
                     * snapshot at higher isolation levels.
                     *
                     * Using ExecGetReturningSlot() to store the tuple for the
                     * recheck isn't that pretty, but we can't trivially use
                     * the input slot, because it might not be of a compatible
                     * type.  As there's no conflicting usage of
                     * ExecGetReturningSlot() in the DO NOTHING case...
                     */
                    Assert(onconflict == ONCONFLICT_NOTHING);
                    ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
                                        ExecGetReturningSlot(estate, resultRelInfo));
                    InstrCountTuples2(&mtstate->ps, 1);
                    return NULL;
                }
            }

            /*
             * Before we start insertion proper, acquire our "speculative
             * insertion lock".  Others can use that to wait for us to decide
             * if we're going to go ahead with the insertion, instead of
             * waiting for the whole transaction to complete.
             */
            INJECTION_POINT("exec-insert-before-insert-speculative", NULL);
            specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

            /* insert the tuple, with the speculative token */
            table_tuple_insert_speculative(resultRelationDesc, slot,
                                           estate->es_output_cid,
                                           0,
                                           NULL,
                                           specToken);

            /* insert index entries for tuple */
            recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
                                                   slot, estate, false, true,
                                                   &specConflict,
                                                   arbiterIndexes,
                                                   false);

            /* adjust the tuple's state accordingly */
            table_tuple_complete_speculative(resultRelationDesc, slot,
                                             specToken, !specConflict);

            /*
             * Wake up anyone waiting for our decision.  They will re-check
             * the tuple, see that it's no longer speculative, and wait on our
             * XID as if this was a regularly inserted tuple all along.  Or if
             * we killed the tuple, they will see it's dead, and proceed as if
             * the tuple never existed.
             */
            SpeculativeInsertionLockRelease(GetCurrentTransactionId());

            /*
             * If there was a conflict, start from the beginning.  We'll do
             * the pre-check again, which will now find the conflicting tuple
             * (unless it aborts before we get there).
             */
            if (specConflict)
            {
                list_free(recheckIndexes);
                goto vlock;
            }

            /* Since there was no insertion conflict, we're done */
        }
        else
        {
            /* insert the tuple normally */
            table_tuple_insert(resultRelationDesc, slot,
                               estate->es_output_cid,
                               0, NULL);

            /* insert index entries for tuple */
            if (resultRelInfo->ri_NumIndices > 0)
                recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
                                                       slot, estate, false,
                                                       false, NULL, NIL,
                                                       false);
        }
    }
1246 : :
5408 tgl@sss.pgh.pa.us 1247 [ + + ]: 6177760 : if (canSetTag)
1248 : 6177165 : (estate->es_processed)++;
1249 : :
1250 : : /*
1251 : : * If this insert is the result of a partition key update that moved the
1252 : : * tuple to a new partition, put this row into the transition NEW TABLE,
1253 : : * if there is one. We need to do this separately for DELETE and INSERT
1254 : : * because they happen on different tables.
1255 : : */
2888 rhaas@postgresql.org 1256 : 6177760 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1257 [ + + + + ]: 6177760 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1258 [ + + ]: 27 : && mtstate->mt_transition_capture->tcs_update_new_table)
1259 : : {
1367 alvherre@alvh.no-ip. 1260 : 24 : ExecARUpdateTriggers(estate, resultRelInfo,
1261 : : NULL, NULL,
1262 : : NULL,
1263 : : NULL,
1264 : : slot,
1265 : : NULL,
1266 : 24 : mtstate->mt_transition_capture,
1267 : : false);
1268 : :
1269 : : /*
1270 : : * We've already captured the NEW TABLE row, so make sure any AR
1271 : : * INSERT trigger fired below doesn't capture it again.
1272 : : */
2888 rhaas@postgresql.org 1273 : 24 : ar_insert_trig_tcs = NULL;
1274 : : }
1275 : :
1276 : : /* AFTER ROW INSERT Triggers */
2485 andres@anarazel.de 1277 : 6177760 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1278 : : ar_insert_trig_tcs);
1279 : :
5798 tgl@sss.pgh.pa.us 1280 : 6177759 : list_free(recheckIndexes);
1281 : :
1282 : : /*
1283 : : * Check any WITH CHECK OPTION constraints from parent views. We are
1284 : : * required to do this after testing all constraints and uniqueness
1285 : : * violations per the SQL spec, so we do it after actually inserting the
1286 : : * record into the heap and all indexes.
1287 : : *
1288 : : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1289 : : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1290 : : *
1291 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1292 : : * are looking for at this point.
1293 : : */
4534 sfrost@snowman.net 1294 [ + + ]: 6177759 : if (resultRelInfo->ri_WithCheckOptions != NIL)
3889 1295 : 218 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1296 : :
1297 : : /* Process RETURNING if present */
5911 tgl@sss.pgh.pa.us 1298 [ + + ]: 6177686 : if (resultRelInfo->ri_projectReturning)
1299 : : {
334 dean.a.rasheed@gmail 1300 : 1823 : TupleTableSlot *oldSlot = NULL;
1301 : :
1302 : : /*
1303 : : * If this is part of a cross-partition UPDATE, and the RETURNING list
1304 : : * refers to any OLD columns, ExecDelete() will have saved the tuple
1305 : : * deleted from the original partition, which we must use here to
1306 : : * compute the OLD column values. Otherwise, all OLD column values
1307 : : * will be NULL.
1308 : : */
1309 [ + + ]: 1823 : if (context->cpDeletedSlot)
1310 : : {
1311 : : TupleConversionMap *tupconv_map;
1312 : :
1313 : : /*
1314 : : * Convert the OLD tuple to the new partition's format/slot, if
1315 : : * needed. Note that ExecDelete() already converted it to the
1316 : : * root's partition's format/slot.
1317 : : */
1318 : 24 : oldSlot = context->cpDeletedSlot;
1319 : 24 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1320 [ + + ]: 24 : if (tupconv_map != NULL)
1321 : : {
1322 : 8 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1323 : : oldSlot,
1324 : : ExecGetReturningSlot(estate,
1325 : : resultRelInfo));
1326 : :
1327 : 8 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1328 : 8 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1329 : : }
1330 : : }
1331 : :
1332 : 1823 : result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT,
1333 : : oldSlot, slot, planSlot);
1334 : :
1335 : : /*
1336 : : * For a cross-partition UPDATE, release the old tuple, first making
1337 : : * sure that the result slot has a local copy of any pass-by-reference
1338 : : * values.
1339 : : */
1340 [ + + ]: 1817 : if (context->cpDeletedSlot)
1341 : : {
1342 : 24 : ExecMaterializeSlot(result);
1343 : 24 : ExecClearTuple(oldSlot);
1344 [ + + ]: 24 : if (context->cpDeletedSlot != oldSlot)
1345 : 8 : ExecClearTuple(context->cpDeletedSlot);
1346 : 24 : context->cpDeletedSlot = NULL;
1347 : : }
1348 : : }
1349 : :
1367 alvherre@alvh.no-ip. 1350 [ + + ]: 6177680 : if (inserted_tuple)
1351 : 414 : *inserted_tuple = slot;
1352 [ + + ]: 6177680 : if (insert_destrel)
1353 : 414 : *insert_destrel = resultRelInfo;
1354 : :
3253 rhaas@postgresql.org 1355 : 6177680 : return result;
1356 : : }
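
The OLD-column handling above is easiest to see from SQL. A minimal sketch, with invented table names, assuming the RETURNING old/new syntax that this code path serves:

/*
 * Hypothetical illustration of the cross-partition RETURNING path:
 *
 *   CREATE TABLE parted (a int) PARTITION BY RANGE (a);
 *   CREATE TABLE parted1 PARTITION OF parted FOR VALUES FROM (0) TO (10);
 *   CREATE TABLE parted2 PARTITION OF parted FOR VALUES FROM (10) TO (20);
 *   INSERT INTO parted VALUES (5);
 *
 *   -- The row moves parted1 -> parted2.  ExecDelete() saved the deleted
 *   -- tuple in context->cpDeletedSlot; the code above converts it to the
 *   -- destination partition's format so that old.a can be computed.
 *   UPDATE parted SET a = 15 WHERE a = 5
 *     RETURNING old.a AS old_a, new.a AS new_a;
 */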
1357 : :
1358 : : /* ----------------------------------------------------------------
1359 : : * ExecBatchInsert
1360 : : *
1361 : : * Insert multiple tuples in an efficient way.
1362 : : * Currently, this handles inserting into a foreign table without a
1363 : : * RETURNING clause.
1364 : : * ----------------------------------------------------------------
1365 : : */
1366 : : static void
1791 tomas.vondra@postgre 1367 : 29 : ExecBatchInsert(ModifyTableState *mtstate,
1368 : : ResultRelInfo *resultRelInfo,
1369 : : TupleTableSlot **slots,
1370 : : TupleTableSlot **planSlots,
1371 : : int numSlots,
1372 : : EState *estate,
1373 : : bool canSetTag)
1374 : : {
1375 : : int i;
1376 : 29 : int numInserted = numSlots;
1377 : 29 : TupleTableSlot *slot = NULL;
1378 : : TupleTableSlot **rslots;
1379 : :
1380 : : /*
1381 : : * insert into foreign table: let the FDW do it
1382 : : */
1383 : 29 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1384 : : resultRelInfo,
1385 : : slots,
1386 : : planSlots,
1387 : : &numInserted);
1388 : :
1389 [ + + ]: 173 : for (i = 0; i < numInserted; i++)
1390 : : {
1391 : 145 : slot = rslots[i];
1392 : :
1393 : : /*
1394 : : * AFTER ROW Triggers might reference the tableoid column, so
1395 : : * (re-)initialize tts_tableOid before evaluating them.
1396 : : */
1397 : 145 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1398 : :
1399 : : /* AFTER ROW INSERT Triggers */
1400 : 145 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1401 : 145 : mtstate->mt_transition_capture);
1402 : :
1403 : : /*
1404 : : * Check any WITH CHECK OPTION constraints from parent views. See the
1405 : : * comment in ExecInsert.
1406 : : */
1407 [ - + ]: 144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1791 tomas.vondra@postgre 1408 :UBC 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1409 : : }
1410 : :
1791 tomas.vondra@postgre 1411 [ + - + - ]:CBC 28 : if (canSetTag && numInserted > 0)
1412 : 28 : estate->es_processed += numInserted;
1413 : :
1414 : : /* Clean up all the slots, ready for the next batch */
966 michael@paquier.xyz 1415 [ + + ]: 172 : for (i = 0; i < numSlots; i++)
1416 : : {
1417 : 144 : ExecClearTuple(slots[i]);
1418 : 144 : ExecClearTuple(planSlots[i]);
1419 : : }
1420 : 28 : resultRelInfo->ri_NumSlots = 0;
1791 tomas.vondra@postgre 1421 : 28 : }
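
For context, the loop above is driven by the FDW callback it invokes. A minimal sketch of the FDW half of the batching contract, in which mydw_send_rows() is a hypothetical transport helper, not a real API:

static TupleTableSlot **
mydw_ExecForeignBatchInsert(EState *estate,
                            ResultRelInfo *rinfo,
                            TupleTableSlot **slots,
                            TupleTableSlot **planSlots,
                            int *numSlots)
{
    /* Ship the whole batch to the remote server in one round trip. */
    int         inserted = mydw_send_rows(rinfo, slots, *numSlots);

    /*
     * Report how many rows were actually inserted: ExecBatchInsert()
     * fires AFTER ROW INSERT triggers only for the first *numSlots
     * returned slots and adds that count to es_processed.
     */
    *numSlots = inserted;
    return slots;
}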
1422 : :
1423 : : /*
1424 : : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1425 : : */
1426 : : static void
1117 efujita@postgresql.o 1427 : 18 : ExecPendingInserts(EState *estate)
1428 : : {
1429 : : ListCell *l1,
1430 : : *l2;
1431 : :
1104 1432 [ + - + + : 36 : forboth(l1, estate->es_insert_pending_result_relations,
+ - + + +
+ + - +
+ ]
1433 : : l2, estate->es_insert_pending_modifytables)
1434 : : {
1435 : 19 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1436 : 19 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1437 : :
1117 1438 [ - + ]: 19 : Assert(mtstate);
1439 : 19 : ExecBatchInsert(mtstate, resultRelInfo,
1440 : : resultRelInfo->ri_Slots,
1441 : : resultRelInfo->ri_PlanSlots,
1442 : : resultRelInfo->ri_NumSlots,
1443 : 19 : estate, mtstate->canSetTag);
1444 : : }
1445 : :
1446 : 17 : list_free(estate->es_insert_pending_result_relations);
1104 1447 : 17 : list_free(estate->es_insert_pending_modifytables);
1117 1448 : 17 : estate->es_insert_pending_result_relations = NIL;
1104 1449 : 17 : estate->es_insert_pending_modifytables = NIL;
1117 1450 : 17 : }
1451 : :
1452 : : /*
1453 : : * ExecDeletePrologue -- subroutine for ExecDelete
1454 : : *
1455 : : * Prepare executor state for DELETE. Actually, the only thing we have to do
1456 : : * here is execute BEFORE ROW triggers. We return false if one of them makes
1457 : : * the delete a no-op; otherwise, return true.
1458 : : */
1459 : : static bool
1370 alvherre@alvh.no-ip. 1460 : 769251 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1461 : : ItemPointer tupleid, HeapTuple oldtuple,
1462 : : TupleTableSlot **epqreturnslot, TM_Result *result)
1463 : : {
1009 dean.a.rasheed@gmail 1464 [ + + ]: 769251 : if (result)
1465 : 798 : *result = TM_Ok;
1466 : :
1467 : : /* BEFORE ROW DELETE triggers */
1370 alvherre@alvh.no-ip. 1468 [ + + ]: 769251 : if (resultRelInfo->ri_TrigDesc &&
1469 [ + + ]: 3531 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1470 : : {
1471 : : /* Flush any pending inserts, so rows are visible to the triggers */
1117 efujita@postgresql.o 1472 [ + + ]: 173 : if (context->estate->es_insert_pending_result_relations != NIL)
1473 : 1 : ExecPendingInserts(context->estate);
1474 : :
1370 alvherre@alvh.no-ip. 1475 : 165 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1476 : : resultRelInfo, tupleid, oldtuple,
1477 : : epqreturnslot, result, &context->tmfd,
151 dean.a.rasheed@gmail 1478 : 173 : context->mtstate->operation == CMD_MERGE);
1479 : : }
1480 : :
1370 alvherre@alvh.no-ip. 1481 : 769078 : return true;
1482 : : }
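
The "no-op" case above is driven by the trigger's return value. A minimal sketch of a C trigger that suppresses every delete (hypothetical function name; a real trigger would apply some policy before deciding):

#include "postgres.h"
#include "commands/trigger.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(suppress_delete);

Datum
suppress_delete(PG_FUNCTION_ARGS)
{
    if (!CALLED_AS_TRIGGER(fcinfo))
        elog(ERROR, "suppress_delete: not called by trigger manager");

    /*
     * Returning NULL from a BEFORE ROW DELETE trigger makes
     * ExecBRDeleteTriggers() report false, so ExecDeletePrologue()
     * turns the delete into a no-op.
     */
    return PointerGetDatum(NULL);
}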
1483 : :
1484 : : /*
1485 : : * ExecDeleteAct -- subroutine for ExecDelete
1486 : : *
1487 : : * Actually delete the tuple from a plain table.
1488 : : *
1489 : : * Caller is in charge of doing EvalPlanQual as necessary
1490 : : */
1491 : : static TM_Result
1492 : 769162 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1493 : : ItemPointer tupleid, bool changingPart)
1494 : : {
1495 : 769162 : EState *estate = context->estate;
1496 : :
1497 : 769162 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1498 : : estate->es_output_cid,
1499 : : estate->es_snapshot,
1500 : : estate->es_crosscheck_snapshot,
1501 : : true /* wait for commit */ ,
1502 : : &context->tmfd,
1503 : : changingPart);
1504 : : }
1505 : :
1506 : : /*
1507 : : * ExecDeleteEpilogue -- subroutine for ExecDelete
1508 : : *
1509 : : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1510 : : * including the UPDATE triggers if the deletion is being done as part of a
1511 : : * cross-partition tuple move.
1512 : : */
1513 : : static void
1514 : 769132 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1515 : : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1516 : : {
1517 : 769132 : ModifyTableState *mtstate = context->mtstate;
1518 : 769132 : EState *estate = context->estate;
1519 : : TransitionCaptureState *ar_delete_trig_tcs;
1520 : :
1521 : : /*
1522 : : * If this delete is the result of a partition key update that moved the
1523 : : * tuple to a new partition, put this row into the transition OLD TABLE,
1524 : : * if there is one. We need to do this separately for DELETE and INSERT
1525 : : * because they happen on different tables.
1526 : : */
1527 : 769132 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1528 [ + + + + ]: 769132 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1529 [ + + ]: 27 : mtstate->mt_transition_capture->tcs_update_old_table)
1530 : : {
1367 1531 : 24 : ExecARUpdateTriggers(estate, resultRelInfo,
1532 : : NULL, NULL,
1533 : : tupleid, oldtuple,
614 akorotkov@postgresql 1534 : 24 : NULL, NULL, mtstate->mt_transition_capture,
1535 : : false);
1536 : :
1537 : : /*
1538 : : * We've already captured the OLD TABLE row, so make sure any AR
1539 : : * DELETE trigger fired below doesn't capture it again.
1540 : : */
1370 alvherre@alvh.no-ip. 1541 : 24 : ar_delete_trig_tcs = NULL;
1542 : : }
1543 : :
1544 : : /* AFTER ROW DELETE Triggers */
614 akorotkov@postgresql 1545 : 769132 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1546 : : ar_delete_trig_tcs, changingPart);
1370 alvherre@alvh.no-ip. 1547 : 769130 : }
1548 : :
1549 : : /* ----------------------------------------------------------------
1550 : : * ExecDelete
1551 : : *
1552 : : * DELETE is like UPDATE, except that we delete the tuple and no
1553 : : * index modifications are needed.
1554 : : *
1555 : : * When deleting from a table, tupleid identifies the tuple to delete and
1556 : : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1557 : : * oldtuple is passed to the triggers and identifies what to delete, and
1558 : : * tupleid is invalid. When deleting from a foreign table, tupleid is
1559 : : * invalid; the FDW has to figure out which row to delete using data from
1560 : : * the planSlot. oldtuple is passed to foreign table triggers; it is
1561 : : * NULL when the foreign table has no relevant triggers. We use
1562 : : * tupleDeleted to indicate whether the tuple is actually deleted;
1563 : : * callers can use it to decide whether to continue the operation. When
1564 : : * this DELETE is part of a partition-key UPDATE, then the slot
1565 : : * returned by EvalPlanQual() is passed back using output parameter
1566 : : * epqreturnslot.
1567 : : *
1568 : : * Returns RETURNING result if any, otherwise NULL.
1569 : : * ----------------------------------------------------------------
1570 : : */
1571 : : static TupleTableSlot *
1572 : 768988 : ExecDelete(ModifyTableContext *context,
1573 : : ResultRelInfo *resultRelInfo,
1574 : : ItemPointer tupleid,
1575 : : HeapTuple oldtuple,
1576 : : bool processReturning,
1577 : : bool changingPart,
1578 : : bool canSetTag,
1579 : : TM_Result *tmresult,
1580 : : bool *tupleDeleted,
1581 : : TupleTableSlot **epqreturnslot)
1582 : : {
1583 : 768988 : EState *estate = context->estate;
1889 heikki.linnakangas@i 1584 : 768988 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
4664 tgl@sss.pgh.pa.us 1585 : 768988 : TupleTableSlot *slot = NULL;
1586 : : TM_Result result;
1587 : : bool saveOld;
1588 : :
2888 rhaas@postgresql.org 1589 [ + + ]: 768988 : if (tupleDeleted)
1590 : 535 : *tupleDeleted = false;
1591 : :
1592 : : /*
1593 : : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1594 : : * done if it says we are.
1595 : : */
1370 alvherre@alvh.no-ip. 1596 [ + + ]: 768988 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1597 : : epqreturnslot, tmresult))
1598 : 26 : return NULL;
1599 : :
1600 : : /* INSTEAD OF ROW DELETE Triggers */
5546 tgl@sss.pgh.pa.us 1601 [ + + ]: 768954 : if (resultRelInfo->ri_TrigDesc &&
1602 [ + + ]: 3463 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
5911 1603 : 24 : {
1604 : : bool dodelete;
1605 : :
5546 1606 [ - + ]: 27 : Assert(oldtuple != NULL);
4286 noah@leadboat.com 1607 : 27 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1608 : :
5546 tgl@sss.pgh.pa.us 1609 [ + + ]: 27 : if (!dodelete) /* "do nothing" */
5911 1610 : 3 : return NULL;
1611 : : }
4664 1612 [ + + ]: 768927 : else if (resultRelInfo->ri_FdwRoutine)
1613 : : {
1614 : : /*
1615 : : * delete from foreign table: let the FDW do it
1616 : : *
1617 : : * We offer the returning slot as a place to store RETURNING data,
1618 : : * although the FDW can return some other slot if it wants.
1619 : : */
2485 andres@anarazel.de 1620 : 23 : slot = ExecGetReturningSlot(estate, resultRelInfo);
4664 tgl@sss.pgh.pa.us 1621 : 23 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1622 : : resultRelInfo,
1623 : : slot,
1624 : : context->planSlot);
1625 : :
1626 [ - + ]: 23 : if (slot == NULL) /* "do nothing" */
4664 tgl@sss.pgh.pa.us 1627 :UBC 0 : return NULL;
1628 : :
1629 : : /*
1630 : : * RETURNING expressions might reference the tableoid column, so
1631 : : * (re)initialize tts_tableOid before evaluating them.
1632 : : */
2619 andres@anarazel.de 1633 [ + + ]:CBC 23 : if (TTS_EMPTY(slot))
3603 rhaas@postgresql.org 1634 : 5 : ExecStoreAllNullTuple(slot);
1635 : :
2485 andres@anarazel.de 1636 : 23 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1637 : : }
1638 : : else
1639 : : {
1640 : : /*
1641 : : * delete the tuple
1642 : : *
1643 : : * Note: if context->estate->es_crosscheck_snapshot isn't
1644 : : * InvalidSnapshot, we check that the row to be deleted is visible to
1645 : : * that snapshot, and throw a can't-serialize error if not. This is a
1646 : : * special-case behavior needed for referential integrity updates in
1647 : : * transaction-snapshot mode transactions.
1648 : : */
1163 john.naylor@postgres 1649 : 768904 : ldelete:
614 akorotkov@postgresql 1650 : 768908 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1651 : :
726 dean.a.rasheed@gmail 1652 [ + + ]: 768890 : if (tmresult)
1653 : 518 : *tmresult = result;
1654 : :
5546 tgl@sss.pgh.pa.us 1655 [ + + + + : 768890 : switch (result)
- ]
1656 : : {
2460 andres@anarazel.de 1657 : 15 : case TM_SelfModified:
1658 : :
1659 : : /*
1660 : : * The target tuple was already updated or deleted by the
1661 : : * current command, or by a later command in the current
1662 : : * transaction. The former case is possible in a join DELETE
1663 : : * where multiple tuples join to the same target tuple. This
1664 : : * is somewhat questionable, but Postgres has always allowed
1665 : : * it: we just ignore additional deletion attempts.
1666 : : *
1667 : : * The latter case arises if the tuple is modified by a
1668 : : * command in a BEFORE trigger, or perhaps by a command in a
1669 : : * volatile function used in the query. In such situations we
1670 : : * should not ignore the deletion, but it is equally unsafe to
1671 : : * proceed. We don't want to discard the original DELETE
1672 : : * while keeping the triggered actions based on its deletion;
1673 : : * and it would be no better to allow the original DELETE
1674 : : * while discarding updates that it triggered. The row update
1675 : : * carries some information that might be important according
1676 : : * to business rules; so throwing an error is the only safe
1677 : : * course.
1678 : : *
1679 : : * If a trigger actually intends this type of interaction, it
1680 : : * can re-execute the DELETE and then return NULL to cancel
1681 : : * the outer delete.
1682 : : */
1370 alvherre@alvh.no-ip. 1683 [ + + ]: 15 : if (context->tmfd.cmax != estate->es_output_cid)
4799 kgrittn@postgresql.o 1684 [ + - ]: 3 : ereport(ERROR,
1685 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1686 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1687 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1688 : :
1689 : : /* Else, already deleted by self; nothing to do */
5546 tgl@sss.pgh.pa.us 1690 : 12 : return NULL;
1691 : :
2460 andres@anarazel.de 1692 : 768837 : case TM_Ok:
5546 tgl@sss.pgh.pa.us 1693 : 768837 : break;
1694 : :
2460 andres@anarazel.de 1695 : 35 : case TM_Updated:
1696 : : {
1697 : : TupleTableSlot *inputslot;
1698 : : TupleTableSlot *epqslot;
1699 : :
1700 [ + + ]: 35 : if (IsolationUsesXactSnapshot())
1701 [ + - ]: 1 : ereport(ERROR,
1702 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1703 : : errmsg("could not serialize access due to concurrent update")));
1704 : :
1705 : : /*
1706 : : * Already know that we're going to need to do EPQ, so
1707 : : * fetch tuple directly into the right slot.
1708 : : */
614 akorotkov@postgresql 1709 : 34 : EvalPlanQualBegin(context->epqstate);
1710 : 34 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1711 : : resultRelInfo->ri_RangeTableIndex);
1712 : :
1713 : 34 : result = table_tuple_lock(resultRelationDesc, tupleid,
1714 : : estate->es_snapshot,
1715 : : inputslot, estate->es_output_cid,
1716 : : LockTupleExclusive, LockWaitBlock,
1717 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1718 : : &context->tmfd);
1719 : :
1720 [ + + + - ]: 30 : switch (result)
1721 : : {
1722 : 27 : case TM_Ok:
1723 [ - + ]: 27 : Assert(context->tmfd.traversed);
1724 : 27 : epqslot = EvalPlanQual(context->epqstate,
1725 : : resultRelationDesc,
1726 : : resultRelInfo->ri_RangeTableIndex,
1727 : : inputslot);
1728 [ + - + + ]: 27 : if (TupIsNull(epqslot))
1729 : : /* Tuple not passing quals anymore, exiting... */
1730 : 15 : return NULL;
1731 : :
1732 : : /*
1733 : : * If requested, skip delete and pass back the
1734 : : * updated row.
1735 : : */
1736 [ + + ]: 12 : if (epqreturnslot)
1737 : : {
1738 : 8 : *epqreturnslot = epqslot;
1739 : 8 : return NULL;
1740 : : }
1741 : : else
1742 : 4 : goto ldelete;
1743 : :
1744 : 2 : case TM_SelfModified:
1745 : :
1746 : : /*
1747 : : * This can be reached when following an update
1748 : : * chain from a tuple updated by another session,
1749 : : * reaching a tuple that was already updated in
1750 : : * this transaction. If previously updated by this
1751 : : * command, ignore the delete, otherwise error
1752 : : * out.
1753 : : *
1754 : : * See also TM_SelfModified response to
1755 : : * table_tuple_delete() above.
1756 : : */
1757 [ + + ]: 2 : if (context->tmfd.cmax != estate->es_output_cid)
1758 [ + - ]: 1 : ereport(ERROR,
1759 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1760 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1761 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1762 : 1 : return NULL;
1763 : :
1764 : 1 : case TM_Deleted:
1765 : : /* tuple already deleted; nothing to do */
1766 : 1 : return NULL;
1767 : :
614 akorotkov@postgresql 1768 :UBC 0 : default:
1769 : :
1770 : : /*
1771 : : * TM_Invisible should be impossible because we're
1772 : : * waiting for updated row versions, and would
1773 : : * already have errored out if the first version
1774 : : * is invisible.
1775 : : *
1776 : : * TM_Updated should be impossible, because we're
1777 : : * locking the latest version via
1778 : : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1779 : : */
1780 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1781 : : result);
1782 : : return NULL;
1783 : : }
1784 : :
1785 : : Assert(false);
1786 : : break;
1787 : : }
1788 : :
2460 andres@anarazel.de 1789 :CBC 3 : case TM_Deleted:
1790 [ - + ]: 3 : if (IsolationUsesXactSnapshot())
2460 andres@anarazel.de 1791 [ # # ]:UBC 0 : ereport(ERROR,
1792 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1793 : : errmsg("could not serialize access due to concurrent delete")));
1794 : : /* tuple already deleted; nothing to do */
5546 tgl@sss.pgh.pa.us 1795 :CBC 3 : return NULL;
1796 : :
5546 tgl@sss.pgh.pa.us 1797 :UBC 0 : default:
2399 andres@anarazel.de 1798 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1799 : : result);
1800 : : return NULL;
1801 : : }
1802 : :
1803 : : /*
1804 : : * Note: Normally one would think that we have to delete index tuples
1805 : : * associated with the heap tuple now...
1806 : : *
1807 : : * ... but in POSTGRES, we have no need to do this because VACUUM will
1808 : : * take care of it later. We can't delete index tuples immediately
1809 : : * anyway, since the tuple is still visible to other transactions.
1810 : : */
1811 : : }
1812 : :
5408 tgl@sss.pgh.pa.us 1813 [ + + ]:CBC 768884 : if (canSetTag)
1814 : 768278 : (estate->es_processed)++;
1815 : :
1816 : : /* Tell caller that the delete actually happened. */
2888 rhaas@postgresql.org 1817 [ + + ]: 768884 : if (tupleDeleted)
1818 : 492 : *tupleDeleted = true;
1819 : :
614 akorotkov@postgresql 1820 : 768884 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1821 : :
1822 : : /*
1823 : : * Process RETURNING if present and if requested.
1824 : : *
1825 : : * If this is part of a cross-partition UPDATE, and the RETURNING list
1826 : : * refers to any OLD column values, save the old tuple here for later
1827 : : * processing of the RETURNING list by ExecInsert().
1828 : : */
334 dean.a.rasheed@gmail 1829 [ + + + + ]: 768957 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1830 [ + + ]: 75 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1831 : :
1832 [ + + + + : 768882 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
+ + ]
1833 : : {
1834 : : /*
1835 : : * We have to put the target tuple into a slot, which means first we
1836 : : * gotta fetch it. We can use the trigger tuple slot.
1837 : : */
1838 : : TupleTableSlot *rslot;
1839 : :
4664 tgl@sss.pgh.pa.us 1840 [ + + ]: 506 : if (resultRelInfo->ri_FdwRoutine)
1841 : : {
1842 : : /* FDW must have provided a slot containing the deleted row */
1843 [ + - - + ]: 7 : Assert(!TupIsNull(slot));
1844 : : }
1845 : : else
1846 : : {
2485 andres@anarazel.de 1847 : 499 : slot = ExecGetReturningSlot(estate, resultRelInfo);
4664 tgl@sss.pgh.pa.us 1848 [ + + ]: 499 : if (oldtuple != NULL)
1849 : : {
2433 andres@anarazel.de 1850 : 12 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1851 : : }
1852 : : else
1853 : : {
614 akorotkov@postgresql 1854 [ - + ]: 487 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1855 : : SnapshotAny, slot))
614 akorotkov@postgresql 1856 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1857 : : }
1858 : : }
1859 : :
1860 : : /*
1861 : : * If required, save the old tuple for later processing of the
1862 : : * RETURNING list by ExecInsert().
1863 : : */
334 dean.a.rasheed@gmail 1864 [ + + ]:CBC 506 : if (saveOld)
1865 : : {
1866 : : TupleConversionMap *tupconv_map;
1867 : :
1868 : : /*
1869 : : * Convert the tuple into the root partition's format/slot, if
1870 : : * needed. ExecInsert() will then convert it to the new
1871 : : * partition's format/slot, if necessary.
1872 : : */
1873 : 24 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1874 [ + + ]: 24 : if (tupconv_map != NULL)
1875 : : {
1876 : 10 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1877 : 10 : TupleTableSlot *oldSlot = slot;
1878 : :
1879 : 10 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1880 : : slot,
1881 : : ExecGetReturningSlot(estate,
1882 : : rootRelInfo));
1883 : :
1884 : 10 : slot->tts_tableOid = oldSlot->tts_tableOid;
1885 : 10 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1886 : : }
1887 : :
1888 : 24 : context->cpDeletedSlot = slot;
1889 : :
1890 : 24 : return NULL;
1891 : : }
1892 : :
1893 : 482 : rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
1894 : : slot, NULL, context->planSlot);
1895 : :
1896 : : /*
1897 : : * Before releasing the target tuple again, make sure rslot has a
1898 : : * local copy of any pass-by-reference values.
1899 : : */
4664 tgl@sss.pgh.pa.us 1900 : 482 : ExecMaterializeSlot(rslot);
1901 : :
5911 1902 : 482 : ExecClearTuple(slot);
1903 : :
1904 : 482 : return rslot;
1905 : : }
1906 : :
1907 : 768376 : return NULL;
1908 : : }
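
The FDW branch above expects a specific contract from ExecForeignDelete. A minimal sketch, where mydw_delete_row() is a hypothetical helper that uses the row-identifying junk columns in planSlot and fills slot for RETURNING:

static TupleTableSlot *
mydw_ExecForeignDelete(EState *estate,
                       ResultRelInfo *rinfo,
                       TupleTableSlot *slot,
                       TupleTableSlot *planSlot)
{
    /* Locate and delete the remote row; fill slot with its old values. */
    if (!mydw_delete_row(rinfo, planSlot, slot))
        return NULL;            /* "do nothing" */

    /*
     * If the query has RETURNING, ExecDelete() asserts that the slot we
     * hand back is non-null; if we return it empty, ExecDelete() stores
     * all NULLs into it before evaluating the RETURNING expressions.
     */
    return slot;
}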
1909 : :
1910 : : /*
1911 : : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1912 : : *
1913 : : * This works by first deleting the old tuple from the current partition,
1914 : : * followed by inserting the new tuple into the root parent table, that is,
1915 : : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1916 : : * correct partition.
1917 : : *
1918 : : * Returns true if the tuple has been successfully moved, or if it's found
1919 : : * that the tuple was concurrently deleted so there's nothing more to do
1920 : : * for the caller.
1921 : : *
1922 : : * False is returned if the tuple we're trying to move is found to have been
1923 : : * concurrently updated. In that case, the caller must check if the updated
1924 : : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1925 : : * this function again or perform a regular update accordingly. For MERGE,
1926 : : * the updated tuple is not returned in *retry_slot; it has its own retry
1927 : : * logic.
1928 : : */
1929 : : static bool
1370 alvherre@alvh.no-ip. 1930 : 559 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1931 : : ResultRelInfo *resultRelInfo,
1932 : : ItemPointer tupleid, HeapTuple oldtuple,
1933 : : TupleTableSlot *slot,
1934 : : bool canSetTag,
1935 : : UpdateContext *updateCxt,
1936 : : TM_Result *tmresult,
1937 : : TupleTableSlot **retry_slot,
1938 : : TupleTableSlot **inserted_tuple,
1939 : : ResultRelInfo **insert_destrel)
1940 : : {
1941 : 559 : ModifyTableState *mtstate = context->mtstate;
1888 heikki.linnakangas@i 1942 : 559 : EState *estate = mtstate->ps.state;
1943 : : TupleConversionMap *tupconv_map;
1944 : : bool tuple_deleted;
1945 : 559 : TupleTableSlot *epqslot = NULL;
1946 : :
334 dean.a.rasheed@gmail 1947 : 559 : context->cpDeletedSlot = NULL;
1370 alvherre@alvh.no-ip. 1948 : 559 : context->cpUpdateReturningSlot = NULL;
1009 dean.a.rasheed@gmail 1949 : 559 : *retry_slot = NULL;
1950 : :
1951 : : /*
1952 : : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1953 : : * to migrate to a different partition. Maybe this can be implemented
1954 : : * some day, but it seems a fringe feature with little redeeming value.
1955 : : */
1888 heikki.linnakangas@i 1956 [ - + ]: 559 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1888 heikki.linnakangas@i 1957 [ # # ]:UBC 0 : ereport(ERROR,
1958 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1959 : : errmsg("invalid ON UPDATE specification"),
1960 : : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1961 : :
1962 : : /*
1963 : : * When an UPDATE is run directly on a leaf partition, simply fail with a
1964 : : * partition constraint violation error.
1965 : : */
1715 tgl@sss.pgh.pa.us 1966 [ + + ]:CBC 559 : if (resultRelInfo == mtstate->rootResultRelInfo)
1888 heikki.linnakangas@i 1967 : 24 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1968 : :
1969 : : /* Initialize tuple routing info if not already done. */
1715 tgl@sss.pgh.pa.us 1970 [ + + ]: 535 : if (mtstate->mt_partition_tuple_routing == NULL)
1971 : : {
1972 : 342 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1973 : : MemoryContext oldcxt;
1974 : :
1975 : : /* Things built here have to last for the query duration. */
1976 : 342 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1977 : :
1978 : 342 : mtstate->mt_partition_tuple_routing =
1979 : 342 : ExecSetupPartitionTupleRouting(estate, rootRel);
1980 : :
1981 : : /*
1982 : : * Before a partition's tuple can be re-routed, it must first be
1983 : : * converted to the root's format, so we'll need a slot for storing
1984 : : * such tuples.
1985 : : */
1986 [ - + ]: 342 : Assert(mtstate->mt_root_tuple_slot == NULL);
1987 : 342 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1988 : :
1989 : 342 : MemoryContextSwitchTo(oldcxt);
1990 : : }
1991 : :
1992 : : /*
1993 : : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1994 : : * We want to return rows from INSERT.
1995 : : */
1370 alvherre@alvh.no-ip. 1996 : 535 : ExecDelete(context, resultRelInfo,
1997 : : tupleid, oldtuple,
1998 : : false, /* processReturning */
1999 : : true, /* changingPart */
2000 : : false, /* canSetTag */
2001 : : tmresult, &tuple_deleted, &epqslot);
2002 : :
2003 : : /*
2004 : : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
2005 : : * it was already deleted by self, or it was concurrently deleted by
2006 : : * another transaction), then we should skip the insert as well;
2007 : : * otherwise, an UPDATE could cause an increase in the total number of
2008 : : * rows across all partitions, which is clearly wrong.
2009 : : *
2010 : : * For a normal UPDATE, the case where the tuple has been the subject of a
2011 : : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
2012 : : * machinery, but for an UPDATE that we've translated into a DELETE from
2013 : : * this partition and an INSERT into some other partition, that's not
2014 : : * available, because CTID chains can't span relation boundaries. We
2015 : : * mimic the semantics to a limited extent by skipping the INSERT if the
2016 : : * DELETE fails to find a tuple. This ensures that two concurrent
2017 : : * attempts to UPDATE the same tuple at the same time can't turn one tuple
2018 : : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
2019 : : * it.
2020 : : */
1888 heikki.linnakangas@i 2021 [ + + ]: 532 : if (!tuple_deleted)
2022 : : {
2023 : : /*
2024 : : * epqslot will typically be NULL. But when ExecDelete() finds that
2025 : : * another transaction has concurrently updated the same row, it
2026 : : * re-fetches the row, skips the delete, and epqslot is set to the
2027 : : * re-fetched tuple slot. In that case, we need to do all the checks
2028 : : * again. For MERGE, we leave everything to the caller (it must do
2029 : : * additional rechecking, and might end up executing a different
2030 : : * action entirely).
2031 : : */
639 dean.a.rasheed@gmail 2032 [ + + ]: 40 : if (mtstate->operation == CMD_MERGE)
726 2033 : 19 : return *tmresult == TM_Ok;
1009 2034 [ + + - + ]: 21 : else if (TupIsNull(epqslot))
1888 heikki.linnakangas@i 2035 : 18 : return true;
2036 : : else
2037 : : {
2038 : : /* Fetch the most recent version of old tuple. */
2039 : : TupleTableSlot *oldSlot;
2040 : :
2041 : : /* ... but first, make sure ri_oldTupleSlot is initialized. */
614 akorotkov@postgresql 2042 [ - + ]: 3 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
614 akorotkov@postgresql 2043 :UBC 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
614 akorotkov@postgresql 2044 :CBC 3 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2045 [ - + ]: 3 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2046 : : tupleid,
2047 : : SnapshotAny,
2048 : : oldSlot))
614 akorotkov@postgresql 2049 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
2050 : : /* and project the new tuple to retry the UPDATE with */
1009 dean.a.rasheed@gmail 2051 :CBC 3 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2052 : : oldSlot);
1888 heikki.linnakangas@i 2053 : 3 : return false;
2054 : : }
2055 : : }
2056 : :
2057 : : /*
2058 : : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2059 : : * convert the tuple into root's tuple descriptor if needed, since
2060 : : * ExecInsert() starts the search from root.
2061 : : */
1715 tgl@sss.pgh.pa.us 2062 : 492 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1888 heikki.linnakangas@i 2063 [ + + ]: 492 : if (tupconv_map != NULL)
2064 : 158 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2065 : : slot,
2066 : : mtstate->mt_root_tuple_slot);
2067 : :
2068 : : /* Tuple routing starts from the root table. */
1370 alvherre@alvh.no-ip. 2069 : 428 : context->cpUpdateReturningSlot =
1367 2070 : 492 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2071 : : inserted_tuple, insert_destrel);
2072 : :
2073 : : /*
2074 : : * Reset the transition state that may possibly have been written by
2075 : : * INSERT.
2076 : : */
1888 heikki.linnakangas@i 2077 [ + + ]: 428 : if (mtstate->mt_transition_capture)
2078 : 27 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2079 : :
2080 : : /* We're done moving. */
2081 : 428 : return true;
2082 : : }
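
A hypothetical READ COMMITTED interleaving showing the skip-the-INSERT semantics described above, with two sessions moving the same row of a partitioned table p (partitions covering [0,10) and [10,20)):

/*
 *   S1: UPDATE p SET a = 15 WHERE a = 1;  -- moves the row, commits
 *   S2: UPDATE p SET a = 12 WHERE a = 1;  -- blocks on S1, then finds the
 *                                         -- old tuple already deleted: the
 *                                         -- DELETE half removes nothing, so
 *                                         -- the INSERT half is skipped and
 *                                         -- one row cannot become two.
 */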
2083 : :
2084 : : /*
2085 : : * ExecUpdatePrologue -- subroutine for ExecUpdate
2086 : : *
2087 : : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2088 : : * triggers. We return false if one of them makes the update a no-op;
2089 : : * otherwise, return true.
2090 : : */
2091 : : static bool
1370 alvherre@alvh.no-ip. 2092 : 163645 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2093 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2094 : : TM_Result *result)
2095 : : {
2096 : 163645 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2097 : :
1009 dean.a.rasheed@gmail 2098 [ + + ]: 163645 : if (result)
2099 : 1092 : *result = TM_Ok;
2100 : :
1370 alvherre@alvh.no-ip. 2101 : 163645 : ExecMaterializeSlot(slot);
2102 : :
2103 : : /*
2104 : : * Open the table's indexes, if we have not done so already, so that we
2105 : : * can add new index entries for the updated tuple.
2106 : : */
2107 [ + + ]: 163645 : if (resultRelationDesc->rd_rel->relhasindex &&
2108 [ + + ]: 118111 : resultRelInfo->ri_IndexRelationDescs == NULL)
2109 : 4777 : ExecOpenIndices(resultRelInfo, false);
2110 : :
2111 : : /* BEFORE ROW UPDATE triggers */
2112 [ + + ]: 163645 : if (resultRelInfo->ri_TrigDesc &&
2113 [ + + ]: 3173 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2114 : : {
2115 : : /* Flush any pending inserts, so rows are visible to the triggers */
1117 efujita@postgresql.o 2116 [ + + ]: 1305 : if (context->estate->es_insert_pending_result_relations != NIL)
2117 : 1 : ExecPendingInserts(context->estate);
2118 : :
1370 alvherre@alvh.no-ip. 2119 : 1293 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2120 : : resultRelInfo, tupleid, oldtuple, slot,
2121 : : result, &context->tmfd,
151 dean.a.rasheed@gmail 2122 : 1305 : context->mtstate->operation == CMD_MERGE);
2123 : : }
2124 : :
1370 alvherre@alvh.no-ip. 2125 : 162340 : return true;
2126 : : }
2127 : :
2128 : : /*
2129 : : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2130 : : *
2131 : : * Apply the final modifications to the tuple slot before the update.
2132 : : * (This is split out because we also need it in the foreign-table code path.)
2133 : : */
2134 : : static void
2135 : 163510 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2136 : : TupleTableSlot *slot,
2137 : : EState *estate)
2138 : : {
2139 : 163510 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2140 : :
2141 : : /*
2142 : : * Constraints and GENERATED expressions might reference the tableoid
2143 : : * column, so (re-)initialize tts_tableOid before evaluating them.
2144 : : */
2145 : 163510 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2146 : :
2147 : : /*
2148 : : * Compute stored generated columns
2149 : : */
2150 [ + + ]: 163510 : if (resultRelationDesc->rd_att->constr &&
2151 [ + + ]: 99676 : resultRelationDesc->rd_att->constr->has_generated_stored)
2152 : 138 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2153 : : CMD_UPDATE);
2154 : 163510 : }
2155 : :
2156 : : /*
2157 : : * ExecUpdateAct -- subroutine for ExecUpdate
2158 : : *
2159 : : * Actually update the tuple, when operating on a plain table. If the
2160 : : * table is a partition, and the command was called referencing an ancestor
2161 : : * partitioned table, this routine migrates the resulting tuple to another
2162 : : * partition.
2163 : : *
2164 : : * The caller is in charge of keeping indexes current as necessary. The
2165 : : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2166 : : * be concurrently updated. However, in case of a cross-partition update,
2167 : : * this routine does it.
2168 : : */
2169 : : static TM_Result
2170 : 163412 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2171 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2172 : : bool canSetTag, UpdateContext *updateCxt)
2173 : : {
2174 : 163412 : EState *estate = context->estate;
2175 : 163412 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2176 : : bool partition_constraint_failed;
2177 : : TM_Result result;
2178 : :
2179 : 163412 : updateCxt->crossPartUpdate = false;
2180 : :
2181 : : /*
2182 : : * If we move the tuple to a new partition, we loop back here to recompute
2183 : : * GENERATED values (which are allowed to be different across partitions)
2184 : : * and recheck any RLS policies and constraints. We do not fire any
2185 : : * BEFORE triggers of the new partition, however.
2186 : : */
1163 john.naylor@postgres 2187 : 163415 : lreplace:
2188 : : /* Fill in GENERATEd columns */
1016 tgl@sss.pgh.pa.us 2189 : 163415 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2190 : :
2191 : : /* ensure slot is independent, consider e.g. EPQ */
1370 alvherre@alvh.no-ip. 2192 : 163415 : ExecMaterializeSlot(slot);
2193 : :
2194 : : /*
2195 : : * If partition constraint fails, this row might get moved to another
2196 : : * partition, in which case we should check the RLS CHECK policy just
2197 : : * before inserting into the new partition, rather than doing it here.
2198 : : * This is because a trigger on that partition might again change the row.
2199 : : * So skip the WCO checks if the partition constraint fails.
2200 : : */
2201 : 163415 : partition_constraint_failed =
2202 [ + + ]: 164803 : resultRelationDesc->rd_rel->relispartition &&
2203 [ + + ]: 1388 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2204 : :
2205 : : /* Check any RLS UPDATE WITH CHECK policies */
2206 [ + + ]: 163415 : if (!partition_constraint_failed &&
2207 [ + + ]: 162856 : resultRelInfo->ri_WithCheckOptions != NIL)
2208 : : {
2209 : : /*
2210 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2211 : : * we are looking for at this point.
2212 : : */
2213 : 267 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2214 : : resultRelInfo, slot, estate);
2215 : : }
2216 : :
2217 : : /*
2218 : : * If a partition check failed, try to move the row into the right
2219 : : * partition.
2220 : : */
2221 [ + + ]: 163388 : if (partition_constraint_failed)
2222 : : {
2223 : : TupleTableSlot *inserted_tuple,
2224 : : *retry_slot;
1367 2225 : 559 : ResultRelInfo *insert_destrel = NULL;
2226 : :
2227 : : /*
2228 : : * ExecCrossPartitionUpdate will first DELETE the row from the
2229 : : * partition it's currently in and then insert it back into the root
2230 : : * table, which will re-route it to the correct partition. However,
2231 : : * if the tuple has been concurrently updated, a retry is needed.
2232 : : */
1370 2233 [ + + ]: 559 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2234 : : tupleid, oldtuple, slot,
2235 : : canSetTag, updateCxt,
2236 : : &result,
2237 : : &retry_slot,
2238 : : &inserted_tuple,
2239 : : &insert_destrel))
2240 : : {
2241 : : /* success! */
2242 : 458 : updateCxt->crossPartUpdate = true;
2243 : :
2244 : : /*
2245 : : * If the partitioned table being updated is referenced in foreign
2246 : : * keys, queue up trigger events to check that none of them were
2247 : : * violated. No special treatment is needed in
2248 : : * non-cross-partition update situations, because the leaf
2249 : : * partition's AR update triggers will take care of that. During
2250 : : * cross-partition updates implemented as delete on the source
2251 : : * partition followed by insert on the destination partition,
2252 : : * AR-UPDATE triggers of the root table (that is, the table
2253 : : * mentioned in the query) must be fired.
2254 : : *
2255 : : * NULL insert_destrel means that the move failed to occur, that
2256 : : * is, the update failed, so there is no need to do anything in that case.
2257 : : */
1367 2258 [ + + ]: 458 : if (insert_destrel &&
2259 [ + + ]: 414 : resultRelInfo->ri_TrigDesc &&
2260 [ + + ]: 184 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2261 : 153 : ExecCrossPartitionUpdateForeignKey(context,
2262 : : resultRelInfo,
2263 : : insert_destrel,
2264 : : tupleid, slot,
2265 : : inserted_tuple);
2266 : :
1370 2267 : 462 : return TM_Ok;
2268 : : }
2269 : :
2270 : : /*
2271 : : * No luck, a retry is needed. If running MERGE, we do not do so
2272 : : * here; instead let it handle that on its own rules.
2273 : : */
639 dean.a.rasheed@gmail 2274 [ + + ]: 10 : if (context->mtstate->operation == CMD_MERGE)
726 2275 : 7 : return result;
2276 : :
2277 : : /*
2278 : : * ExecCrossPartitionUpdate installed an updated version of the new
2279 : : * tuple in the retry slot; start over.
2280 : : */
1009 2281 : 3 : slot = retry_slot;
1370 alvherre@alvh.no-ip. 2282 : 3 : goto lreplace;
2283 : : }
2284 : :
2285 : : /*
2286 : : * Check the constraints of the tuple. We've already checked the
2287 : : * partition constraint above; however, we must still ensure the tuple
2288 : : * passes all other constraints, so we will call ExecConstraints() and
2289 : : * have it validate all remaining checks.
2290 : : */
2291 [ + + ]: 162829 : if (resultRelationDesc->rd_att->constr)
2292 : 99363 : ExecConstraints(resultRelInfo, slot, estate);
2293 : :
2294 : : /*
2295 : : * replace the heap tuple
2296 : : *
2297 : : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2298 : : * the row to be updated is visible to that snapshot, and throw a
2299 : : * can't-serialize error if not. This is a special-case behavior needed
2300 : : * for referential integrity updates in transaction-snapshot mode
2301 : : * transactions.
2302 : : */
2303 : 162792 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2304 : : estate->es_output_cid,
2305 : : estate->es_snapshot,
2306 : : estate->es_crosscheck_snapshot,
2307 : : true /* wait for commit */ ,
2308 : : &context->tmfd, &updateCxt->lockmode,
2309 : : &updateCxt->updateIndexes);
2310 : :
2311 : 162780 : return result;
2312 : : }
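
A hypothetical sketch of why the lreplace loop above recomputes GENERATED values after routing to a new partition; it assumes a partition attached with its own generation expression, which the comment above says is allowed to differ across partitions:

/*
 *   CREATE TABLE gtest (a int, b int GENERATED ALWAYS AS (a * 2) STORED)
 *       PARTITION BY RANGE (a);
 *   CREATE TABLE gtest1 PARTITION OF gtest FOR VALUES FROM (0) TO (10);
 *   CREATE TABLE gtest2 (a int, b int GENERATED ALWAYS AS (a * 3) STORED);
 *   ALTER TABLE gtest ATTACH PARTITION gtest2 FOR VALUES FROM (10) TO (20);
 *
 *   INSERT INTO gtest VALUES (3);
 *   -- Moving the row into gtest2 must recompute b with gtest2's own
 *   -- expression, hence the recomputation before table_tuple_update().
 *   UPDATE gtest SET a = 12 WHERE a = 3;
 */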
2313 : :
2314 : : /*
2315 : : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2316 : : *
2317 : : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2318 : : * returns indicating that the tuple was updated.
2319 : : */
2320 : : static void
2321 : 162801 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2322 : : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2323 : : HeapTuple oldtuple, TupleTableSlot *slot)
2324 : : {
2325 : 162801 : ModifyTableState *mtstate = context->mtstate;
1009 dean.a.rasheed@gmail 2326 : 162801 : List *recheckIndexes = NIL;
2327 : :
2328 : : /* insert index entries for tuple if necessary */
1002 tomas.vondra@postgre 2329 [ + + + + ]: 162801 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
1370 alvherre@alvh.no-ip. 2330 : 89187 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2331 : : slot, context->estate,
2332 : : true, false,
2333 : : NULL, NIL,
1002 tomas.vondra@postgre 2334 : 89187 : (updateCxt->updateIndexes == TU_Summarizing));
2335 : :
2336 : : /* AFTER ROW UPDATE Triggers */
1370 alvherre@alvh.no-ip. 2337 : 162755 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2338 : : NULL, NULL,
2339 : : tupleid, oldtuple, slot,
2340 : : recheckIndexes,
2341 [ + + ]: 162755 : mtstate->operation == CMD_INSERT ?
2342 : : mtstate->mt_oc_transition_capture :
2343 : : mtstate->mt_transition_capture,
2344 : : false);
2345 : :
1009 dean.a.rasheed@gmail 2346 : 162753 : list_free(recheckIndexes);
2347 : :
2348 : : /*
2349 : : * Check any WITH CHECK OPTION constraints from parent views. We are
2350 : : * required to do this after testing all constraints and uniqueness
2351 : : * violations per the SQL spec, so we do it after actually updating the
2352 : : * record in the heap and all indexes.
2353 : : *
2354 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2355 : : * are looking for at this point.
2356 : : */
1370 alvherre@alvh.no-ip. 2357 [ + + ]: 162753 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2358 : 254 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2359 : : slot, context->estate);
2360 : 162712 : }
2361 : :
2362 : : /*
2363 : : * Queues up an update event using the target root partitioned table's
2364 : : * trigger to check that a cross-partition update hasn't broken any foreign
2365 : : * keys pointing into it.
2366 : : */
2367 : : static void
1367 2368 : 153 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2369 : : ResultRelInfo *sourcePartInfo,
2370 : : ResultRelInfo *destPartInfo,
2371 : : ItemPointer tupleid,
2372 : : TupleTableSlot *oldslot,
2373 : : TupleTableSlot *newslot)
2374 : : {
2375 : : ListCell *lc;
2376 : : ResultRelInfo *rootRelInfo;
2377 : : List *ancestorRels;
2378 : :
2379 : 153 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2380 : 153 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2381 : :
2382 : : /*
2383 : : * For any foreign keys that point directly into non-root ancestors of
2384 : : * the source partition, we could in theory fire an update event to make
2385 : : * their triggers enforce those constraints, if we could tell that both
2386 : : * the source and the destination partitions are under the same ancestor.
2387 : : * But for now, we simply report an error that those cannot be enforced.
2388 : : */
2389 [ + - + + : 333 : foreach(lc, ancestorRels)
+ + ]
2390 : : {
2391 : 183 : ResultRelInfo *rInfo = lfirst(lc);
2392 : 183 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2393 : 183 : bool has_noncloned_fkey = false;
2394 : :
2395 : : /* Root ancestor's triggers will be processed. */
2396 [ + + ]: 183 : if (rInfo == rootRelInfo)
2397 : 150 : continue;
2398 : :
2399 [ + - + - ]: 33 : if (trigdesc && trigdesc->trig_update_after_row)
2400 : : {
2401 [ + + ]: 114 : for (int i = 0; i < trigdesc->numtriggers; i++)
2402 : : {
2403 : 84 : Trigger *trig = &trigdesc->triggers[i];
2404 : :
2405 [ + + + - ]: 87 : if (!trig->tgisclone &&
2406 : 3 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2407 : : {
2408 : 3 : has_noncloned_fkey = true;
2409 : 3 : break;
2410 : : }
2411 : : }
2412 : : }
2413 : :
2414 [ + + ]: 33 : if (has_noncloned_fkey)
2415 [ + - ]: 3 : ereport(ERROR,
2416 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2417 : : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2418 : : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2419 : : RelationGetRelationName(rInfo->ri_RelationDesc),
2420 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2421 : : errhint("Consider defining the foreign key on table \"%s\".",
2422 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2423 : : }
2424 : :
2425 : : /* Perform the root table's triggers. */
2426 : 150 : ExecARUpdateTriggers(context->estate,
2427 : : rootRelInfo, sourcePartInfo, destPartInfo,
2428 : : tupleid, NULL, newslot, NIL, NULL, true);
2429 : 150 : }
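
A hypothetical schema (names invented) that reaches the ereport() above, since the non-root ancestor p1 carries a non-cloned PK-side RI trigger:

/*
 *   CREATE TABLE p (a int PRIMARY KEY) PARTITION BY RANGE (a);
 *   CREATE TABLE p1 PARTITION OF p FOR VALUES FROM (0) TO (10)
 *       PARTITION BY RANGE (a);
 *   CREATE TABLE p11 PARTITION OF p1 FOR VALUES FROM (0) TO (5);
 *   CREATE TABLE p12 PARTITION OF p1 FOR VALUES FROM (5) TO (10);
 *   CREATE TABLE fk (a int REFERENCES p1);  -- references non-root ancestor
 *
 *   INSERT INTO p VALUES (1);
 *   -- The row would move p11 -> p12, but the FK points at p1 rather than
 *   -- the root p, so the move is rejected with the error above.
 *   UPDATE p SET a = 7 WHERE a = 1;
 */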
2430 : :
2431 : : /* ----------------------------------------------------------------
2432 : : * ExecUpdate
2433 : : *
2434 : : * note: we can't run UPDATE queries with transactions
2435 : : * off because UPDATEs are actually INSERTs and our
2436 : : * scan will mistakenly loop forever, updating the tuple
2437 : : * it just inserted. This should be fixed, but until it
2438 : : * is, we don't want to get stuck in an infinite loop
2439 : : * that corrupts your database.
2440 : : *
2441 : : * When updating a table, tupleid identifies the tuple to update and
2442 : : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2443 : : * oldtuple is passed to the triggers and identifies what to update, and
2444 : : * tupleid is invalid. When updating a foreign table, tupleid is
2445 : : * invalid; the FDW has to figure out which row to update using data from
2446 : : * the planSlot. oldtuple is passed to foreign table triggers; it is
2447 : : * NULL when the foreign table has no relevant triggers.
2448 : : *
2449 : : * oldSlot contains the old tuple value.
2450 : : * slot contains the new tuple value to be stored.
2451 : : * planSlot is the output of the ModifyTable's subplan; we use it
2452 : : * to access values from other input tables (for RETURNING),
2453 : : * row-ID junk columns, etc.
2454 : : *
2455 : : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2456 : : * had identified the tuple to update, it will identify the tuple
2457 : : * actually updated after EvalPlanQual.
2458 : : * ----------------------------------------------------------------
2459 : : */
2460 : : static TupleTableSlot *
1370 2461 : 162553 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2462 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2463 : : TupleTableSlot *slot, bool canSetTag)
2464 : : {
2465 : 162553 : EState *estate = context->estate;
1889 heikki.linnakangas@i 2466 : 162553 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1370 alvherre@alvh.no-ip. 2467 : 162553 : UpdateContext updateCxt = {0};
2468 : : TM_Result result;
2469 : :
2470 : : /*
2471 : : * abort the operation if not running transactions
2472 : : */
5911 tgl@sss.pgh.pa.us 2473 [ - + ]: 162553 : if (IsBootstrapProcessingMode())
5911 tgl@sss.pgh.pa.us 2474 [ # # ]:UBC 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2475 : :
2476 : : /*
2477 : : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2478 : : * done if it says we are.
2479 : : */
1009 dean.a.rasheed@gmail 2480 [ + + ]:CBC 162553 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
1370 alvherre@alvh.no-ip. 2481 : 66 : return NULL;
2482 : :
2483 : : /* INSTEAD OF ROW UPDATE Triggers */
5546 tgl@sss.pgh.pa.us 2484 [ + + ]: 162475 : if (resultRelInfo->ri_TrigDesc &&
2485 [ + + ]: 2911 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2486 : : {
2485 andres@anarazel.de 2487 [ + + ]: 63 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2488 : : oldtuple, slot))
2400 tgl@sss.pgh.pa.us 2489 : 9 : return NULL; /* "do nothing" */
2490 : : }
4664 2491 [ + + ]: 162412 : else if (resultRelInfo->ri_FdwRoutine)
2492 : : {
2493 : : /* Fill in GENERATEd columns */
1370 alvherre@alvh.no-ip. 2494 : 95 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2495 : :
2496 : : /*
2497 : : * update in foreign table: let the FDW do it
2498 : : */
4664 tgl@sss.pgh.pa.us 2499 : 95 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2500 : : resultRelInfo,
2501 : : slot,
2502 : : context->planSlot);
2503 : :
2504 [ + + ]: 95 : if (slot == NULL) /* "do nothing" */
2505 : 1 : return NULL;
2506 : :
2507 : : /*
2508 : : * AFTER ROW Triggers or RETURNING expressions might reference the
2509 : : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2510 : : * them. (This covers the case where the FDW replaced the slot.)
2511 : : */
2485 andres@anarazel.de 2512 : 94 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2513 : : }
2514 : : else
2515 : : {
2516 : : ItemPointerData lockedtid;
2517 : :
2518 : : /*
2519 : : * If we generate a new candidate tuple after EvalPlanQual testing, we
2520 : : * must loop back here to try again. (We don't need to redo triggers,
2521 : : * however. If there are any BEFORE triggers then trigger.c will have
2522 : : * done table_tuple_lock to lock the correct tuple, so there's no need
2523 : : * to do them again.)
2524 : : */
1370 alvherre@alvh.no-ip. 2525 : 162317 : redo_act:
448 noah@leadboat.com 2526 : 162369 : lockedtid = *tupleid;
1370 alvherre@alvh.no-ip. 2527 : 162369 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2528 : : canSetTag, &updateCxt);
2529 : :
2530 : : /*
2531 : : * If ExecUpdateAct reports that a cross-partition update was done,
2532 : : * then the RETURNING tuple (if any) has been projected and there's
2533 : : * nothing else for us to do.
2534 : : */
2535 [ + + ]: 162211 : if (updateCxt.crossPartUpdate)
2536 : 452 : return context->cpUpdateReturningSlot;
2537 : :
5546 tgl@sss.pgh.pa.us 2538 [ + + + + : 161825 : switch (result)
- ]
2539 : : {
2460 andres@anarazel.de 2540 : 42 : case TM_SelfModified:
2541 : :
2542 : : /*
2543 : : * The target tuple was already updated or deleted by the
2544 : : * current command, or by a later command in the current
2545 : : * transaction. The former case is possible in a join UPDATE
2546 : : * where multiple tuples join to the same target tuple. This
2547 : : * is pretty questionable, but Postgres has always allowed it:
2548 : : * we just execute the first update action and ignore
2549 : : * additional update attempts.
2550 : : *
2551 : : * The latter case arises if the tuple is modified by a
2552 : : * command in a BEFORE trigger, or perhaps by a command in a
2553 : : * volatile function used in the query. In such situations we
2554 : : * should not ignore the update, but it is equally unsafe to
2555 : : * proceed. We don't want to discard the original UPDATE
2556 : : * while keeping the triggered actions based on it; and we
2557 : : * have no principled way to merge this update with the
2558 : : * previous ones. So throwing an error is the only safe
2559 : : * course.
2560 : : *
2561 : : * If a trigger actually intends this type of interaction, it
2562 : : * can re-execute the UPDATE (assuming it can figure out how)
2563 : : * and then return NULL to cancel the outer update.
2564 : : */
1370 alvherre@alvh.no-ip. 2565 [ + + ]: 42 : if (context->tmfd.cmax != estate->es_output_cid)
4799 kgrittn@postgresql.o 2566 [ + - ]: 3 : ereport(ERROR,
2567 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2568 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2569 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2570 : :
2571 : : /* Else, already updated by self; nothing to do */
5546 tgl@sss.pgh.pa.us 2572 : 39 : return NULL;
2573 : :
2460 andres@anarazel.de 2574 : 161699 : case TM_Ok:
5546 tgl@sss.pgh.pa.us 2575 : 161699 : break;
2576 : :
2460 andres@anarazel.de 2577 : 80 : case TM_Updated:
2578 : : {
2579 : : TupleTableSlot *inputslot;
2580 : : TupleTableSlot *epqslot;
2581 : :
2582 [ + + ]: 80 : if (IsolationUsesXactSnapshot())
2583 [ + - ]: 2 : ereport(ERROR,
2584 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2585 : : errmsg("could not serialize access due to concurrent update")));
2586 : :
2587 : : /*
2588 : : * Already know that we're going to need to do EPQ, so
2589 : : * fetch tuple directly into the right slot.
2590 : : */
614 akorotkov@postgresql 2591 : 78 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2592 : : resultRelInfo->ri_RangeTableIndex);
2593 : :
2594 : 78 : result = table_tuple_lock(resultRelationDesc, tupleid,
2595 : : estate->es_snapshot,
2596 : : inputslot, estate->es_output_cid,
2597 : : updateCxt.lockmode, LockWaitBlock,
2598 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2599 : : &context->tmfd);
2600 : :
2601 [ + + + - ]: 76 : switch (result)
2602 : : {
2603 : 71 : case TM_Ok:
2604 [ - + ]: 71 : Assert(context->tmfd.traversed);
2605 : :
2606 : 71 : epqslot = EvalPlanQual(context->epqstate,
2607 : : resultRelationDesc,
2608 : : resultRelInfo->ri_RangeTableIndex,
2609 : : inputslot);
2610 [ + + + + ]: 71 : if (TupIsNull(epqslot))
2611 : : /* Tuple not passing quals anymore, exiting... */
2612 : 19 : return NULL;
2613 : :
2614 : : /* Make sure ri_oldTupleSlot is initialized. */
2615 [ - + ]: 52 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
614 akorotkov@postgresql 2616 :UBC 0 : ExecInitUpdateProjection(context->mtstate,
2617 : : resultRelInfo);
2618 : :
448 noah@leadboat.com 2619 [ + + ]:CBC 52 : if (resultRelInfo->ri_needLockTagTuple)
2620 : : {
2621 : 1 : UnlockTuple(resultRelationDesc,
2622 : : &lockedtid, InplaceUpdateTupleLock);
2623 : 1 : LockTuple(resultRelationDesc,
2624 : : tupleid, InplaceUpdateTupleLock);
2625 : : }
2626 : :
2627 : : /* Fetch the most recent version of old tuple. */
614 akorotkov@postgresql 2628 : 52 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2629 [ - + ]: 52 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2630 : : tupleid,
2631 : : SnapshotAny,
2632 : : oldSlot))
614 akorotkov@postgresql 2633 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
614 akorotkov@postgresql 2634 :CBC 52 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2635 : : epqslot, oldSlot);
2636 : 52 : goto redo_act;
2637 : :
2638 : 1 : case TM_Deleted:
2639 : : /* tuple already deleted; nothing to do */
2640 : 1 : return NULL;
2641 : :
2642 : 4 : case TM_SelfModified:
2643 : :
2644 : : /*
2645 : : * This can be reached when following an update
2646 : : * chain from a tuple updated by another session,
2647 : : * reaching a tuple that was already updated in
2648 : : * this transaction. If previously modified by
2649 : : * this command, ignore the redundant update,
2650 : : * otherwise error out.
2651 : : *
2652 : : * See also TM_SelfModified response to
2653 : : * table_tuple_update() above.
2654 : : */
2655 [ + + ]: 4 : if (context->tmfd.cmax != estate->es_output_cid)
2656 [ + - ]: 1 : ereport(ERROR,
2657 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2658 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2659 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2660 : 3 : return NULL;
2661 : :
614 akorotkov@postgresql 2662 :UBC 0 : default:
2663 : : /* see table_tuple_lock call in ExecDelete() */
2664 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2665 : : result);
2666 : : return NULL;
2667 : : }
2668 : : }
2669 : :
2670 : : break;
2671 : :
2460 andres@anarazel.de 2672 :CBC 4 : case TM_Deleted:
2673 [ - + ]: 4 : if (IsolationUsesXactSnapshot())
2460 andres@anarazel.de 2674 [ # # ]:UBC 0 : ereport(ERROR,
2675 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2676 : : errmsg("could not serialize access due to concurrent delete")));
2677 : : /* tuple already deleted; nothing to do */
5546 tgl@sss.pgh.pa.us 2678 :CBC 4 : return NULL;
2679 : :
5546 tgl@sss.pgh.pa.us 2680 :UBC 0 : default:
2399 andres@anarazel.de 2681 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2682 : : result);
2683 : : return NULL;
2684 : : }
2685 : : }
2686 : :
5408 tgl@sss.pgh.pa.us 2687 [ + + ]:CBC 161841 : if (canSetTag)
2688 : 161542 : (estate->es_processed)++;
2689 : :
1370 alvherre@alvh.no-ip. 2690 : 161841 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2691 : : slot);
2692 : :
2693 : : /* Process RETURNING if present */
5911 tgl@sss.pgh.pa.us 2694 [ + + ]: 161758 : if (resultRelInfo->ri_projectReturning)
334 dean.a.rasheed@gmail 2695 : 1207 : return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE,
2696 : : oldSlot, slot, context->planSlot);
2697 : :
5911 tgl@sss.pgh.pa.us 2698 : 160551 : return NULL;
2699 : : }
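 : :
 : : /*
 : :  * Editor's note (illustrative, not part of the original source; the
 : :  * table "accounts" is hypothetical).  The TM_Updated handling above is
 : :  * where the isolation-level difference becomes user-visible.  Under
 : :  * READ COMMITTED, a session blocked behind a concurrent committed
 : :  * update of the same row, e.g.
 : :  *
 : :  *     UPDATE accounts SET balance = balance + 1 WHERE id = 1;
 : :  *
 : :  * re-fetches the latest row version, rechecks the quals with
 : :  * EvalPlanQual(), and proceeds if they still pass (the TM_Ok branch).
 : :  * Under REPEATABLE READ or SERIALIZABLE, IsolationUsesXactSnapshot()
 : :  * is true and the same situation instead raises "could not serialize
 : :  * access due to concurrent update".
 : :  */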
2700 : :
2701 : : /*
2702 : : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2703 : : *
2704 : : * Try to lock tuple for update as part of speculative insertion. If
2705 : : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2706 : : * (but still lock row, even though it may not satisfy estate's
2707 : : * snapshot).
2708 : : *
2709 : : * Returns true if we're done (with or without an update), or false if
2710 : : * the caller must retry the INSERT from scratch.
2711 : : */
2712 : : static bool
1370 alvherre@alvh.no-ip. 2713 : 2608 : ExecOnConflictUpdate(ModifyTableContext *context,
2714 : : ResultRelInfo *resultRelInfo,
2715 : : ItemPointer conflictTid,
2716 : : TupleTableSlot *excludedSlot,
2717 : : bool canSetTag,
2718 : : TupleTableSlot **returning)
2719 : : {
2720 : 2608 : ModifyTableState *mtstate = context->mtstate;
3875 andres@anarazel.de 2721 : 2608 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2722 : 2608 : Relation relation = resultRelInfo->ri_RelationDesc;
2822 alvherre@alvh.no-ip. 2723 : 2608 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2477 andres@anarazel.de 2724 : 2608 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2725 : : TM_FailureData tmfd;
2726 : : LockTupleMode lockmode;
2727 : : TM_Result test;
2728 : : Datum xminDatum;
2729 : : TransactionId xmin;
2730 : : bool isnull;
2731 : :
2732 : : /*
2733 : : * Parse analysis should have blocked ON CONFLICT for all system
2734 : : * relations, which includes these. There's no fundamental obstacle to
2735 : : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2736 : : * ExecUpdate() caller.
2737 : : */
448 noah@leadboat.com 2738 [ - + ]: 2608 : Assert(!resultRelInfo->ri_needLockTagTuple);
2739 : :
2740 : : /* Determine lock mode to use */
1370 alvherre@alvh.no-ip. 2741 : 2608 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2742 : :
2743 : : /*
2744 : : * Lock tuple for update. Don't follow updates when tuple cannot be
2745 : : * locked without doing so. A row locking conflict here means our
2746 : : * previous conclusion that the tuple is conclusively committed no
2747 : : * longer holds.
2748 : : */
2399 andres@anarazel.de 2749 : 2608 : test = table_tuple_lock(relation, conflictTid,
1370 alvherre@alvh.no-ip. 2750 : 2608 : context->estate->es_snapshot,
2751 : 2608 : existing, context->estate->es_output_cid,
2752 : : lockmode, LockWaitBlock, 0,
2753 : : &tmfd);
3875 andres@anarazel.de 2754 [ + + - - : 2608 : switch (test)
- - ]
2755 : : {
2460 2756 : 2596 : case TM_Ok:
2757 : : /* success! */
3875 2758 : 2596 : break;
2759 : :
2460 2760 : 12 : case TM_Invisible:
2761 : :
2762 : : /*
2763 : : * This can occur when a just-inserted tuple is updated again in
2764 : : * the same command, e.g. because multiple rows with the same
2765 : : * conflicting key values are inserted.
2766 : : *
2767 : : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2768 : : * case. We do not want to proceed because it would lead to the
2769 : : * same row being updated a second time in some unspecified order,
2770 : : * and in contrast to plain UPDATEs there's no historical behavior
2771 : : * to break.
2772 : : *
2773 : : * It is the user's responsibility to prevent this situation from
2774 : : * occurring. These problems are why the SQL standard similarly
2775 : : * specifies that for SQL MERGE, an exception must be raised in
2776 : : * the event of an attempt to update the same row twice.
2777 : : */
2778 : 12 : xminDatum = slot_getsysattr(existing,
2779 : : MinTransactionIdAttributeNumber,
2780 : : &isnull);
2781 [ - + ]: 12 : Assert(!isnull);
2782 : 12 : xmin = DatumGetTransactionId(xminDatum);
2783 : :
2784 [ + - ]: 12 : if (TransactionIdIsCurrentTransactionId(xmin))
3875 2785 [ + - ]: 12 : ereport(ERROR,
2786 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2787 : : /* translator: %s is a SQL command name */
2788 : : errmsg("%s command cannot affect row a second time",
2789 : : "ON CONFLICT DO UPDATE"),
2790 : : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2791 : :
2792 : : /* This shouldn't happen */
3875 andres@anarazel.de 2793 [ # # ]:UBC 0 : elog(ERROR, "attempted to lock invisible tuple");
2794 : : break;
2795 : :
2460 2796 : 0 : case TM_SelfModified:
2797 : :
2798 : : /*
2799 : : * This state should never be reached: since a dirty snapshot is
2800 : : * used to find conflicting tuples, speculative insertion could not
2801 : : * have identified this row as one it conflicts with.
2802 : : */
3875 2803 [ # # ]: 0 : elog(ERROR, "unexpected self-updated tuple");
2804 : : break;
2805 : :
2460 2806 : 0 : case TM_Updated:
3875 2807 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2808 [ # # ]: 0 : ereport(ERROR,
2809 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2810 : : errmsg("could not serialize access due to concurrent update")));
2811 : :
2812 : : /*
2813 : : * As long as we don't support INSERT ON CONFLICT DO UPDATE on a
2814 : : * partitioned table, we shouldn't reach a case where the tuple to
2815 : : * be locked has been moved to another partition by a concurrent
2816 : : * update of the partition key.
2817 : : */
2460 2818 [ # # ]: 0 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2819 : :
2820 : : /*
2821 : : * Tell caller to try again from the very start.
2822 : : *
2823 : : * It does not make sense to use the usual EvalPlanQual() style
2824 : : * loop here, as the new version of the row might not conflict
2825 : : * anymore, or the conflicting tuple has actually been deleted.
2826 : : */
2827 : 0 : ExecClearTuple(existing);
2828 : 0 : return false;
2829 : :
2830 : 0 : case TM_Deleted:
2831 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2832 [ # # ]: 0 : ereport(ERROR,
2833 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2834 : : errmsg("could not serialize access due to concurrent delete")));
2835 : :
2836 : : /* see TM_Updated case */
2837 [ # # ]: 0 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2838 : 0 : ExecClearTuple(existing);
3875 2839 : 0 : return false;
2840 : :
2841 : 0 : default:
2399 2842 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2843 : : }
2844 : :
2845 : : /* Success, the tuple is locked. */
2846 : :
2847 : : /*
2848 : : * Verify that the tuple is visible to our MVCC snapshot if the current
2849 : : * isolation level mandates that.
2850 : : *
2851 : : * It's not sufficient to rely on the check within ExecUpdate(): e.g.
2852 : : * the ON CONFLICT ... WHERE clause may prevent us from reaching it.
2853 : : *
2854 : : * This means we only ever continue when a new command in the current
2855 : : * transaction could see the row, even though in READ COMMITTED mode the
2856 : : * tuple will not be visible according to the current statement's
2857 : : * snapshot. This is in line with the way UPDATE deals with newer tuple
2858 : : * versions.
2859 : : */
1370 alvherre@alvh.no-ip. 2860 :CBC 2596 : ExecCheckTupleVisible(context->estate, relation, existing);
2861 : :
2862 : : /*
2863 : : * Make tuple and any needed join variables available to ExecQual and
2864 : : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2865 : : * the target's existing tuple is installed in the scantuple. EXCLUDED
2866 : : * has been made to reference INNER_VAR in setrefs.c, but there is no
2867 : : * other redirection.
2868 : : */
2477 andres@anarazel.de 2869 : 2596 : econtext->ecxt_scantuple = existing;
3875 2870 : 2596 : econtext->ecxt_innertuple = excludedSlot;
2871 : 2596 : econtext->ecxt_outertuple = NULL;
2872 : :
3199 2873 [ + + ]: 2596 : if (!ExecQual(onConflictSetWhere, econtext))
2874 : : {
2477 2875 : 16 : ExecClearTuple(existing); /* see return below */
3875 2876 [ - + ]: 16 : InstrCountFiltered1(&mtstate->ps, 1);
2877 : 16 : return true; /* done with the tuple */
2878 : : }
2879 : :
2880 [ + + ]: 2580 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2881 : : {
2882 : : /*
2883 : : * Check target's existing tuple against UPDATE-applicable USING
2884 : : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2885 : : *
2886 : : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2887 : : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2888 : : * but that's almost the extent of its special handling for ON
2889 : : * CONFLICT DO UPDATE.
2890 : : *
2891 : : * The rewriter will also have associated UPDATE applicable straight
2892 : : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2893 : : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2894 : : * kinds, so there is no danger of spurious over-enforcement in the
2895 : : * INSERT or UPDATE path.
2896 : : */
2897 : 36 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2898 : : existing,
2899 : : mtstate->ps.state);
2900 : : }
2901 : :
2902 : : /* Project the new tuple version */
2822 alvherre@alvh.no-ip. 2903 : 2568 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2904 : :
2905 : : /*
2906 : : * Note that it is possible that the target tuple has been modified in
2907 : : * this session, after the above table_tuple_lock. We choose not to error
2908 : : * out in that case, in line with ExecUpdate's treatment of similar cases.
2909 : : * This can happen if an UPDATE is triggered from within ExecQual(),
2910 : : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2911 : : * wCTE in the ON CONFLICT's SET.
2912 : : */
2913 : :
2914 : : /* Execute UPDATE with projection */
1370 2915 : 5121 : *returning = ExecUpdate(context, resultRelInfo,
2916 : : conflictTid, NULL, existing,
2477 andres@anarazel.de 2917 : 2568 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2918 : : canSetTag);
2919 : :
2920 : : /*
2921 : : * Clear out existing tuple, as there might not be another conflict among
2922 : : * the next input rows. Don't want to hold resources till the end of the
2923 : : * query. First though, make sure that the returning slot, if any, has a
2924 : : * local copy of any OLD pass-by-reference values, if it refers to any OLD
2925 : : * columns.
2926 : : */
334 dean.a.rasheed@gmail 2927 [ + + ]: 2553 : if (*returning != NULL &&
2928 [ + + ]: 113 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2929 : 3 : ExecMaterializeSlot(*returning);
2930 : :
2477 andres@anarazel.de 2931 : 2553 : ExecClearTuple(existing);
2932 : :
3875 2933 : 2553 : return true;
2934 : : }
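 : :
 : : /*
 : :  * Editor's sketch (not in the original source; the table "counters"
 : :  * and its columns are hypothetical).  Assuming a unique constraint on
 : :  * counters(id), the path above is exercised by:
 : :  *
 : :  *     INSERT INTO counters VALUES (1, 10)
 : :  *       ON CONFLICT (id) DO UPDATE SET n = counters.n + EXCLUDED.n
 : :  *       WHERE counters.n < 100;
 : :  *
 : :  * If the WHERE clause fails against the locked existing row, the row
 : :  * is skipped (the ExecQual call above returns false) but stays locked,
 : :  * per the header comment.  And if one command proposes two rows with
 : :  * the same key, e.g. VALUES (1, 10), (1, 20), the second one finds the
 : :  * first's just-inserted tuple (TM_Invisible above) and fails with
 : :  * "ON CONFLICT DO UPDATE command cannot affect row a second time".
 : :  */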
2935 : :
2936 : : /*
2937 : : * Perform MERGE.
2938 : : */
2939 : : static TupleTableSlot *
1359 alvherre@alvh.no-ip. 2940 : 7636 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2941 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2942 : : {
639 dean.a.rasheed@gmail 2943 : 7636 : TupleTableSlot *rslot = NULL;
2944 : : bool matched;
2945 : :
2946 : : /*-----
2947 : : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2948 : : * valid, depending on whether the result relation is a table or a view.
2949 : : * We execute the first action for which the additional WHEN MATCHED AND
2950 : : * quals pass. If an action without quals is found, that action is
2951 : : * executed.
2952 : : *
2953 : : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2954 : : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2955 : : * in sequence until one passes. This is almost identical to the WHEN
2956 : : * MATCHED case, and both cases are handled by ExecMergeMatched().
2957 : : *
2958 : : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2959 : : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2960 : : * TARGET] actions in sequence until one passes.
2961 : : *
2962 : : * Things get interesting in case of concurrent update/delete of the
2963 : : * target tuple. Such concurrent update/delete is detected while we are
2964 : : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2965 : : *
2966 : : * A concurrent update can:
2967 : : *
2968 : : * 1. modify the target tuple so that the results from checking any
2969 : : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2970 : : * SOURCE actions potentially change, but the result from the join
2971 : : * quals does not change.
2972 : : *
2973 : : * In this case, we are still dealing with the same kind of match
2974 : : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2975 : : * actions from the start and choose the first one that satisfies the
2976 : : * new target tuple.
2977 : : *
2978 : : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2979 : : * quals no longer pass and hence the source and target tuples no
2980 : : * longer match.
2981 : : *
2982 : : * In this case, we are now dealing with a NOT MATCHED case, and we
2983 : : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2984 : : * TARGET] actions. First ExecMergeMatched() processes the list of
2985 : : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2986 : : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2987 : : * TARGET] actions in sequence until one passes. Thus we may execute
2988 : : * two actions; one of each kind.
2989 : : *
2990 : : * Thus we support concurrent updates that turn MATCHED candidate rows
2991 : : * into NOT MATCHED rows. However, we do not attempt to support cases
2992 : : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2993 : : * cause a target row to match a different source row.
2994 : : *
2995 : : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2996 : : * [BY TARGET].
2997 : : *
2998 : : * ExecMergeMatched() takes care of following the update chain and
2999 : : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
3000 : : * action, as long as the target tuple still exists. If the target tuple
3001 : : * gets deleted or a concurrent update causes the join quals to fail, it
3002 : : * returns a matched status of false and we call ExecMergeNotMatched().
3003 : : * Given that ExecMergeMatched() always makes progress by following the
3004 : : * update chain and we never switch from ExecMergeNotMatched() to
3005 : : * ExecMergeMatched(), there is no risk of a livelock.
3006 : : */
656 3007 [ + + + + ]: 7636 : matched = tupleid != NULL || oldtuple != NULL;
1359 alvherre@alvh.no-ip. 3008 [ + + ]: 7636 : if (matched)
639 dean.a.rasheed@gmail 3009 : 6293 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3010 : : canSetTag, &matched);
3011 : :
3012 : : /*
3013 : : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3014 : : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3015 : : * "matched" to false, indicating that it no longer matches).
3016 : : */
1359 alvherre@alvh.no-ip. 3017 [ + + ]: 7589 : if (!matched)
3018 : : {
3019 : : /*
3020 : : * If a concurrent update turned a MATCHED case into a NOT MATCHED
3021 : : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3022 : : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3023 : : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3024 : : * SOURCE action, and computed the row to return. If so, we cannot
3025 : : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3026 : : * pending (to be processed on the next call to ExecModifyTable()).
3027 : : * Otherwise, just process the action now.
3028 : : */
626 dean.a.rasheed@gmail 3029 [ + + ]: 1352 : if (rslot == NULL)
3030 : 1350 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3031 : : else
3032 : 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3033 : : }
3034 : :
639 3035 : 7559 : return rslot;
3036 : : }
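 : :
 : : /*
 : :  * Editor's sketch (not in the original source; relation names are
 : :  * hypothetical).  For a command such as:
 : :  *
 : :  *     MERGE INTO target t USING source s ON t.id = s.id
 : :  *       WHEN MATCHED AND s.op = 'del' THEN DELETE
 : :  *       WHEN MATCHED THEN UPDATE SET val = s.val
 : :  *       WHEN NOT MATCHED BY SOURCE THEN DELETE
 : :  *       WHEN NOT MATCHED THEN INSERT VALUES (s.id, s.val);
 : :  *
 : :  * joined rows carrying row-locating info go through ExecMergeMatched(),
 : :  * which runs the first MATCHED (or NOT MATCHED BY SOURCE) action whose
 : :  * WHEN qual passes, in the order written; rows without row-locating
 : :  * info go through ExecMergeNotMatched() and its NOT MATCHED [BY
 : :  * TARGET] list.
 : :  */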
3037 : :
3038 : : /*
3039 : : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3040 : : * action, depending on whether the join quals are satisfied. If the target
3041 : : * relation is a table, the current target tuple is identified by tupleid.
3042 : : * Otherwise, if the target relation is a view, oldtuple is the current target
3043 : : * tuple from the view.
3044 : : *
3045 : : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3046 : : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3047 : : * action do not pass, we check the second, then the third and so on. If we
3048 : : * reach the end without finding a qualifying action, we return NULL.
3049 : : * Otherwise, we execute the qualifying action and return its RETURNING
3050 : : * result, if any, or NULL.
3051 : : *
3052 : : * On entry, "*matched" is assumed to be true. If a concurrent update or
3053 : : * delete is detected that causes the join quals to no longer pass, we set it
3054 : : * to false, indicating that the caller should process any NOT MATCHED [BY
3055 : : * TARGET] actions.
3056 : : *
3057 : : * After a concurrent update, we restart from the first action to look for a
3058 : : * new qualifying action to execute. If the join quals originally passed, and
3059 : : * the concurrent update caused them to no longer pass, then we switch from
3060 : : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3061 : : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3062 : : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3063 : : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3064 : : */
3065 : : static TupleTableSlot *
1359 alvherre@alvh.no-ip. 3066 : 6293 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3067 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3068 : : bool *matched)
3069 : : {
3070 : 6293 : ModifyTableState *mtstate = context->mtstate;
626 dean.a.rasheed@gmail 3071 : 6293 : List **mergeActions = resultRelInfo->ri_MergeActions;
3072 : : ItemPointerData lockedtid;
3073 : : List *actionStates;
639 3074 : 6293 : TupleTableSlot *newslot = NULL;
3075 : 6293 : TupleTableSlot *rslot = NULL;
1359 alvherre@alvh.no-ip. 3076 : 6293 : EState *estate = context->estate;
3077 : 6293 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3078 : : bool isNull;
3079 : 6293 : EPQState *epqstate = &mtstate->mt_epqstate;
3080 : : ListCell *l;
3081 : :
3082 : : /* Expect matched to be true on entry */
626 dean.a.rasheed@gmail 3083 [ - + ]: 6293 : Assert(*matched);
3084 : :
3085 : : /*
3086 : : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3087 : : * are done.
3088 : : */
3089 [ + + ]: 6293 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3090 [ + + ]: 603 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
639 3091 : 267 : return NULL;
3092 : :
3093 : : /*
3094 : : * Make tuple and any needed join variables available to ExecQual and
3095 : : * ExecProject. The target's existing tuple is installed in the scantuple.
3096 : : * This target relation's slot is required only in the case of a MATCHED
3097 : : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3098 : : */
1359 alvherre@alvh.no-ip. 3099 : 6026 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3100 : 6026 : econtext->ecxt_innertuple = context->planSlot;
3101 : 6026 : econtext->ecxt_outertuple = NULL;
3102 : :
3103 : : /*
3104 : : * This routine is only invoked for matched target rows, so we should
3105 : : * either have the tupleid of the target row, or an old tuple from the
3106 : : * target wholerow junk attr.
3107 : : */
656 dean.a.rasheed@gmail 3108 [ + + - + ]: 6026 : Assert(tupleid != NULL || oldtuple != NULL);
448 noah@leadboat.com 3109 : 6026 : ItemPointerSetInvalid(&lockedtid);
656 dean.a.rasheed@gmail 3110 [ + + ]: 6026 : if (oldtuple != NULL)
3111 : : {
448 noah@leadboat.com 3112 [ - + ]: 48 : Assert(!resultRelInfo->ri_needLockTagTuple);
656 dean.a.rasheed@gmail 3113 : 48 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3114 : : false);
3115 : : }
3116 : : else
3117 : : {
448 noah@leadboat.com 3118 [ + + ]: 5978 : if (resultRelInfo->ri_needLockTagTuple)
3119 : : {
3120 : : /*
3121 : : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3122 : : * that don't match mas_whenqual. MERGE on system catalogs is a
3123 : : * minor use case, so don't bother optimizing those.
3124 : : */
3125 : 3988 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3126 : : InplaceUpdateTupleLock);
3127 : 3988 : lockedtid = *tupleid;
3128 : : }
3129 [ - + ]: 5978 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3130 : : tupleid,
3131 : : SnapshotAny,
3132 : : resultRelInfo->ri_oldTupleSlot))
448 noah@leadboat.com 3133 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch the target tuple");
3134 : : }
3135 : :
3136 : : /*
3137 : : * Test the join condition. If it's satisfied, perform a MATCHED action.
3138 : : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3139 : : *
3140 : : * Note that this join condition will be NULL if there are no NOT MATCHED
3141 : : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3142 : : * need only consider MATCHED actions here.
3143 : : */
626 dean.a.rasheed@gmail 3144 [ + + ]:CBC 6026 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3145 : 5933 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3146 : : else
3147 : 93 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3148 : :
3149 : 6026 : lmerge_matched:
3150 : :
3151 [ + + + + : 10786 : foreach(l, actionStates)
+ + ]
3152 : : {
1359 alvherre@alvh.no-ip. 3153 : 6104 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3154 : 6104 : CmdType commandType = relaction->mas_action->commandType;
3155 : : TM_Result result;
3156 : 6104 : UpdateContext updateCxt = {0};
3157 : :
3158 : : /*
3159 : : * Test condition, if any.
3160 : : *
3161 : : * In the absence of any condition, we perform the action
3162 : : * unconditionally (no need to check separately since ExecQual() will
3163 : : * return true if there are no conditions to evaluate).
3164 : : */
3165 [ + + ]: 6104 : if (!ExecQual(relaction->mas_whenqual, econtext))
3166 : 4722 : continue;
3167 : :
3168 : : /*
3169 : : * Check if the existing target tuple meets the USING checks of
3170 : : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3171 : : * error.
3172 : : *
3173 : : * The WITH CHECK quals for UPDATE RLS policies are applied in
3174 : : * ExecUpdateAct() and hence we need not do anything special to handle
3175 : : * them.
3176 : : *
3177 : : * NOTE: We must do this after WHEN quals are evaluated, so that we
3178 : : * check policies only when they matter.
3179 : : */
862 dean.a.rasheed@gmail 3180 [ + + + + ]: 1382 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3181 : : {
1359 alvherre@alvh.no-ip. 3182 : 57 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3183 : : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3184 : : resultRelInfo,
3185 : : resultRelInfo->ri_oldTupleSlot,
3186 [ + + ]: 57 : context->mtstate->ps.state);
3187 : : }
3188 : :
3189 : : /* Perform stated action */
3190 [ + + + - ]: 1370 : switch (commandType)
3191 : : {
3192 : 1092 : case CMD_UPDATE:
3193 : :
3194 : : /*
3195 : : * Project the output tuple, and use that to update the table.
3196 : : * We don't need to filter out junk attributes, because the
3197 : : * UPDATE action's targetlist doesn't have any.
3198 : : */
3199 : 1092 : newslot = ExecProject(relaction->mas_proj);
3200 : :
639 dean.a.rasheed@gmail 3201 : 1092 : mtstate->mt_merge_action = relaction;
1359 alvherre@alvh.no-ip. 3202 [ + + ]: 1092 : if (!ExecUpdatePrologue(context, resultRelInfo,
3203 : : tupleid, NULL, newslot, &result))
3204 : : {
1009 dean.a.rasheed@gmail 3205 [ + + ]: 10 : if (result == TM_Ok)
448 noah@leadboat.com 3206 : 80 : goto out; /* "do nothing" */
3207 : :
1009 dean.a.rasheed@gmail 3208 : 7 : break; /* concurrent update/delete */
3209 : : }
3210 : :
3211 : : /* INSTEAD OF ROW UPDATE Triggers */
656 3212 [ + + ]: 1082 : if (resultRelInfo->ri_TrigDesc &&
3213 [ + + ]: 174 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3214 : : {
3215 [ - + ]: 39 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3216 : : oldtuple, newslot))
448 noah@leadboat.com 3217 :UBC 0 : goto out; /* "do nothing" */
3218 : : }
3219 : : else
3220 : : {
3221 : : /* checked ri_needLockTagTuple above */
521 noah@leadboat.com 3222 [ - + ]:CBC 1043 : Assert(oldtuple == NULL);
3223 : :
656 dean.a.rasheed@gmail 3224 : 1043 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3225 : : NULL, newslot, canSetTag,
3226 : : &updateCxt);
3227 : :
3228 : : /*
3229 : : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3230 : : * cross-partition update was done, then there's nothing
3231 : : * else for us to do --- the UPDATE has been turned into a
3232 : : * DELETE and an INSERT, and we must not perform any of
3233 : : * the usual post-update tasks. Also, the RETURNING tuple
3234 : : * (if any) has been projected, so we can just return
3235 : : * that.
3236 : : */
3237 [ + + ]: 1031 : if (updateCxt.crossPartUpdate)
3238 : : {
3239 : 69 : mtstate->mt_merge_updated += 1;
448 noah@leadboat.com 3240 : 69 : rslot = context->cpUpdateReturningSlot;
3241 : 69 : goto out;
3242 : : }
3243 : : }
3244 : :
656 dean.a.rasheed@gmail 3245 [ + + ]: 1001 : if (result == TM_Ok)
3246 : : {
1359 alvherre@alvh.no-ip. 3247 : 960 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3248 : : tupleid, NULL, newslot);
3249 : 954 : mtstate->mt_merge_updated += 1;
3250 : : }
3251 : 995 : break;
3252 : :
3253 : 263 : case CMD_DELETE:
639 dean.a.rasheed@gmail 3254 : 263 : mtstate->mt_merge_action = relaction;
1359 alvherre@alvh.no-ip. 3255 [ + + ]: 263 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3256 : : NULL, NULL, &result))
3257 : : {
1009 dean.a.rasheed@gmail 3258 [ + + ]: 6 : if (result == TM_Ok)
448 noah@leadboat.com 3259 : 3 : goto out; /* "do nothing" */
3260 : :
1009 dean.a.rasheed@gmail 3261 : 3 : break; /* concurrent update/delete */
3262 : : }
3263 : :
3264 : : /* INSTEAD OF ROW DELETE Triggers */
656 3265 [ + + ]: 257 : if (resultRelInfo->ri_TrigDesc &&
3266 [ + + ]: 28 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3267 : : {
3268 [ - + ]: 3 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3269 : : oldtuple))
448 noah@leadboat.com 3270 :UBC 0 : goto out; /* "do nothing" */
3271 : : }
3272 : : else
3273 : : {
3274 : : /* checked ri_needLockTagTuple above */
521 noah@leadboat.com 3275 [ - + ]:CBC 254 : Assert(oldtuple == NULL);
3276 : :
656 dean.a.rasheed@gmail 3277 : 254 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3278 : : false);
3279 : : }
3280 : :
1359 alvherre@alvh.no-ip. 3281 [ + + ]: 257 : if (result == TM_Ok)
3282 : : {
3283 : 248 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3284 : : false);
3285 : 248 : mtstate->mt_merge_deleted += 1;
3286 : : }
3287 : 257 : break;
3288 : :
3289 : 15 : case CMD_NOTHING:
3290 : : /* Doing nothing is always OK */
3291 : 15 : result = TM_Ok;
3292 : 15 : break;
3293 : :
1359 alvherre@alvh.no-ip. 3294 :UBC 0 : default:
626 dean.a.rasheed@gmail 3295 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3296 : : }
3297 : :
1359 alvherre@alvh.no-ip. 3298 [ + + + + :CBC 1277 : switch (result)
- - ]
3299 : : {
3300 : 1217 : case TM_Ok:
3301 : : /* all good; perform final actions */
1125 3302 [ + + + + ]: 1217 : if (canSetTag && commandType != CMD_NOTHING)
1359 3303 : 1191 : (estate->es_processed)++;
3304 : :
3305 : 1217 : break;
3306 : :
3307 : 16 : case TM_SelfModified:
3308 : :
3309 : : /*
3310 : : * The target tuple was already updated or deleted by the
3311 : : * current command, or by a later command in the current
3312 : : * transaction. The former case is explicitly disallowed by
3313 : : * the SQL standard for MERGE, which insists that the MERGE
3314 : : * join condition should not join a target row to more than
3315 : : * one source row.
3316 : : *
3317 : : * The latter case arises if the tuple is modified by a
3318 : : * command in a BEFORE trigger, or perhaps by a command in a
3319 : : * volatile function used in the query. In such situations we
3320 : : * should not ignore the MERGE action, but it is equally
3321 : : * unsafe to proceed. We don't want to discard the original
3322 : : * MERGE action while keeping the triggered actions based on
3323 : : * it; and it would be no better to allow the original MERGE
3324 : : * action while discarding the updates that it triggered. So
3325 : : * throwing an error is the only safe course.
3326 : : */
649 dean.a.rasheed@gmail 3327 [ + + ]: 16 : if (context->tmfd.cmax != estate->es_output_cid)
3328 [ + - ]: 6 : ereport(ERROR,
3329 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3330 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3331 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3332 : :
1359 alvherre@alvh.no-ip. 3333 [ + - ]: 10 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3334 [ + - ]: 10 : ereport(ERROR,
3335 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3336 : : /* translator: %s is a SQL command name */
3337 : : errmsg("%s command cannot affect row a second time",
3338 : : "MERGE"),
3339 : : errhint("Ensure that not more than one source row matches any one target row.")));
3340 : :
3341 : : /* This shouldn't happen */
1359 alvherre@alvh.no-ip. 3342 [ # # ]:UBC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3343 : : break;
3344 : :
1359 alvherre@alvh.no-ip. 3345 :CBC 5 : case TM_Deleted:
3346 [ - + ]: 5 : if (IsolationUsesXactSnapshot())
1359 alvherre@alvh.no-ip. 3347 [ # # ]:UBC 0 : ereport(ERROR,
3348 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3349 : : errmsg("could not serialize access due to concurrent delete")));
3350 : :
3351 : : /*
3352 : : * If the tuple was already deleted, set matched to false to
3353 : : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3354 : : */
639 dean.a.rasheed@gmail 3355 :CBC 5 : *matched = false;
448 noah@leadboat.com 3356 : 5 : goto out;
3357 : :
1359 alvherre@alvh.no-ip. 3358 : 39 : case TM_Updated:
3359 : : {
3360 : : bool was_matched;
3361 : : Relation resultRelationDesc;
3362 : : TupleTableSlot *epqslot,
3363 : : *inputslot;
3364 : : LockTupleMode lockmode;
3365 : :
3366 : : /*
3367 : : * The target tuple was concurrently updated by some other
3368 : : * transaction. If we are currently processing a MATCHED
3369 : : * action, use EvalPlanQual() with the new version of the
3370 : : * tuple and recheck the join qual, to detect a change
3371 : : * from the MATCHED to the NOT MATCHED cases. If we are
3372 : : * already processing a NOT MATCHED BY SOURCE action, we
3373 : : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3374 : : * MATCHED).
3375 : : */
626 dean.a.rasheed@gmail 3376 : 39 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
1359 alvherre@alvh.no-ip. 3377 : 39 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3378 : 39 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3379 : :
626 dean.a.rasheed@gmail 3380 [ + - ]: 39 : if (was_matched)
3381 : 39 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3382 : : resultRelInfo->ri_RangeTableIndex);
3383 : : else
626 dean.a.rasheed@gmail 3384 :UBC 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3385 : :
1359 alvherre@alvh.no-ip. 3386 :CBC 39 : result = table_tuple_lock(resultRelationDesc, tupleid,
3387 : : estate->es_snapshot,
3388 : : inputslot, estate->es_output_cid,
3389 : : lockmode, LockWaitBlock,
3390 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3391 : : &context->tmfd);
3392 [ + - + - ]: 39 : switch (result)
3393 : : {
3394 : 38 : case TM_Ok:
3395 : :
3396 : : /*
3397 : : * If the tuple was updated and migrated to
3398 : : * another partition concurrently, the current
3399 : : * MERGE implementation can't follow. There's
3400 : : * probably a better way to handle this case, but
3401 : : * it'd require recognizing the relation to which
3402 : : * the tuple moved, and setting our current
3403 : : * resultRelInfo to that.
3404 : : */
102 dean.a.rasheed@gmail 3405 [ - + ]: 38 : if (ItemPointerIndicatesMovedPartitions(tupleid))
1359 alvherre@alvh.no-ip. 3406 [ # # ]:UBC 0 : ereport(ERROR,
3407 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3408 : : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3409 : :
3410 : : /*
3411 : : * If this was a MATCHED case, use EvalPlanQual()
3412 : : * to recheck the join condition.
3413 : : */
626 dean.a.rasheed@gmail 3414 [ + - ]:CBC 38 : if (was_matched)
3415 : : {
3416 : 38 : epqslot = EvalPlanQual(epqstate,
3417 : : resultRelationDesc,
3418 : : resultRelInfo->ri_RangeTableIndex,
3419 : : inputslot);
3420 : :
3421 : : /*
3422 : : * If the subplan didn't return a tuple, then
3423 : : * we must be dealing with an inner join for
3424 : : * which the join condition no longer matches.
3425 : : * This can only happen if there are no NOT
3426 : : * MATCHED actions, and so there is nothing
3427 : : * more to do.
3428 : : */
3429 [ + - - + ]: 38 : if (TupIsNull(epqslot))
448 noah@leadboat.com 3430 :UBC 0 : goto out;
3431 : :
3432 : : /*
3433 : : * If we got a NULL ctid from the subplan, the
3434 : : * join quals no longer pass and we switch to
3435 : : * the NOT MATCHED BY SOURCE case.
3436 : : */
626 dean.a.rasheed@gmail 3437 :CBC 38 : (void) ExecGetJunkAttribute(epqslot,
3438 : 38 : resultRelInfo->ri_RowIdAttNo,
3439 : : &isNull);
3440 [ + + ]: 38 : if (isNull)
3441 : 2 : *matched = false;
3442 : :
3443 : : /*
3444 : : * Otherwise, recheck the join quals to see if
3445 : : * we need to switch to the NOT MATCHED BY
3446 : : * SOURCE case.
3447 : : */
448 noah@leadboat.com 3448 [ + + ]: 38 : if (resultRelInfo->ri_needLockTagTuple)
3449 : : {
3450 [ + - ]: 1 : if (ItemPointerIsValid(&lockedtid))
3451 : 1 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3452 : : InplaceUpdateTupleLock);
102 dean.a.rasheed@gmail 3453 : 1 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3454 : : InplaceUpdateTupleLock);
3455 : 1 : lockedtid = *tupleid;
3456 : : }
3457 : :
626 3458 [ - + ]: 38 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3459 : : tupleid,
3460 : : SnapshotAny,
3461 : : resultRelInfo->ri_oldTupleSlot))
626 dean.a.rasheed@gmail 3462 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch the target tuple");
3463 : :
626 dean.a.rasheed@gmail 3464 [ + + ]:CBC 38 : if (*matched)
3465 : 36 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3466 : : econtext);
3467 : :
3468 : : /* Switch lists, if necessary */
3469 [ + + ]: 38 : if (!*matched)
3470 : : {
3471 : 4 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3472 : :
3473 : : /*
3474 : : * If we have both NOT MATCHED BY SOURCE
3475 : : * and NOT MATCHED BY TARGET actions (a
3476 : : * full join between the source and target
3477 : : * relations), the single previously
3478 : : * matched tuple from the outer plan node
3479 : : * is treated as two not matched tuples,
3480 : : * in the same way as if they had not
3481 : : * matched to start with. Therefore, we
3482 : : * must adjust the outer plan node's tuple
3483 : : * count, if we're instrumenting the
3484 : : * query, to get the correct "skipped" row
3485 : : * count --- see show_modifytable_info().
3486 : : */
30 3487 [ + + ]: 4 : if (outerPlanState(mtstate)->instrument &&
3488 [ + - ]: 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] &&
3489 [ + - ]: 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET])
3490 : 1 : InstrUpdateTupleCount(outerPlanState(mtstate)->instrument, 1.0);
3491 : : }
3492 : : }
3493 : :
3494 : : /*
3495 : : * Loop back and process the MATCHED or NOT
3496 : : * MATCHED BY SOURCE actions from the start.
3497 : : */
1359 alvherre@alvh.no-ip. 3498 : 38 : goto lmerge_matched;
3499 : :
1359 alvherre@alvh.no-ip. 3500 :UBC 0 : case TM_Deleted:
3501 : :
3502 : : /*
3503 : : * tuple already deleted; tell caller to run NOT
3504 : : * MATCHED [BY TARGET] actions
3505 : : */
639 dean.a.rasheed@gmail 3506 : 0 : *matched = false;
448 noah@leadboat.com 3507 : 0 : goto out;
3508 : :
1359 alvherre@alvh.no-ip. 3509 :CBC 1 : case TM_SelfModified:
3510 : :
3511 : : /*
3512 : : * This can be reached when following an update
3513 : : * chain from a tuple updated by another session,
3514 : : * reaching a tuple that was already updated or
3515 : : * deleted by the current command, or by a later
3516 : : * command in the current transaction. As above,
3517 : : * this should always be treated as an error.
3518 : : */
3519 [ - + ]: 1 : if (context->tmfd.cmax != estate->es_output_cid)
1359 alvherre@alvh.no-ip. 3520 [ # # ]:UBC 0 : ereport(ERROR,
3521 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3522 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3523 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3524 : :
649 dean.a.rasheed@gmail 3525 [ + - ]:CBC 1 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3526 [ + - ]: 1 : ereport(ERROR,
3527 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3528 : : /* translator: %s is a SQL command name */
3529 : : errmsg("%s command cannot affect row a second time",
3530 : : "MERGE"),
3531 : : errhint("Ensure that not more than one source row matches any one target row.")));
3532 : :
3533 : : /* This shouldn't happen */
649 dean.a.rasheed@gmail 3534 [ # # ]:UBC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3535 : : goto out;
3536 : :
1359 alvherre@alvh.no-ip. 3537 : 0 : default:
3538 : : /* see table_tuple_lock call in ExecDelete() */
3539 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3540 : : result);
3541 : : goto out;
3542 : : }
3543 : : }
3544 : :
3545 : 0 : case TM_Invisible:
3546 : : case TM_WouldBlock:
3547 : : case TM_BeingModified:
3548 : : /* these should not occur */
3549 [ # # ]: 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3550 : : break;
3551 : : }
3552 : :
3553 : : /* Process RETURNING if present */
639 dean.a.rasheed@gmail 3554 [ + + ]:CBC 1217 : if (resultRelInfo->ri_projectReturning)
3555 : : {
3556 [ + + - - ]: 214 : switch (commandType)
3557 : : {
3558 : 94 : case CMD_UPDATE:
334 3559 : 94 : rslot = ExecProcessReturning(context,
3560 : : resultRelInfo,
3561 : : CMD_UPDATE,
3562 : : resultRelInfo->ri_oldTupleSlot,
3563 : : newslot,
3564 : : context->planSlot);
639 3565 : 94 : break;
3566 : :
3567 : 120 : case CMD_DELETE:
334 3568 : 120 : rslot = ExecProcessReturning(context,
3569 : : resultRelInfo,
3570 : : CMD_DELETE,
3571 : : resultRelInfo->ri_oldTupleSlot,
3572 : : NULL,
3573 : : context->planSlot);
639 3574 : 120 : break;
3575 : :
639 dean.a.rasheed@gmail 3576 :UBC 0 : case CMD_NOTHING:
3577 : 0 : break;
3578 : :
3579 : 0 : default:
3580 [ # # ]: 0 : elog(ERROR, "unrecognized commandType: %d",
3581 : : (int) commandType);
3582 : : }
3583 : : }
3584 : :
3585 : : /*
3586 : : * We've activated one of the WHEN clauses, so we don't search
3587 : : * further. This is required behaviour, not an optimization.
3588 : : */
1359 alvherre@alvh.no-ip. 3589 :CBC 1217 : break;
3590 : : }
3591 : :
3592 : : /*
3593 : : * Successfully executed an action or no qualifying action was found.
3594 : : */
448 noah@leadboat.com 3595 : 5979 : out:
3596 [ + + ]: 5979 : if (ItemPointerIsValid(&lockedtid))
3597 : 3988 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3598 : : InplaceUpdateTupleLock);
639 dean.a.rasheed@gmail 3599 : 5979 : return rslot;
3600 : : }
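 : :
 : : /*
 : :  * Editor's sketch (not in the original source; relation names are
 : :  * hypothetical).  The TM_SelfModified cardinality check above is what
 : :  * rejects a join in which two source rows match one target row within
 : :  * a single command.  Assuming target already holds a row with id = 1:
 : :  *
 : :  *     MERGE INTO target t
 : :  *       USING (VALUES (1, 'a'), (1, 'b')) AS s(id, val)
 : :  *       ON t.id = s.id
 : :  *       WHEN MATCHED THEN UPDATE SET val = s.val;
 : :  *
 : :  * The second source row finds the target tuple already updated by this
 : :  * command and fails with "MERGE command cannot affect row a second
 : :  * time".
 : :  */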
3601 : :
3602 : : /*
3603 : : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3604 : : */
3605 : : static TupleTableSlot *
1359 alvherre@alvh.no-ip. 3606 : 1352 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3607 : : bool canSetTag)
3608 : : {
3609 : 1352 : ModifyTableState *mtstate = context->mtstate;
3610 : 1352 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3611 : : List *actionStates;
639 dean.a.rasheed@gmail 3612 : 1352 : TupleTableSlot *rslot = NULL;
3613 : : ListCell *l;
3614 : :
3615 : : /*
3616 : : * For INSERT actions, the root relation's merge action is OK since the
3617 : : * INSERT's targetlist and the WHEN conditions can only refer to the
3618 : : * source relation and hence it does not matter which result relation we
3619 : : * work with.
3620 : : *
3621 : : * XXX does this mean that we can avoid creating copies of actionStates on
3622 : : * partitioned tables, for not-matched actions?
3623 : : */
626 3624 : 1352 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3625 : :
3626 : : /*
3627 : : * Make source tuple available to ExecQual and ExecProject. We don't need
3628 : : * the target tuple, since the WHEN quals and targetlist can't refer to
3629 : : * the target columns.
3630 : : */
1359 alvherre@alvh.no-ip. 3631 : 1352 : econtext->ecxt_scantuple = NULL;
3632 : 1352 : econtext->ecxt_innertuple = context->planSlot;
3633 : 1352 : econtext->ecxt_outertuple = NULL;
3634 : :
3635 [ + - + + : 1787 : foreach(l, actionStates)
+ + ]
3636 : : {
3637 : 1352 : MergeActionState *action = (MergeActionState *) lfirst(l);
3638 : 1352 : CmdType commandType = action->mas_action->commandType;
3639 : : TupleTableSlot *newslot;
3640 : :
3641 : : /*
3642 : : * Test condition, if any.
3643 : : *
3644 : : * In the absence of any condition, we perform the action
3645 : : * unconditionally (no need to check separately since ExecQual() will
3646 : : * return true if there are no conditions to evaluate).
3647 : : */
3648 [ + + ]: 1352 : if (!ExecQual(action->mas_whenqual, econtext))
3649 : 435 : continue;
3650 : :
3651 : : /* Perform stated action */
3652 [ + - - ]: 917 : switch (commandType)
3653 : : {
3654 : 917 : case CMD_INSERT:
3655 : :
3656 : : /*
3657 : : * Project the tuple. In case of a partitioned table, the
3658 : : * projection was already built to use the root's descriptor,
3659 : : * so we don't need to map the tuple here.
3660 : : */
3661 : 917 : newslot = ExecProject(action->mas_proj);
639 dean.a.rasheed@gmail 3662 : 917 : mtstate->mt_merge_action = action;
3663 : :
3664 : 917 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3665 : : newslot, canSetTag, NULL, NULL);
1359 alvherre@alvh.no-ip. 3666 : 887 : mtstate->mt_merge_inserted += 1;
3667 : 887 : break;
1359 alvherre@alvh.no-ip. 3668 :UBC 0 : case CMD_NOTHING:
3669 : : /* Do nothing */
3670 : 0 : break;
3671 : 0 : default:
3672 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3673 : : }
3674 : :
3675 : : /*
3676 : : * We've activated one of the WHEN clauses, so we don't search
3677 : : * further. This is required behaviour, not an optimization.
3678 : : */
1359 alvherre@alvh.no-ip. 3679 :CBC 887 : break;
3680 : : }
3681 : :
639 dean.a.rasheed@gmail 3682 : 1322 : return rslot;
3683 : : }
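 : :
 : : /*
 : :  * Editor's note (not in the original source; relation names are
 : :  * hypothetical).  Because a NOT MATCHED [BY TARGET] row has no target
 : :  * tuple, an action here can reference only source columns, e.g.:
 : :  *
 : :  *     WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val)
 : :  *
 : :  * References to target columns in such actions are rejected earlier,
 : :  * at parse analysis, which is why ecxt_scantuple can safely be left
 : :  * NULL above.
 : :  */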
3684 : :
3685 : : /*
3686 : : * Initialize state for execution of MERGE.
3687 : : */
3688 : : void
1359 alvherre@alvh.no-ip. 3689 : 808 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3690 : : {
302 amitlan@postgresql.o 3691 : 808 : List *mergeActionLists = mtstate->mt_mergeActionLists;
3692 : 808 : List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
1359 alvherre@alvh.no-ip. 3693 : 808 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3694 : : ResultRelInfo *resultRelInfo;
3695 : : ExprContext *econtext;
3696 : : ListCell *lc;
3697 : : int i;
3698 : :
302 amitlan@postgresql.o 3699 [ - + ]: 808 : if (mergeActionLists == NIL)
1359 alvherre@alvh.no-ip. 3700 :UBC 0 : return;
3701 : :
1359 alvherre@alvh.no-ip. 3702 :CBC 808 : mtstate->mt_merge_subcommands = 0;
3703 : :
3704 [ + + ]: 808 : if (mtstate->ps.ps_ExprContext == NULL)
3705 : 664 : ExecAssignExprContext(estate, &mtstate->ps);
3706 : 808 : econtext = mtstate->ps.ps_ExprContext;
3707 : :
3708 : : /*
3709 : : * Create a MergeActionState for each action on the mergeActionList and
3710 : : * add it to either a list of matched actions or not-matched actions.
3711 : : *
3712 : : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3713 : : * anything here, do so there too.
3714 : : */
3715 : 808 : i = 0;
302 amitlan@postgresql.o 3716 [ + - + + : 1737 : foreach(lc, mergeActionLists)
+ + ]
3717 : : {
1359 alvherre@alvh.no-ip. 3718 : 929 : List *mergeActionList = lfirst(lc);
3719 : : Node *joinCondition;
3720 : : TupleDesc relationDesc;
3721 : : ListCell *l;
3722 : :
302 amitlan@postgresql.o 3723 : 929 : joinCondition = (Node *) list_nth(mergeJoinConditions, i);
1359 alvherre@alvh.no-ip. 3724 : 929 : resultRelInfo = mtstate->resultRelInfo + i;
3725 : 929 : i++;
3726 : 929 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3727 : :
3728 : : /* initialize slots for MERGE fetches from this rel */
3729 [ + - ]: 929 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3730 : 929 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3731 : :
3732 : : /* initialize state for join condition checking */
626 dean.a.rasheed@gmail 3733 : 929 : resultRelInfo->ri_MergeJoinCondition =
3734 : 929 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3735 : :
1359 alvherre@alvh.no-ip. 3736 [ + - + + : 2561 : foreach(l, mergeActionList)
+ + ]
3737 : : {
3738 : 1632 : MergeAction *action = (MergeAction *) lfirst(l);
3739 : : MergeActionState *action_state;
3740 : : TupleTableSlot *tgtslot;
3741 : : TupleDesc tgtdesc;
3742 : :
3743 : : /*
3744 : : * Build action merge state for this rel. (For partitions,
3745 : : * equivalent code exists in ExecInitPartitionInfo.)
3746 : : */
3747 : 1632 : action_state = makeNode(MergeActionState);
3748 : 1632 : action_state->mas_action = action;
3749 : 1632 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3750 : : &mtstate->ps);
3751 : :
3752 : : /*
3753 : : * We create three lists - one for each MergeMatchKind - and stick
3754 : : * the MergeActionState into the appropriate list.
3755 : : */
626 dean.a.rasheed@gmail 3756 : 3264 : resultRelInfo->ri_MergeActions[action->matchKind] =
3757 : 1632 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3758 : : action_state);
3759 : :
1359 alvherre@alvh.no-ip. 3760 [ + + + + : 1632 : switch (action->commandType)
- ]
3761 : : {
3762 : 540 : case CMD_INSERT:
3763 : : /* INSERT actions always use rootRelInfo */
3764 : 540 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3765 : : action->targetList);
3766 : :
3767 : : /*
3768 : : * If the MERGE targets a partitioned table, any INSERT
3769 : : * actions must be routed through it, not the child
3770 : : * relations. Initialize the routing struct and the root
3771 : : * table's "new" tuple slot for that, if not already done.
3772 : : * The projection we prepare, for all relations, uses the
3773 : : * root relation descriptor, and targets the plan's root
3774 : : * slot. (This is consistent with the fact that we
3775 : : * checked the plan output to match the root relation,
3776 : : * above.)
3777 : : */
3778 [ + + ]: 540 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3779 : : RELKIND_PARTITIONED_TABLE)
3780 : : {
3781 [ + + ]: 168 : if (mtstate->mt_partition_tuple_routing == NULL)
3782 : : {
3783 : : /*
3784 : : * Initialize planstate for routing if not already
3785 : : * done.
3786 : : *
3787 : : * Note that the slot is managed as a standalone
3788 : : * slot belonging to ModifyTableState, so we pass
3789 : : * NULL for the 2nd argument.
3790 : : */
3791 : 79 : mtstate->mt_root_tuple_slot =
3792 : 79 : table_slot_create(rootRelInfo->ri_RelationDesc,
3793 : : NULL);
3794 : 79 : mtstate->mt_partition_tuple_routing =
3795 : 79 : ExecSetupPartitionTupleRouting(estate,
3796 : : rootRelInfo->ri_RelationDesc);
3797 : : }
3798 : 168 : tgtslot = mtstate->mt_root_tuple_slot;
3799 : 168 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3800 : : }
3801 : : else
3802 : : {
3803 : : /*
3804 : : * If the MERGE targets an inherited table, we insert
3805 : : * into the root table, so we must initialize its
3806 : : * "new" tuple slot, if not already done, and use its
3807 : : * relation descriptor for the projection.
3808 : : *
3809 : : * For non-inherited tables, rootRelInfo and
3810 : : * resultRelInfo are the same, and the "new" tuple
3811 : : * slot will already have been initialized.
3812 : : */
199 dean.a.rasheed@gmail 3813 [ + + ]: 372 : if (rootRelInfo->ri_newTupleSlot == NULL)
3814 : 18 : rootRelInfo->ri_newTupleSlot =
3815 : 18 : table_slot_create(rootRelInfo->ri_RelationDesc,
3816 : : &estate->es_tupleTable);
3817 : :
3818 : 372 : tgtslot = rootRelInfo->ri_newTupleSlot;
3819 : 372 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3820 : : }
3821 : :
1359 alvherre@alvh.no-ip. 3822 : 540 : action_state->mas_proj =
3823 : 540 : ExecBuildProjectionInfo(action->targetList, econtext,
3824 : : tgtslot,
3825 : : &mtstate->ps,
3826 : : tgtdesc);
3827 : :
3828 : 540 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3829 : 540 : break;
3830 : 819 : case CMD_UPDATE:
3831 : 819 : action_state->mas_proj =
3832 : 819 : ExecBuildUpdateProjection(action->targetList,
3833 : : true,
3834 : : action->updateColnos,
3835 : : relationDesc,
3836 : : econtext,
3837 : : resultRelInfo->ri_newTupleSlot,
3838 : : &mtstate->ps);
3839 : 819 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3840 : 819 : break;
3841 : 235 : case CMD_DELETE:
3842 : 235 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3843 : 235 : break;
3844 : 38 : case CMD_NOTHING:
3845 : 38 : break;
1359 alvherre@alvh.no-ip. 3846 :UBC 0 : default:
262 dean.a.rasheed@gmail 3847 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3848 : : break;
3849 : : }
3850 : : }
3851 : : }
3852 : :
3853 : : /*
3854 : : * If the MERGE targets an inherited table, any INSERT actions will use
3855 : : * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
3856 : : * Therefore we must initialize its WITH CHECK OPTION constraints and
3857 : : * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
3858 : : * entries.
3859 : : *
3860 : : * Note that the planner does not build a withCheckOptionList or
3861 : : * returningList for the root relation, but as in ExecInitPartitionInfo,
3862 : : * we can use the first resultRelInfo entry as a reference to calculate
3863 : : * the attno's for the root table.
3864 : : */
199 dean.a.rasheed@gmail 3865 [ + + ]:CBC 808 : if (rootRelInfo != mtstate->resultRelInfo &&
3866 [ + + ]: 124 : rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
3867 [ + + ]: 24 : (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
3868 : : {
3869 : 18 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3870 : 18 : Relation rootRelation = rootRelInfo->ri_RelationDesc;
3871 : 18 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
3872 : 18 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
3873 : 18 : AttrMap *part_attmap = NULL;
3874 : : bool found_whole_row;
3875 : :
3876 [ + + ]: 18 : if (node->withCheckOptionLists != NIL)
3877 : : {
3878 : : List *wcoList;
3879 : 9 : List *wcoExprs = NIL;
3880 : :
3881 : : /* There should be as many WCO lists as result rels */
3882 [ - + ]: 9 : Assert(list_length(node->withCheckOptionLists) ==
3883 : : list_length(node->resultRelations));
3884 : :
3885 : : /*
3886 : : * Use the first WCO list as a reference. In the most common case,
3887 : : * this will be for the same relation as rootRelInfo, and so there
3888 : : * will be no need to adjust its attno's.
3889 : : */
3890 : 9 : wcoList = linitial(node->withCheckOptionLists);
3891 [ + - ]: 9 : if (rootRelation != firstResultRel)
3892 : : {
3893 : : /* Convert any Vars in it to contain the root's attno's */
3894 : : part_attmap =
3895 : 9 : build_attrmap_by_name(RelationGetDescr(rootRelation),
3896 : : RelationGetDescr(firstResultRel),
3897 : : false);
3898 : :
3899 : : wcoList = (List *)
3900 : 9 : map_variable_attnos((Node *) wcoList,
3901 : : firstVarno, 0,
3902 : : part_attmap,
3903 : 9 : RelationGetForm(rootRelation)->reltype,
3904 : : &found_whole_row);
3905 : : }
3906 : :
3907 [ + - + + : 45 : foreach(lc, wcoList)
+ + ]
3908 : : {
3909 : 36 : WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
3910 : 36 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
3911 : : &mtstate->ps);
3912 : :
3913 : 36 : wcoExprs = lappend(wcoExprs, wcoExpr);
3914 : : }
3915 : :
3916 : 9 : rootRelInfo->ri_WithCheckOptions = wcoList;
3917 : 9 : rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
3918 : : }
3919 : :
3920 [ + + ]: 18 : if (node->returningLists != NIL)
3921 : : {
3922 : : List *returningList;
3923 : :
3924 : : /* There should be as many returning lists as result rels */
3925 [ - + ]: 3 : Assert(list_length(node->returningLists) ==
3926 : : list_length(node->resultRelations));
3927 : :
3928 : : /*
3929 : : * Use the first returning list as a reference. In the most common
3930 : : * case, this will be for the same relation as rootRelInfo, and so
3931 : : * there will be no need to adjust its attno's.
3932 : : */
3933 : 3 : returningList = linitial(node->returningLists);
3934 [ + - ]: 3 : if (rootRelation != firstResultRel)
3935 : : {
3936 : : /* Convert any Vars in it to contain the root's attno's */
3937 [ - + ]: 3 : if (part_attmap == NULL)
3938 : : part_attmap =
199 dean.a.rasheed@gmail 3939 :UBC 0 : build_attrmap_by_name(RelationGetDescr(rootRelation),
3940 : : RelationGetDescr(firstResultRel),
3941 : : false);
3942 : :
3943 : : returningList = (List *)
199 dean.a.rasheed@gmail 3944 :CBC 3 : map_variable_attnos((Node *) returningList,
3945 : : firstVarno, 0,
3946 : : part_attmap,
3947 : 3 : RelationGetForm(rootRelation)->reltype,
3948 : : &found_whole_row);
3949 : : }
3950 : 3 : rootRelInfo->ri_returningList = returningList;
3951 : :
3952 : : /* Initialize the RETURNING projection */
3953 : 3 : rootRelInfo->ri_projectReturning =
3954 : 3 : ExecBuildProjectionInfo(returningList, econtext,
3955 : : mtstate->ps.ps_ResultTupleSlot,
3956 : : &mtstate->ps,
3957 : : RelationGetDescr(rootRelation));
3958 : : }
3959 : : }
3960 : : }
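 : :
 : : /*
 : :  * Editor's note (not in the original source; relation names are
 : :  * hypothetical).  A consequence of the INSERT-action setup above: for
 : :  * a MERGE whose target is partitioned, e.g.
 : :  *
 : :  *     MERGE INTO measurements m USING staging s ON m.ts = s.ts
 : :  *       WHEN NOT MATCHED THEN INSERT VALUES (s.ts, s.reading);
 : :  *
 : :  * the new tuple is formed in the root table's slot and routed to the
 : :  * correct partition by the partition tuple routing machinery, rather
 : :  * than being inserted through a child result relation.
 : :  */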
3961 : :
3962 : : /*
3963 : : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3964 : : *
3965 : : * We mark 'projectNewInfoValid' even though the projections themselves
3966 : : * are not initialized here.
3967 : : */
3968 : : void
1359 alvherre@alvh.no-ip. 3969 : 941 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3970 : : ResultRelInfo *resultRelInfo)
3971 : : {
3972 : 941 : EState *estate = mtstate->ps.state;
3973 : :
3974 [ - + ]: 941 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3975 : :
3976 : 941 : resultRelInfo->ri_oldTupleSlot =
3977 : 941 : table_slot_create(resultRelInfo->ri_RelationDesc,
3978 : : &estate->es_tupleTable);
3979 : 941 : resultRelInfo->ri_newTupleSlot =
3980 : 941 : table_slot_create(resultRelInfo->ri_RelationDesc,
3981 : : &estate->es_tupleTable);
3982 : 941 : resultRelInfo->ri_projectNewInfoValid = true;
3983 : 941 : }
3984 : :
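Editor's sketch: the projections themselves are built lazily; ExecModifyTable tests ri_projectNewInfoValid and runs the per-operation init routine only for relations that actually receive a tuple. Below is a minimal sketch of that init-on-first-use pattern, with made-up stand-in types (not the executor structures).

    /* lazy_init.c: init-on-first-use guarded by a validity flag */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct RelState
    {
        bool  proj_valid;   /* plays the role of ri_projectNewInfoValid */
        int  *old_slot;     /* stand-ins for the old/new tuple slots */
        int  *new_slot;
    } RelState;

    static void
    init_projection(RelState *rel)
    {
        rel->old_slot = malloc(sizeof(int));
        rel->new_slot = malloc(sizeof(int));
        rel->proj_valid = true;
        puts("projection state initialized");
    }

    static void
    process_tuple(RelState *rel, int value)
    {
        if (!rel->proj_valid)   /* done at most once per result relation */
            init_projection(rel);
        *rel->old_slot = *rel->new_slot = value;
    }

    int
    main(void)
    {
        RelState rel = {0};

        for (int i = 0; i < 3; i++) /* init happens only on the first tuple */
            process_tuple(&rel, i);
        free(rel.old_slot);
        free(rel.new_slot);
        return 0;
    }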
3985 : : /*
3986 : : * Process BEFORE EACH STATEMENT triggers
3987 : : */
3988 : : static void
5911 tgl@sss.pgh.pa.us 3989 : 56029 : fireBSTriggers(ModifyTableState *node)
3990 : : {
2829 alvherre@alvh.no-ip. 3991 : 56029 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
1884 heikki.linnakangas@i 3992 : 56029 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3993 : :
5911 tgl@sss.pgh.pa.us 3994 [ + + + + : 56029 : switch (node->operation)
- ]
3995 : : {
3996 : 42032 : case CMD_INSERT:
3151 rhaas@postgresql.org 3997 : 42032 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
2829 alvherre@alvh.no-ip. 3998 [ + + ]: 42026 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3875 andres@anarazel.de 3999 : 431 : ExecBSUpdateTriggers(node->ps.state,
4000 : : resultRelInfo);
5911 tgl@sss.pgh.pa.us 4001 : 42026 : break;
4002 : 7190 : case CMD_UPDATE:
3151 rhaas@postgresql.org 4003 : 7190 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
5911 tgl@sss.pgh.pa.us 4004 : 7190 : break;
4005 : 6076 : case CMD_DELETE:
3151 rhaas@postgresql.org 4006 : 6076 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
5911 tgl@sss.pgh.pa.us 4007 : 6076 : break;
1359 alvherre@alvh.no-ip. 4008 : 731 : case CMD_MERGE:
4009 [ + + ]: 731 : if (node->mt_merge_subcommands & MERGE_INSERT)
4010 : 400 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4011 [ + + ]: 731 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4012 : 489 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4013 [ + + ]: 731 : if (node->mt_merge_subcommands & MERGE_DELETE)
4014 : 193 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4015 : 731 : break;
5911 tgl@sss.pgh.pa.us 4016 :UBC 0 : default:
4017 [ # # ]: 0 : elog(ERROR, "unknown operation");
4018 : : break;
4019 : : }
5911 tgl@sss.pgh.pa.us 4020 :CBC 56023 : }
4021 : :
4022 : : /*
4023 : : * Process AFTER EACH STATEMENT triggers
4024 : : */
4025 : : static void
3093 rhodiumtoad@postgres 4026 : 54376 : fireASTriggers(ModifyTableState *node)
4027 : : {
2829 alvherre@alvh.no-ip. 4028 : 54376 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
1884 heikki.linnakangas@i 4029 : 54376 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4030 : :
5911 tgl@sss.pgh.pa.us 4031 [ + + + + : 54376 : switch (node->operation)
- ]
4032 : : {
4033 : 40867 : case CMD_INSERT:
2829 alvherre@alvh.no-ip. 4034 [ + + ]: 40867 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3875 andres@anarazel.de 4035 : 377 : ExecASUpdateTriggers(node->ps.state,
4036 : : resultRelInfo,
3013 tgl@sss.pgh.pa.us 4037 : 377 : node->mt_oc_transition_capture);
3093 rhodiumtoad@postgres 4038 : 40867 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4039 : 40867 : node->mt_transition_capture);
5911 tgl@sss.pgh.pa.us 4040 : 40867 : break;
4041 : 6834 : case CMD_UPDATE:
3093 rhodiumtoad@postgres 4042 : 6834 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4043 : 6834 : node->mt_transition_capture);
5911 tgl@sss.pgh.pa.us 4044 : 6834 : break;
4045 : 6021 : case CMD_DELETE:
3093 rhodiumtoad@postgres 4046 : 6021 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4047 : 6021 : node->mt_transition_capture);
5911 tgl@sss.pgh.pa.us 4048 : 6021 : break;
1359 alvherre@alvh.no-ip. 4049 : 654 : case CMD_MERGE:
4050 [ + + ]: 654 : if (node->mt_merge_subcommands & MERGE_DELETE)
4051 : 175 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4052 : 175 : node->mt_transition_capture);
4053 [ + + ]: 654 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4054 : 439 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4055 : 439 : node->mt_transition_capture);
4056 [ + + ]: 654 : if (node->mt_merge_subcommands & MERGE_INSERT)
4057 : 366 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4058 : 366 : node->mt_transition_capture);
4059 : 654 : break;
5911 tgl@sss.pgh.pa.us 4060 :UBC 0 : default:
4061 [ # # ]: 0 : elog(ERROR, "unknown operation");
4062 : : break;
4063 : : }
5911 tgl@sss.pgh.pa.us 4064 :CBC 54376 : }
4065 : :
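Editor's sketch: note that for MERGE, fireBSTriggers() fires statement triggers in INSERT/UPDATE/DELETE order while fireASTriggers() fires them in the mirror-image DELETE/UPDATE/INSERT order. A self-contained sketch of gating the firing on a subcommand bitmask follows; the names are invented, not the trigger API.

    /* merge_triggers.c: statement-trigger gating on a MERGE subcommand mask */
    #include <stdio.h>

    #define MERGE_INSERT 0x01
    #define MERGE_UPDATE 0x02
    #define MERGE_DELETE 0x04

    static void
    fire_before(int subcommands)
    {
        if (subcommands & MERGE_INSERT) puts("BEFORE STATEMENT INSERT");
        if (subcommands & MERGE_UPDATE) puts("BEFORE STATEMENT UPDATE");
        if (subcommands & MERGE_DELETE) puts("BEFORE STATEMENT DELETE");
    }

    static void
    fire_after(int subcommands)
    {
        /* mirror image of fire_before(), as in fireASTriggers() */
        if (subcommands & MERGE_DELETE) puts("AFTER STATEMENT DELETE");
        if (subcommands & MERGE_UPDATE) puts("AFTER STATEMENT UPDATE");
        if (subcommands & MERGE_INSERT) puts("AFTER STATEMENT INSERT");
    }

    int
    main(void)
    {
        int merge = MERGE_INSERT | MERGE_UPDATE; /* MERGE with two action types */

        fire_before(merge);
        puts("... per-row work ...");
        fire_after(merge);
        return 0;
    }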
4066 : : /*
4067 : : * Set up the state needed for collecting transition tuples for AFTER
4068 : : * triggers.
4069 : : */
4070 : : static void
3093 rhodiumtoad@postgres 4071 : 56211 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
4072 : : {
2829 alvherre@alvh.no-ip. 4073 : 56211 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
1884 heikki.linnakangas@i 4074 : 56211 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4075 : :
4076 : : /* Check for transition tables on the directly targeted relation. */
3093 rhodiumtoad@postgres 4077 : 56211 : mtstate->mt_transition_capture =
3013 tgl@sss.pgh.pa.us 4078 : 56211 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4079 : 56211 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4080 : : mtstate->operation);
2829 alvherre@alvh.no-ip. 4081 [ + + ]: 56211 : if (plan->operation == CMD_INSERT &&
4082 [ + + ]: 42036 : plan->onConflictAction == ONCONFLICT_UPDATE)
3013 tgl@sss.pgh.pa.us 4083 : 434 : mtstate->mt_oc_transition_capture =
4084 : 434 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4085 : 434 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4086 : : CMD_UPDATE);
2888 rhaas@postgresql.org 4087 : 56211 : }
4088 : :
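Editor's sketch: one statement can need two capture states here, since INSERT ... ON CONFLICT DO UPDATE may fire both INSERT and UPDATE triggers, hence the separate mt_oc_transition_capture. A toy sketch of that decision, with invented stand-in types rather than MakeTransitionCaptureState():

    /* capture_state.c: one statement, up to two transition-capture states */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Capture { const char *cmd; } Capture;

    static Capture *
    make_capture(bool has_transition_tables, const char *cmd)
    {
        Capture *c;

        if (!has_transition_tables)
            return NULL;        /* nothing to collect for this command */
        c = malloc(sizeof(Capture));
        c->cmd = cmd;
        return c;
    }

    int
    main(void)
    {
        bool     has_tt = true; /* pretend the table has transition tables */
        bool     on_conflict_update = true;
        Capture *insert_cap = make_capture(has_tt, "INSERT");
        Capture *update_cap = NULL;

        /* INSERT ... ON CONFLICT DO UPDATE can fire UPDATE triggers too */
        if (on_conflict_update)
            update_cap = make_capture(has_tt, "UPDATE");

        if (insert_cap) printf("capturing for %s\n", insert_cap->cmd);
        if (update_cap) printf("capturing for %s\n", update_cap->cmd);
        free(insert_cap);
        free(update_cap);
        return 0;
    }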
4089 : : /*
4090 : : * ExecPrepareTupleRouting --- prepare for routing one tuple
4091 : : *
4092 : : * Determine the partition in which the tuple in slot is to be inserted,
4093 : : * and return its ResultRelInfo in *partRelInfo. The return value is
4094 : : * a slot holding the tuple of the partition rowtype.
4095 : : *
4096 : : * This also sets the transition table information in mtstate based on the
4097 : : * selected partition.
4098 : : */
4099 : : static TupleTableSlot *
2829 alvherre@alvh.no-ip. 4100 : 379349 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
4101 : : EState *estate,
4102 : : PartitionTupleRouting *proute,
4103 : : ResultRelInfo *targetRelInfo,
4104 : : TupleTableSlot *slot,
4105 : : ResultRelInfo **partRelInfo)
4106 : : {
4107 : : ResultRelInfo *partrel;
4108 : : TupleConversionMap *map;
4109 : :
4110 : : /*
4111 : : * Look up the target partition's ResultRelInfo. If ExecFindPartition does
4112 : : * not find a valid partition for the tuple in 'slot' then an error is
4113 : : * raised. An error may also be raised if the found partition is not a
4114 : : * valid target for INSERTs. This is required since a partitioned table
4115 : : * UPDATE to another partition becomes a DELETE+INSERT.
4116 : : */
2587 4117 : 379349 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4118 : :
4119 : : /*
4120 : : * If we're capturing transition tuples, we might need to convert from the
4121 : : * partition rowtype to the root partitioned table's rowtype. But if there
4122 : : * are no BEFORE triggers on the partition that could change the tuple, we
4123 : : * can just remember the original unconverted tuple to avoid a needless
4124 : : * round trip conversion.
4125 : : */
2829 4126 [ + + ]: 379238 : if (mtstate->mt_transition_capture != NULL)
4127 : : {
4128 : : bool has_before_insert_row_trig;
4129 : :
1884 heikki.linnakangas@i 4130 [ + + ]: 98 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
4131 [ + + ]: 21 : partrel->ri_TrigDesc->trig_insert_before_row);
4132 : :
4133 : 77 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
4134 [ + + ]: 77 : !has_before_insert_row_trig ? slot : NULL;
4135 : : }
4136 : :
4137 : : /*
4138 : : * Convert the tuple, if necessary.
4139 : : */
1110 alvherre@alvh.no-ip. 4140 : 379238 : map = ExecGetRootToChildMap(partrel, estate);
2632 andres@anarazel.de 4141 [ + + ]: 379238 : if (map != NULL)
4142 : : {
1884 heikki.linnakangas@i 4143 : 34267 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4144 : :
2632 andres@anarazel.de 4145 : 34267 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4146 : : }
4147 : :
1889 heikki.linnakangas@i 4148 : 379238 : *partRelInfo = partrel;
2829 alvherre@alvh.no-ip. 4149 : 379238 : return slot;
4150 : : }
4151 : :
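Editor's sketch: the conversion step above reorders the tuple's columns through an attribute map when the partition's physical rowtype differs from the root's. A minimal standalone sketch of such a map-driven conversion follows, using toy integer "tuples" rather than TupleTableSlots; the names are invented.

    /* toy_convert.c: attribute-map-driven tuple conversion */
    #include <stdio.h>

    /*
     * map[i] holds the 1-based source column feeding destination column
     * i + 1, in the spirit of TupleConversionMap's attrMap.
     */
    static void
    convert_tuple(const int *src, const int *map, int ndst, int *dst)
    {
        for (int i = 0; i < ndst; i++)
            dst[i] = src[map[i] - 1];
    }

    int
    main(void)
    {
        /* root order: (id, payload, ts); partition order: (ts, id, payload) */
        int root_tuple[3] = {42, 7, 1700000000};
        int map[3] = {3, 1, 2}; /* partition col 1 <- root col 3, etc. */
        int part_tuple[3];

        convert_tuple(root_tuple, map, 3, part_tuple);
        printf("partition tuple: (%d, %d, %d)\n",
               part_tuple[0], part_tuple[1], part_tuple[2]);
        return 0;
    }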
4152 : : /* ----------------------------------------------------------------
4153 : : * ExecModifyTable
4154 : : *
4155 : : * Perform table modifications as required, and return RETURNING results
4156 : : * if needed.
4157 : : * ----------------------------------------------------------------
4158 : : */
4159 : : static TupleTableSlot *
3074 andres@anarazel.de 4160 : 60489 : ExecModifyTable(PlanState *pstate)
4161 : : {
4162 : 60489 : ModifyTableState *node = castNode(ModifyTableState, pstate);
4163 : : ModifyTableContext context;
5772 bruce@momjian.us 4164 : 60489 : EState *estate = node->ps.state;
4165 : 60489 : CmdType operation = node->operation;
4166 : : ResultRelInfo *resultRelInfo;
4167 : : PlanState *subplanstate;
4168 : : TupleTableSlot *slot;
4169 : : TupleTableSlot *oldSlot;
4170 : : ItemPointerData tuple_ctid;
4171 : : HeapTupleData oldtupdata;
4172 : : HeapTuple oldtuple;
4173 : : ItemPointer tupleid;
4174 : : bool tuplock;
4175 : :
3066 andres@anarazel.de 4176 [ - + ]: 60489 : CHECK_FOR_INTERRUPTS();
4177 : :
4178 : : /*
4179 : : * This should NOT get called during EvalPlanQual; we should have passed a
4180 : : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4181 : : * Assert because this condition is easy to miss in testing. (Note:
4182 : : * although ModifyTable should not get executed within an EvalPlanQual
4183 : : * operation, we do have to allow it to be initialized and shut down in
4184 : : * case it is within a CTE subplan. Hence this test must be here, not in
4185 : : * ExecInitModifyTable.)
4186 : : */
2294 4187 [ - + ]: 60489 : if (estate->es_epq_active != NULL)
5071 tgl@sss.pgh.pa.us 4188 [ # # ]:UBC 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4189 : :
4190 : : /*
4191 : : * If we've already completed processing, don't try to do more. We need
4192 : : * this test because ExecPostprocessPlan might call us an extra time, and
4193 : : * our subplan's nodes aren't necessarily robust against being called
4194 : : * extra times.
4195 : : */
5408 tgl@sss.pgh.pa.us 4196 [ + + ]:CBC 60489 : if (node->mt_done)
4197 : 399 : return NULL;
4198 : :
4199 : : /*
4200 : : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4201 : : */
5911 4202 [ + + ]: 60090 : if (node->fireBSTriggers)
4203 : : {
4204 : 56029 : fireBSTriggers(node);
4205 : 56023 : node->fireBSTriggers = false;
4206 : : }
4207 : :
4208 : : /* Preload local variables */
1721 4209 : 60084 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4210 : 60084 : subplanstate = outerPlanState(node);
4211 : :
4212 : : /* Set global context */
1370 alvherre@alvh.no-ip. 4213 : 60084 : context.mtstate = node;
4214 : 60084 : context.epqstate = &node->mt_epqstate;
4215 : 60084 : context.estate = estate;
4216 : :
4217 : : /*
4218 : : * Fetch rows from subplan, and execute the required table modification
4219 : : * for each row.
4220 : : */
4221 : : for (;;)
4222 : : {
4223 : : /*
4224 : : * Reset the per-output-tuple exprcontext. This is needed because
4225 : : * triggers expect to use that context as workspace. It's a bit ugly
4226 : : * to do this below the top level of the plan, however. We might need
4227 : : * to rethink this later.
4228 : : */
5599 tgl@sss.pgh.pa.us 4229 [ + + ]: 7171363 : ResetPerTupleExprContext(estate);
4230 : :
4231 : : /*
4232 : : * Reset per-tuple memory context used for processing on conflict and
4233 : : * returning clauses, to free any expression evaluation storage
4234 : : * allocated in the previous cycle.
4235 : : */
2590 andres@anarazel.de 4236 [ + + ]: 7171363 : if (pstate->ps_ExprContext)
4237 : 177427 : ResetExprContext(pstate->ps_ExprContext);
4238 : :
4239 : : /*
4240 : : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4241 : : * to execute, do so now --- see the comments in ExecMerge().
4242 : : */
626 dean.a.rasheed@gmail 4243 [ + + ]: 7171363 : if (node->mt_merge_pending_not_matched != NULL)
4244 : : {
4245 : 2 : context.planSlot = node->mt_merge_pending_not_matched;
334 4246 : 2 : context.cpDeletedSlot = NULL;
4247 : :
626 4248 : 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4249 : 2 : node->canSetTag);
4250 : :
4251 : : /* Clear the pending action */
4252 : 2 : node->mt_merge_pending_not_matched = NULL;
4253 : :
4254 : : /*
4255 : : * If we got a RETURNING result, return it to the caller. We'll
4256 : : * continue the work on next call.
4257 : : */
4258 [ + - ]: 2 : if (slot)
4259 : 2 : return slot;
4260 : :
626 dean.a.rasheed@gmail 4261 :UBC 0 : continue; /* continue with the next tuple */
4262 : : }
4263 : :
4264 : : /* Fetch the next row from subplan */
1336 alvherre@alvh.no-ip. 4265 :CBC 7171361 : context.planSlot = ExecProcNode(subplanstate);
334 dean.a.rasheed@gmail 4266 : 7171152 : context.cpDeletedSlot = NULL;
4267 : :
4268 : : /* No more tuples to process? */
1336 alvherre@alvh.no-ip. 4269 [ + + + + ]: 7171152 : if (TupIsNull(context.planSlot))
4270 : : break;
4271 : :
4272 : : /*
4273 : : * When there are multiple result relations, each tuple contains a
4274 : : * junk column that gives the OID of the rel from which it came.
4275 : : * Extract it and select the correct result relation.
4276 : : */
1721 tgl@sss.pgh.pa.us 4277 [ + + ]: 7116775 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4278 : : {
4279 : : Datum datum;
4280 : : bool isNull;
4281 : : Oid resultoid;
4282 : :
1336 alvherre@alvh.no-ip. 4283 : 2615 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4284 : : &isNull);
1721 tgl@sss.pgh.pa.us 4285 [ + + ]: 2615 : if (isNull)
4286 : : {
4287 : : /*
4288 : : * For commands other than MERGE, any tuples having InvalidOid
4289 : : * for tableoid are errors. For MERGE, we may need to handle
4290 : : * them as WHEN NOT MATCHED clauses if any, so do that.
4291 : : *
4292 : : * Note that we use the node's toplevel resultRelInfo, not any
4293 : : * specific partition's.
4294 : : */
1359 alvherre@alvh.no-ip. 4295 [ + - ]: 254 : if (operation == CMD_MERGE)
4296 : : {
1336 4297 : 254 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4298 : :
639 dean.a.rasheed@gmail 4299 : 254 : slot = ExecMerge(&context, node->resultRelInfo,
4300 : 254 : NULL, NULL, node->canSetTag);
4301 : :
4302 : : /*
4303 : : * If we got a RETURNING result, return it to the caller.
4304 : : * We'll continue the work on next call.
4305 : : */
4306 [ + + ]: 248 : if (slot)
4307 : 19 : return slot;
4308 : :
4309 : 229 : continue; /* continue with the next tuple */
4310 : : }
4311 : :
1721 tgl@sss.pgh.pa.us 4312 [ # # ]:UBC 0 : elog(ERROR, "tableoid is NULL");
4313 : : }
1721 tgl@sss.pgh.pa.us 4314 :CBC 2361 : resultoid = DatumGetObjectId(datum);
4315 : :
4316 : : /* If it's not the same as last time, we need to locate the rel */
4317 [ + + ]: 2361 : if (resultoid != node->mt_lastResultOid)
1715 4318 : 1624 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4319 : : false, true);
4320 : : }
4321 : :
4322 : : /*
4323 : : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4324 : : * here is compute the RETURNING expressions.
4325 : : */
3560 rhaas@postgresql.org 4326 [ + + ]: 7116521 : if (resultRelInfo->ri_usesFdwDirectModify)
4327 : : {
4328 [ - + ]: 347 : Assert(resultRelInfo->ri_projectReturning);
4329 : :
4330 : : /*
4331 : : * A scan slot containing the data that was actually inserted,
4332 : : * updated or deleted has already been made available to
4333 : : * ExecProcessReturning by IterateDirectModify, so no need to
4334 : : * provide it here. The individual old and new slots are not
4335 : : * needed, since direct-modify is disabled if the RETURNING list
4336 : : * refers to OLD/NEW values.
4337 : : */
334 dean.a.rasheed@gmail 4338 [ + - - + ]: 347 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4339 : : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4340 : :
4341 : 347 : slot = ExecProcessReturning(&context, resultRelInfo, operation,
4342 : : NULL, NULL, context.planSlot);
4343 : :
3560 rhaas@postgresql.org 4344 : 347 : return slot;
4345 : : }
4346 : :
1336 alvherre@alvh.no-ip. 4347 : 7116174 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4348 : 7116174 : slot = context.planSlot;
4349 : :
2941 tgl@sss.pgh.pa.us 4350 : 7116174 : tupleid = NULL;
4286 noah@leadboat.com 4351 : 7116174 : oldtuple = NULL;
4352 : :
4353 : : /*
4354 : : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4355 : : * to be updated/deleted/merged. For a heap relation, that's a TID;
4356 : : * otherwise we may have a wholerow junk attr that carries the old
4357 : : * tuple in toto. Keep this in step with the part of
4358 : : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4359 : : */
1359 alvherre@alvh.no-ip. 4360 [ + + + + : 7116174 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
+ + ]
4361 : : operation == CMD_MERGE)
4362 : : {
4363 : : char relkind;
4364 : : Datum datum;
4365 : : bool isNull;
4366 : :
1721 tgl@sss.pgh.pa.us 4367 : 935820 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4368 [ + + + + ]: 935820 : if (relkind == RELKIND_RELATION ||
4369 [ + + ]: 285 : relkind == RELKIND_MATVIEW ||
4370 : : relkind == RELKIND_PARTITIONED_TABLE)
4371 : : {
4372 : : /* ri_RowIdAttNo refers to a ctid attribute */
4373 [ - + ]: 935538 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
4374 : 935538 : datum = ExecGetJunkAttribute(slot,
4375 : 935538 : resultRelInfo->ri_RowIdAttNo,
4376 : : &isNull);
4377 : :
4378 : : /*
4379 : : * For commands other than MERGE, any tuples having a null row
4380 : : * identifier are errors. For MERGE, we may need to handle
4381 : : * them as WHEN NOT MATCHED clauses if any, so do that.
4382 : : *
4383 : : * Note that we use the node's toplevel resultRelInfo, not any
4384 : : * specific partition's.
4385 : : */
4386 [ + + ]: 935538 : if (isNull)
4387 : : {
1359 alvherre@alvh.no-ip. 4388 [ + - ]: 1065 : if (operation == CMD_MERGE)
4389 : : {
1336 4390 : 1065 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4391 : :
639 dean.a.rasheed@gmail 4392 : 1065 : slot = ExecMerge(&context, node->resultRelInfo,
4393 : 1065 : NULL, NULL, node->canSetTag);
4394 : :
4395 : : /*
4396 : : * If we got a RETURNING result, return it to the
4397 : : * caller. We'll continue the work on next call.
4398 : : */
4399 [ + + ]: 1044 : if (slot)
4400 : 64 : return slot;
4401 : :
4402 : 1001 : continue; /* continue with the next tuple */
4403 : : }
4404 : :
1721 tgl@sss.pgh.pa.us 4405 [ # # ]:UBC 0 : elog(ERROR, "ctid is NULL");
4406 : : }
4407 : :
1721 tgl@sss.pgh.pa.us 4408 :CBC 934473 : tupleid = (ItemPointer) DatumGetPointer(datum);
4409 : 934473 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4410 : 934473 : tupleid = &tuple_ctid;
4411 : : }
4412 : :
4413 : : /*
4414 : : * Use the wholerow attribute, when available, to reconstruct the
4415 : : * old relation tuple. The old tuple serves one or both of two
4416 : : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4417 : : * provides values for any unchanged columns for the NEW tuple of
4418 : : * an UPDATE, because the subplan does not produce all the columns
4419 : : * of the target table.
4420 : : *
4421 : : * Note that the wholerow attribute does not carry system columns,
4422 : : * so foreign table triggers miss seeing those, except that we
4423 : : * know enough here to set t_tableOid. Quite separately from
4424 : : * this, the FDW may fetch its own junk attrs to identify the row.
4425 : : *
4426 : : * Other relevant relkinds, currently limited to views, always
4427 : : * have a wholerow attribute.
4428 : : */
4429 [ + + ]: 282 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4430 : : {
4431 : 267 : datum = ExecGetJunkAttribute(slot,
4432 : 267 : resultRelInfo->ri_RowIdAttNo,
4433 : : &isNull);
4434 : :
4435 : : /*
4436 : : * For commands other than MERGE, any tuples having a null row
4437 : : * identifier are errors. For MERGE, we may need to handle
4438 : : * them as WHEN NOT MATCHED clauses if any, so do that.
4439 : : *
4440 : : * Note that we use the node's toplevel resultRelInfo, not any
4441 : : * specific partition's.
4442 : : */
4443 [ + + ]: 267 : if (isNull)
4444 : : {
656 dean.a.rasheed@gmail 4445 [ + - ]: 24 : if (operation == CMD_MERGE)
4446 : : {
4447 : 24 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4448 : :
639 4449 : 24 : slot = ExecMerge(&context, node->resultRelInfo,
4450 : 24 : NULL, NULL, node->canSetTag);
4451 : :
4452 : : /*
4453 : : * If we got a RETURNING result, return it to the
4454 : : * caller. We'll continue the work on next call.
4455 : : */
4456 [ + + ]: 21 : if (slot)
4457 : 6 : return slot;
4458 : :
4459 : 15 : continue; /* continue with the next tuple */
4460 : : }
4461 : :
1721 tgl@sss.pgh.pa.us 4462 [ # # ]:UBC 0 : elog(ERROR, "wholerow is NULL");
4463 : : }
4464 : :
1721 tgl@sss.pgh.pa.us 4465 :CBC 243 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4466 : 243 : oldtupdata.t_len =
4467 : 243 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4468 : 243 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4469 : : /* Historically, view triggers see invalid t_tableOid. */
4470 : 243 : oldtupdata.t_tableOid =
4471 [ + + ]: 243 : (relkind == RELKIND_VIEW) ? InvalidOid :
4472 : 105 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4473 : :
4474 : 243 : oldtuple = &oldtupdata;
4475 : : }
4476 : : else
4477 : : {
4478 : : /* Only foreign tables are allowed to omit a row-ID attr */
4479 [ - + ]: 15 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4480 : : }
4481 : : }
4482 : :
5911 4483 [ + + + + : 7115085 : switch (operation)
- ]
4484 : : {
4485 : 6180354 : case CMD_INSERT:
4486 : : /* Initialize projection info if first time for this table */
1715 4487 [ + + ]: 6180354 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4488 : 41436 : ExecInitInsertProjection(node, resultRelInfo);
1336 alvherre@alvh.no-ip. 4489 : 6180354 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
1370 4490 : 6180354 : slot = ExecInsert(&context, resultRelInfo, slot,
1367 4491 : 6180354 : node->canSetTag, NULL, NULL);
5911 tgl@sss.pgh.pa.us 4492 : 6179286 : break;
4493 : :
4494 : 159985 : case CMD_UPDATE:
448 noah@leadboat.com 4495 : 159985 : tuplock = false;
4496 : :
4497 : : /* Initialize projection info if first time for this table */
1715 tgl@sss.pgh.pa.us 4498 [ + + ]: 159985 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4499 : 7035 : ExecInitUpdateProjection(node, resultRelInfo);
4500 : :
4501 : : /*
4502 : : * Make the new tuple by combining plan's output tuple with
4503 : : * the old tuple being updated.
4504 : : */
1721 4505 : 159985 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4506 [ + + ]: 159985 : if (oldtuple != NULL)
4507 : : {
448 noah@leadboat.com 4508 [ - + ]: 159 : Assert(!resultRelInfo->ri_needLockTagTuple);
4509 : : /* Use the wholerow junk attr as the old tuple. */
1721 tgl@sss.pgh.pa.us 4510 : 159 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4511 : : }
4512 : : else
4513 : : {
4514 : : /* Fetch the most recent version of the old tuple. */
4515 : 159826 : Relation relation = resultRelInfo->ri_RelationDesc;
4516 : :
448 noah@leadboat.com 4517 [ + + ]: 159826 : if (resultRelInfo->ri_needLockTagTuple)
4518 : : {
4519 : 13396 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4520 : 13396 : tuplock = true;
4521 : : }
1721 tgl@sss.pgh.pa.us 4522 [ - + ]: 159826 : if (!table_tuple_fetch_row_version(relation, tupleid,
4523 : : SnapshotAny,
4524 : : oldSlot))
1721 tgl@sss.pgh.pa.us 4525 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
4526 : : }
1009 dean.a.rasheed@gmail 4527 :CBC 159985 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4528 : : oldSlot);
4529 : :
4530 : : /* Now apply the update. */
1370 alvherre@alvh.no-ip. 4531 : 159985 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
334 dean.a.rasheed@gmail 4532 : 159985 : oldSlot, slot, node->canSetTag);
448 noah@leadboat.com 4533 [ + + ]: 159733 : if (tuplock)
4534 : 13396 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4535 : : InplaceUpdateTupleLock);
5911 tgl@sss.pgh.pa.us 4536 : 159733 : break;
4537 : :
4538 : 768453 : case CMD_DELETE:
1370 alvherre@alvh.no-ip. 4539 : 768453 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
614 akorotkov@postgresql 4540 : 768453 : true, false, node->canSetTag, NULL, NULL, NULL);
5911 tgl@sss.pgh.pa.us 4541 : 768419 : break;
4542 : :
1359 alvherre@alvh.no-ip. 4543 : 6293 : case CMD_MERGE:
656 dean.a.rasheed@gmail 4544 : 6293 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4545 : 6293 : node->canSetTag);
1359 alvherre@alvh.no-ip. 4546 : 6246 : break;
4547 : :
5911 tgl@sss.pgh.pa.us 4548 :UBC 0 : default:
4549 [ # # ]: 0 : elog(ERROR, "unknown operation");
4550 : : break;
4551 : : }
4552 : :
4553 : : /*
4554 : : * If we got a RETURNING result, return it to caller. We'll continue
4555 : : * the work on next call.
4556 : : */
5911 tgl@sss.pgh.pa.us 4557 [ + + ]:CBC 7113684 : if (slot)
4558 : 3635 : return slot;
4559 : : }
4560 : :
4561 : : /*
4562 : : * Insert remaining tuples for batch insert.
4563 : : */
1117 efujita@postgresql.o 4564 [ + + ]: 54377 : if (estate->es_insert_pending_result_relations != NIL)
4565 : 13 : ExecPendingInserts(estate);
4566 : :
4567 : : /*
4568 : : * We're done, but fire AFTER STATEMENT triggers before exiting.
4569 : : */
5911 tgl@sss.pgh.pa.us 4570 : 54376 : fireASTriggers(node);
4571 : :
5408 4572 : 54376 : node->mt_done = true;
4573 : :
5911 4574 : 54376 : return NULL;
4575 : : }
4576 : :
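Editor's sketch: the control flow worth noting above is that with RETURNING the function hands back one slot per modified row and resumes on the next call, while without it the loop drains the subplan in a single call and returns NULL (with mt_done guarding against extra calls). A self-contained sketch of that shape, greatly simplified and with invented names:

    /* drain_or_yield.c: return per row with RETURNING, else drain */
    #include <stdbool.h>
    #include <stdio.h>

    static int next_input = 0;

    /* stand-in for ExecProcNode() on the subplan: yields 3 rows, then EOF */
    static bool
    fetch_row(int *row)
    {
        if (next_input >= 3)
            return false;
        *row = next_input++;
        return true;
    }

    /* one ExecModifyTable-style call: returns true when it emits a row */
    static bool
    modify_table(bool returning, bool *done, int *emitted)
    {
        int row;

        if (*done)                  /* cf. the mt_done test above */
            return false;
        while (fetch_row(&row))
        {
            printf("modified row %d\n", row);
            if (returning)
            {
                *emitted = row;     /* hand one result back to the caller */
                return true;
            }
        }
        *done = true;               /* drained: all work done in one call */
        return false;
    }

    int
    main(void)
    {
        bool done = false;
        int  out;

        while (modify_table(true, &done, &out))
            printf("RETURNING %d\n", out);
        return 0;
    }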
4577 : : /*
4578 : : * ExecLookupResultRelByOid
4579 : : * If the table with given OID is among the result relations to be
4580 : : * updated by the given ModifyTable node, return its ResultRelInfo.
4581 : : *
4582 : : * If not found, return NULL if missing_ok, else raise error.
4583 : : *
4584 : : * If update_cache is true, then upon successful lookup, update the node's
4585 : : * one-element cache. ONLY ExecModifyTable may pass true for this.
4586 : : */
4587 : : ResultRelInfo *
1715 4588 : 5510 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4589 : : bool missing_ok, bool update_cache)
4590 : : {
4591 [ + + ]: 5510 : if (node->mt_resultOidHash)
4592 : : {
4593 : : /* Use the pre-built hash table to locate the rel */
4594 : : MTTargetRelLookup *mtlookup;
4595 : :
4596 : : mtlookup = (MTTargetRelLookup *)
4597 : 562 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4598 [ + - ]: 562 : if (mtlookup)
4599 : : {
4600 [ + + ]: 562 : if (update_cache)
4601 : : {
4602 : 412 : node->mt_lastResultOid = resultoid;
4603 : 412 : node->mt_lastResultIndex = mtlookup->relationIndex;
4604 : : }
4605 : 562 : return node->resultRelInfo + mtlookup->relationIndex;
4606 : : }
4607 : : }
4608 : : else
4609 : : {
4610 : : /* With few target rels, just search the ResultRelInfo array */
4611 [ + + ]: 8948 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4612 : : {
4613 : 5316 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4614 : :
4615 [ + + ]: 5316 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4616 : : {
4617 [ + + ]: 1316 : if (update_cache)
4618 : : {
4619 : 1212 : node->mt_lastResultOid = resultoid;
4620 : 1212 : node->mt_lastResultIndex = ndx;
4621 : : }
4622 : 1316 : return rInfo;
4623 : : }
4624 : : }
4625 : : }
4626 : :
4627 [ - + ]: 3632 : if (!missing_ok)
1715 tgl@sss.pgh.pa.us 4628 [ # # ]:UBC 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
1715 tgl@sss.pgh.pa.us 4629 :CBC 3632 : return NULL;
4630 : : }
4631 : :
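Editor's sketch: a standalone rendition of the same lookup shape, a one-element cache in front of a linear scan, with the understanding that the real code switches to a hash table once the relation count reaches MT_NRELS_HASH. Toy OIDs and invented types, not the executor structures:

    /* rel_lookup.c: cached OID -> index lookup with linear-scan fallback */
    #include <stdio.h>

    #define NRELS 5

    typedef struct Lookup
    {
        unsigned oids[NRELS];   /* stand-in for the ResultRelInfo array */
        unsigned last_oid;      /* one-element cache, like mt_lastResultOid */
        int      last_index;
    } Lookup;

    static int
    lookup_rel(Lookup *lk, unsigned oid)
    {
        if (oid == lk->last_oid)    /* fast path: same rel as last time */
            return lk->last_index;

        /*
         * With few rels a linear scan beats a hash table; the real code
         * builds a hash table above the MT_NRELS_HASH threshold instead.
         */
        for (int i = 0; i < NRELS; i++)
        {
            if (lk->oids[i] == oid)
            {
                lk->last_oid = oid; /* refresh the cache */
                lk->last_index = i;
                return i;
            }
        }
        return -1;                  /* not a target of this statement */
    }

    int
    main(void)
    {
        Lookup lk = {{101, 102, 103, 104, 105}, 0, 0};

        printf("oid 103 -> index %d\n", lookup_rel(&lk, 103));
        printf("oid 103 -> index %d (cached)\n", lookup_rel(&lk, 103));
        printf("oid 999 -> index %d\n", lookup_rel(&lk, 999));
        return 0;
    }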
4632 : : /* ----------------------------------------------------------------
4633 : : * ExecInitModifyTable
4634 : : * ----------------------------------------------------------------
4635 : : */
4636 : : ModifyTableState *
5911 4637 : 56728 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4638 : : {
4639 : : ModifyTableState *mtstate;
1721 4640 : 56728 : Plan *subplan = outerPlan(node);
5911 4641 : 56728 : CmdType operation = node->operation;
272 amitlan@postgresql.o 4642 : 56728 : int total_nrels = list_length(node->resultRelations);
4643 : : int nrels;
312 4644 : 56728 : List *resultRelations = NIL;
4645 : 56728 : List *withCheckOptionLists = NIL;
4646 : 56728 : List *returningLists = NIL;
4647 : 56728 : List *updateColnosLists = NIL;
302 4648 : 56728 : List *mergeActionLists = NIL;
4649 : 56728 : List *mergeJoinConditions = NIL;
4650 : : ResultRelInfo *resultRelInfo;
4651 : : List *arowmarks;
4652 : : ListCell *l;
4653 : : int i;
4654 : : Relation rel;
4655 : :
4656 : : /* check for unsupported flags */
5911 tgl@sss.pgh.pa.us 4657 [ - + ]: 56728 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4658 : :
4659 : : /*
4660 : : * Only consider unpruned relations for initializing their ResultRelInfo
4661 : : * struct and other fields such as withCheckOptions, etc.
4662 : : *
4663 : : * Note: We must avoid pruning every result relation. This is important
4664 : : * for MERGE, since even if every result relation is pruned from the
4665 : : * subplan, there might still be NOT MATCHED rows, for which there may be
4666 : : * INSERT actions to perform. To allow these actions to be found, at
4667 : : * least one result relation must be kept. Also, when inserting into a
4668 : : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4669 : : * as a reference for building the ResultRelInfo of the target partition.
4670 : : * In either case, it doesn't matter which result relation is kept, so we
4671 : : * just keep the first one, if all others have been pruned. See also,
4672 : : * ExecDoInitialPruning(), which ensures that this first result relation
4673 : : * has been locked.
4674 : : */
312 amitlan@postgresql.o 4675 : 56728 : i = 0;
4676 [ + - + + : 114722 : foreach(l, node->resultRelations)
+ + ]
4677 : : {
4678 : 57994 : Index rti = lfirst_int(l);
4679 : : bool keep_rel;
4680 : :
272 4681 : 57994 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4682 [ + + + + : 57994 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
+ + ]
4683 : : {
4684 : : /* all result relations pruned; keep the first one */
4685 : 24 : keep_rel = true;
4686 : 24 : rti = linitial_int(node->resultRelations);
4687 : 24 : i = 0;
4688 : : }
4689 : :
4690 [ + + ]: 57994 : if (keep_rel)
4691 : : {
312 4692 : 57951 : resultRelations = lappend_int(resultRelations, rti);
4693 [ + + ]: 57951 : if (node->withCheckOptionLists)
4694 : : {
4695 : 772 : List *withCheckOptions = list_nth_node(List,
4696 : : node->withCheckOptionLists,
4697 : : i);
4698 : :
4699 : 772 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4700 : : }
4701 [ + + ]: 57951 : if (node->returningLists)
4702 : : {
4703 : 2557 : List *returningList = list_nth_node(List,
4704 : : node->returningLists,
4705 : : i);
4706 : :
4707 : 2557 : returningLists = lappend(returningLists, returningList);
4708 : : }
4709 [ + + ]: 57951 : if (node->updateColnosLists)
4710 : : {
4711 : 8406 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4712 : :
4713 : 8406 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4714 : : }
302 4715 [ + + ]: 57951 : if (node->mergeActionLists)
4716 : : {
4717 : 935 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4718 : :
4719 : 935 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4720 : : }
4721 [ + + ]: 57951 : if (node->mergeJoinConditions)
4722 : : {
4723 : 935 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4724 : :
4725 : 935 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4726 : : }
4727 : : }
312 4728 : 57994 : i++;
4729 : : }
4730 : 56728 : nrels = list_length(resultRelations);
272 4731 [ - + ]: 56728 : Assert(nrels > 0);
4732 : :
4733 : : /*
4734 : : * create state structure
4735 : : */
5911 tgl@sss.pgh.pa.us 4736 : 56728 : mtstate = makeNode(ModifyTableState);
4737 : 56728 : mtstate->ps.plan = (Plan *) node;
4738 : 56728 : mtstate->ps.state = estate;
3074 andres@anarazel.de 4739 : 56728 : mtstate->ps.ExecProcNode = ExecModifyTable;
4740 : :
5408 tgl@sss.pgh.pa.us 4741 : 56728 : mtstate->operation = operation;
4742 : 56728 : mtstate->canSetTag = node->canSetTag;
4743 : 56728 : mtstate->mt_done = false;
4744 : :
1721 4745 : 56728 : mtstate->mt_nrels = nrels;
6 michael@paquier.xyz 4746 :GNC 56728 : mtstate->resultRelInfo = palloc_array(ResultRelInfo, nrels);
4747 : :
626 dean.a.rasheed@gmail 4748 :CBC 56728 : mtstate->mt_merge_pending_not_matched = NULL;
1359 alvherre@alvh.no-ip. 4749 : 56728 : mtstate->mt_merge_inserted = 0;
4750 : 56728 : mtstate->mt_merge_updated = 0;
4751 : 56728 : mtstate->mt_merge_deleted = 0;
312 amitlan@postgresql.o 4752 : 56728 : mtstate->mt_updateColnosLists = updateColnosLists;
302 4753 : 56728 : mtstate->mt_mergeActionLists = mergeActionLists;
4754 : 56728 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4755 : :
4756 : : /*----------
4757 : : * Resolve the target relation. This is the same as:
4758 : : *
4759 : : * - the relation for which we will fire FOR STATEMENT triggers,
4760 : : * - the relation into whose tuple format all captured transition tuples
4761 : : * must be converted, and
4762 : : * - the root partitioned table used for tuple routing.
4763 : : *
4764 : : * If it's a partitioned or inherited table, the root partition or
4765 : : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4766 : : * given explicitly in node->rootRelation. Otherwise, the target relation
4767 : : * is the sole relation in the node->resultRelations list and, since it can
4768 : : * never be pruned, also in the resultRelations list constructed above.
4769 : : *----------
4770 : : */
1890 heikki.linnakangas@i 4771 [ + + ]: 56728 : if (node->rootRelation > 0)
4772 : : {
312 amitlan@postgresql.o 4773 [ - + ]: 1467 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
1890 heikki.linnakangas@i 4774 : 1467 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4775 : 1467 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4776 : : node->rootRelation);
4777 : : }
4778 : : else
4779 : : {
784 tgl@sss.pgh.pa.us 4780 [ - + ]: 55261 : Assert(list_length(node->resultRelations) == 1);
302 amitlan@postgresql.o 4781 [ - + ]: 55261 : Assert(list_length(resultRelations) == 1);
1884 heikki.linnakangas@i 4782 : 55261 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4783 : 55261 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
302 amitlan@postgresql.o 4784 : 55261 : linitial_int(resultRelations));
4785 : : }
4786 : :
4787 : : /* set up epqstate with dummy subplan data for the moment */
942 tgl@sss.pgh.pa.us 4788 : 56728 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4789 : : node->epqParam, resultRelations);
5911 4790 : 56728 : mtstate->fireBSTriggers = true;
4791 : :
4792 : : /*
4793 : : * Build state for collecting transition tuples. This requires having a
4794 : : * valid trigger query context, so skip it in explain-only mode.
4795 : : */
1884 heikki.linnakangas@i 4796 [ + + ]: 56728 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4797 : 56211 : ExecSetupTransitionCaptureState(mtstate, estate);
4798 : :
4799 : : /*
4800 : : * Open all the result relations and initialize the ResultRelInfo structs.
4801 : : * (But the root relation was initialized above, if it's part of the array.)
4802 : : * We must do this before initializing the subplan, because direct-modify
4803 : : * FDWs expect their ResultRelInfos to be available.
4804 : : */
5408 tgl@sss.pgh.pa.us 4805 : 56728 : resultRelInfo = mtstate->resultRelInfo;
5911 4806 : 56728 : i = 0;
312 amitlan@postgresql.o 4807 [ + - + + : 114508 : foreach(l, resultRelations)
+ + ]
4808 : : {
1890 heikki.linnakangas@i 4809 : 57948 : Index resultRelation = lfirst_int(l);
656 dean.a.rasheed@gmail 4810 : 57948 : List *mergeActions = NIL;
4811 : :
302 amitlan@postgresql.o 4812 [ + + ]: 57948 : if (mergeActionLists)
4813 : 935 : mergeActions = list_nth(mergeActionLists, i);
4814 : :
1884 heikki.linnakangas@i 4815 [ + + ]: 57948 : if (resultRelInfo != mtstate->rootResultRelInfo)
4816 : : {
4817 : 2687 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4818 : :
4819 : : /*
4820 : : * For child result relations, store the root result relation
4821 : : * pointer. We do so for the convenience of places that want to
4822 : : * look at the query's original target relation but don't have the
4823 : : * mtstate handy.
4824 : : */
1715 tgl@sss.pgh.pa.us 4825 : 2687 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4826 : : }
4827 : :
4828 : : /* Initialize the usesFdwDirectModify flag */
1370 alvherre@alvh.no-ip. 4829 : 57948 : resultRelInfo->ri_usesFdwDirectModify =
4830 : 57948 : bms_is_member(i, node->fdwDirectModifyPlans);
4831 : :
4832 : : /*
4833 : : * Verify result relation is a valid target for the current operation
4834 : : */
103 dean.a.rasheed@gmail 4835 : 57948 : CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
4836 : : mergeActions);
4837 : :
1721 tgl@sss.pgh.pa.us 4838 : 57780 : resultRelInfo++;
4839 : 57780 : i++;
4840 : : }
4841 : :
4842 : : /*
4843 : : * Now we may initialize the subplan.
4844 : : */
4845 : 56560 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4846 : :
4847 : : /*
4848 : : * Do additional per-result-relation initialization.
4849 : : */
4850 [ + + ]: 114323 : for (i = 0; i < nrels; i++)
4851 : : {
4852 : 57763 : resultRelInfo = &mtstate->resultRelInfo[i];
4853 : :
4854 : : /* Let FDWs init themselves for foreign-table result rels */
3560 rhaas@postgresql.org 4855 [ + + ]: 57763 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4856 [ + + ]: 57659 : resultRelInfo->ri_FdwRoutine != NULL &&
4664 tgl@sss.pgh.pa.us 4857 [ + - ]: 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4858 : : {
4859 : 170 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4860 : :
4861 : 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4862 : : resultRelInfo,
4863 : : fdw_private,
4864 : : i,
4865 : : eflags);
4866 : : }
4867 : :
4868 : : /*
4869 : : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4870 : : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4871 : : * tables, the FDW might have created additional junk attr(s), but
4872 : : * those are no concern of ours.
4873 : : */
1359 alvherre@alvh.no-ip. 4874 [ + + + + : 57763 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
+ + ]
4875 : : operation == CMD_MERGE)
4876 : : {
4877 : : char relkind;
4878 : :
1715 tgl@sss.pgh.pa.us 4879 : 15594 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4880 [ + + + + ]: 15594 : if (relkind == RELKIND_RELATION ||
4881 [ + + ]: 348 : relkind == RELKIND_MATVIEW ||
4882 : : relkind == RELKIND_PARTITIONED_TABLE)
4883 : : {
4884 : 15264 : resultRelInfo->ri_RowIdAttNo =
4885 : 15264 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4886 [ - + ]: 15264 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1715 tgl@sss.pgh.pa.us 4887 [ # # ]:UBC 0 : elog(ERROR, "could not find junk ctid column");
4888 : : }
1715 tgl@sss.pgh.pa.us 4889 [ + + ]:CBC 330 : else if (relkind == RELKIND_FOREIGN_TABLE)
4890 : : {
4891 : : /*
4892 : : * We don't support MERGE with foreign tables for now. (It's
4893 : : * problematic because the implementation uses CTID.)
4894 : : */
1359 alvherre@alvh.no-ip. 4895 [ - + ]: 186 : Assert(operation != CMD_MERGE);
4896 : :
4897 : : /*
4898 : : * When there is a row-level trigger, there should be a
4899 : : * wholerow attribute. We also require it to be present in
4900 : : * UPDATE and MERGE, so we can get the values of unchanged
4901 : : * columns.
4902 : : */
1715 tgl@sss.pgh.pa.us 4903 : 186 : resultRelInfo->ri_RowIdAttNo =
4904 : 186 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4905 : : "wholerow");
1359 alvherre@alvh.no-ip. 4906 [ + + - + ]: 186 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
1715 tgl@sss.pgh.pa.us 4907 [ - + ]: 105 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1715 tgl@sss.pgh.pa.us 4908 [ # # ]:UBC 0 : elog(ERROR, "could not find junk wholerow column");
4909 : : }
4910 : : else
4911 : : {
4912 : : /* Other valid target relkinds must provide wholerow */
1715 tgl@sss.pgh.pa.us 4913 :CBC 144 : resultRelInfo->ri_RowIdAttNo =
4914 : 144 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4915 : : "wholerow");
4916 [ - + ]: 144 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1715 tgl@sss.pgh.pa.us 4917 [ # # ]:UBC 0 : elog(ERROR, "could not find junk wholerow column");
4918 : : }
4919 : : }
4920 : : }
4921 : :
4922 : : /*
4923 : : * If this is an inherited update/delete/merge, there will be a junk
4924 : : * attribute named "tableoid" present in the subplan's targetlist. It
4925 : : * will be used to identify the result relation for a given tuple to be
4926 : : * updated/deleted/merged.
4927 : : */
1715 tgl@sss.pgh.pa.us 4928 :CBC 56560 : mtstate->mt_resultOidAttno =
4929 : 56560 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
272 amitlan@postgresql.o 4930 [ + + - + ]: 56560 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
1715 tgl@sss.pgh.pa.us 4931 : 56560 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4932 : 56560 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4933 : :
4934 : : /* Get the root target relation */
1884 heikki.linnakangas@i 4935 : 56560 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4936 : :
4937 : : /*
4938 : : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4939 : : * or MERGE might need this too, but only if it actually moves tuples
4940 : : * between partitions; in that case setup is done by
4941 : : * ExecCrossPartitionUpdate.
4942 : : */
2888 rhaas@postgresql.org 4943 [ + + + + ]: 56560 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4944 : : operation == CMD_INSERT)
2854 4945 : 2252 : mtstate->mt_partition_tuple_routing =
1715 tgl@sss.pgh.pa.us 4946 : 2252 : ExecSetupPartitionTupleRouting(estate, rel);
4947 : :
4948 : : /*
4949 : : * Initialize any WITH CHECK OPTION constraints if needed.
4950 : : */
4534 sfrost@snowman.net 4951 : 56560 : resultRelInfo = mtstate->resultRelInfo;
312 amitlan@postgresql.o 4952 [ + + + + : 57332 : foreach(l, withCheckOptionLists)
+ + ]
4953 : : {
4534 sfrost@snowman.net 4954 : 772 : List *wcoList = (List *) lfirst(l);
4955 : 772 : List *wcoExprs = NIL;
4956 : : ListCell *ll;
4957 : :
4958 [ + - + + : 2239 : foreach(ll, wcoList)
+ + ]
4959 : : {
4960 : 1467 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
3199 andres@anarazel.de 4961 : 1467 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4962 : : &mtstate->ps);
4963 : :
4534 sfrost@snowman.net 4964 : 1467 : wcoExprs = lappend(wcoExprs, wcoExpr);
4965 : : }
4966 : :
4967 : 772 : resultRelInfo->ri_WithCheckOptions = wcoList;
4968 : 772 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4969 : 772 : resultRelInfo++;
4970 : : }
4971 : :
4972 : : /*
4973 : : * Initialize RETURNING projections if needed.
4974 : : */
312 amitlan@postgresql.o 4975 [ + + ]: 56560 : if (returningLists)
4976 : : {
4977 : : TupleTableSlot *slot;
4978 : : ExprContext *econtext;
4979 : :
4980 : : /*
4981 : : * Initialize result tuple slot and assign its rowtype using the plan
4982 : : * node's declared targetlist, which the planner set up to be the same
4983 : : * as the first (before runtime pruning) RETURNING list. We assume
4984 : : * all the result rels will produce compatible output.
4985 : : */
2588 andres@anarazel.de 4986 : 2382 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
5911 tgl@sss.pgh.pa.us 4987 : 2382 : slot = mtstate->ps.ps_ResultTupleSlot;
4988 : :
4989 : : /* Need an econtext too */
3199 andres@anarazel.de 4990 [ + - ]: 2382 : if (mtstate->ps.ps_ExprContext == NULL)
4991 : 2382 : ExecAssignExprContext(estate, &mtstate->ps);
4992 : 2382 : econtext = mtstate->ps.ps_ExprContext;
4993 : :
4994 : : /*
4995 : : * Build a projection for each result rel.
4996 : : */
5408 tgl@sss.pgh.pa.us 4997 : 2382 : resultRelInfo = mtstate->resultRelInfo;
312 amitlan@postgresql.o 4998 [ + - + + : 4939 : foreach(l, returningLists)
+ + ]
4999 : : {
5911 tgl@sss.pgh.pa.us 5000 : 2557 : List *rlist = (List *) lfirst(l);
5001 : :
2811 rhaas@postgresql.org 5002 : 2557 : resultRelInfo->ri_returningList = rlist;
5911 tgl@sss.pgh.pa.us 5003 : 2557 : resultRelInfo->ri_projectReturning =
3199 andres@anarazel.de 5004 : 2557 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
3100 tgl@sss.pgh.pa.us 5005 : 2557 : resultRelInfo->ri_RelationDesc->rd_att);
5911 5006 : 2557 : resultRelInfo++;
5007 : : }
5008 : : }
5009 : : else
5010 : : {
5011 : : /*
5012 : : * We still must construct a dummy result tuple type, because InitPlan
5013 : : * expects one (maybe should change that?).
5014 : : */
2594 andres@anarazel.de 5015 : 54178 : ExecInitResultTypeTL(&mtstate->ps);
5016 : :
5911 tgl@sss.pgh.pa.us 5017 : 54178 : mtstate->ps.ps_ExprContext = NULL;
5018 : : }
5019 : :
5020 : : /* Set the list of arbiter indexes if needed for ON CONFLICT */
2822 alvherre@alvh.no-ip. 5021 : 56560 : resultRelInfo = mtstate->resultRelInfo;
5022 [ + + ]: 56560 : if (node->onConflictAction != ONCONFLICT_NONE)
5023 : : {
5024 : : /* insert may only have one relation, inheritance is not expanded */
272 amitlan@postgresql.o 5025 [ - + ]: 710 : Assert(total_nrels == 1);
2822 alvherre@alvh.no-ip. 5026 : 710 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5027 : : }
5028 : :
5029 : : /*
5030 : : * If needed, initialize the target list, projection, and qual for ON CONFLICT
5031 : : * DO UPDATE.
5032 : : */
3875 andres@anarazel.de 5033 [ + + ]: 56560 : if (node->onConflictAction == ONCONFLICT_UPDATE)
5034 : : {
1681 tgl@sss.pgh.pa.us 5035 : 470 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
5036 : : ExprContext *econtext;
5037 : : TupleDesc relationDesc;
5038 : :
5039 : : /* already exists if created by RETURNING processing above */
3875 andres@anarazel.de 5040 [ + + ]: 470 : if (mtstate->ps.ps_ExprContext == NULL)
5041 : 325 : ExecAssignExprContext(estate, &mtstate->ps);
5042 : :
5043 : 470 : econtext = mtstate->ps.ps_ExprContext;
2860 5044 : 470 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5045 : :
5046 : : /* create state for DO UPDATE SET operation */
1681 tgl@sss.pgh.pa.us 5047 : 470 : resultRelInfo->ri_onConflict = onconfl;
5048 : :
5049 : : /* initialize slot for the existing tuple */
5050 : 470 : onconfl->oc_Existing =
2472 andres@anarazel.de 5051 : 470 : table_slot_create(resultRelInfo->ri_RelationDesc,
5052 : 470 : &mtstate->ps.state->es_tupleTable);
5053 : :
5054 : : /*
5055 : : * Create the tuple slot for the UPDATE SET projection. We want a slot
5056 : : * of the table's type here, because the slot will be used to insert
5057 : : * into the table, and for RETURNING processing, which may access
5058 : : * system attributes.
5059 : : */
1681 tgl@sss.pgh.pa.us 5060 : 470 : onconfl->oc_ProjSlot =
5061 : 470 : table_slot_create(resultRelInfo->ri_RelationDesc,
5062 : 470 : &mtstate->ps.state->es_tupleTable);
5063 : :
5064 : : /* build UPDATE SET projection state */
5065 : 470 : onconfl->oc_ProjInfo =
5066 : 470 : ExecBuildUpdateProjection(node->onConflictSet,
5067 : : true,
5068 : : node->onConflictCols,
5069 : : relationDesc,
5070 : : econtext,
5071 : : onconfl->oc_ProjSlot,
5072 : : &mtstate->ps);
5073 : :
5074 : : /* initialize state to evaluate the WHERE clause, if any */
3875 andres@anarazel.de 5075 [ + + ]: 470 : if (node->onConflictWhere)
5076 : : {
5077 : : ExprState *qualexpr;
5078 : :
3199 5079 : 88 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
5080 : : &mtstate->ps);
1681 tgl@sss.pgh.pa.us 5081 : 88 : onconfl->oc_WhereClause = qualexpr;
5082 : : }
5083 : : }
5084 : :
5085 : : /*
5086 : : * If we have any secondary relations in an UPDATE or DELETE, they need to
5087 : : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5088 : : * EvalPlanQual mechanism needs to be told about them. This also goes for
5089 : : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5090 : : */
1721 5091 : 56560 : arowmarks = NIL;
5895 5092 [ + + + + : 57992 : foreach(l, node->rowMarks)
+ + ]
5093 : : {
3172 5094 : 1432 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
5095 : : ExecRowMark *erm;
5096 : : ExecAuxRowMark *aerm;
5097 : :
5098 : : /*
5099 : : * Ignore "parent" rowmarks, because they are irrelevant at runtime.
5100 : : * Also ignore the rowmarks belonging to child tables that have been
5101 : : * pruned in ExecDoInitialPruning().
5102 : : */
312 amitlan@postgresql.o 5103 [ + + ]: 1432 : if (rc->isParent ||
5104 [ + + ]: 1361 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
5895 tgl@sss.pgh.pa.us 5105 : 298 : continue;
5106 : :
5107 : : /* Find ExecRowMark and build ExecAuxRowMark */
3871 5108 : 1134 : erm = ExecFindRowMark(estate, rc->rti, false);
1721 5109 : 1134 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
5110 : 1134 : arowmarks = lappend(arowmarks, aerm);
5111 : : }
5112 : :
5113 : : /* For a MERGE command, initialize its state */
1359 alvherre@alvh.no-ip. 5114 [ + + ]: 56560 : if (mtstate->operation == CMD_MERGE)
5115 : 808 : ExecInitMerge(mtstate, estate);
5116 : :
1721 tgl@sss.pgh.pa.us 5117 : 56560 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5118 : :
5119 : : /*
5120 : : * If there are a lot of result relations, use a hash table to speed the
5121 : : * lookups. If there are not a lot, a simple linear search is faster.
5122 : : *
5123 : : * It's not clear where the threshold is, but try 64 for starters. In a
5124 : : * debugging build, use a small threshold so that we get some test
5125 : : * coverage of both code paths.
5126 : : */
5127 : : #ifdef USE_ASSERT_CHECKING
5128 : : #define MT_NRELS_HASH 4
5129 : : #else
5130 : : #define MT_NRELS_HASH 64
5131 : : #endif
5132 [ + + ]: 56560 : if (nrels >= MT_NRELS_HASH)
5133 : : {
5134 : : HASHCTL hash_ctl;
5135 : :
5136 : 167 : hash_ctl.keysize = sizeof(Oid);
5137 : 167 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5138 : 167 : hash_ctl.hcxt = CurrentMemoryContext;
5139 : 167 : mtstate->mt_resultOidHash =
5140 : 167 : hash_create("ModifyTable target hash",
5141 : : nrels, &hash_ctl,
5142 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5143 [ + + ]: 934 : for (i = 0; i < nrels; i++)
5144 : : {
5145 : : Oid hashkey;
5146 : : MTTargetRelLookup *mtlookup;
5147 : : bool found;
5148 : :
5149 : 767 : resultRelInfo = &mtstate->resultRelInfo[i];
5150 : 767 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5151 : : mtlookup = (MTTargetRelLookup *)
5152 : 767 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5153 : : HASH_ENTER, &found);
5154 [ - + ]: 767 : Assert(!found);
5155 : 767 : mtlookup->relationIndex = i;
5156 : : }
5157 : : }
5158 : : else
5159 : 56393 : mtstate->mt_resultOidHash = NULL;
5160 : :
5161 : : /*
5162 : : * Determine if the FDW supports batch insert and determine the batch size
5163 : : * (a FDW may support batching, but it may be disabled for the
5164 : : * server/table).
5165 : : *
5166 : : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5167 : : * remains set to 0.
5168 : : */
1790 tomas.vondra@postgre 5169 [ + + ]: 56560 : if (operation == CMD_INSERT)
5170 : : {
5171 : : /* insert may only have one relation, inheritance is not expanded */
272 amitlan@postgresql.o 5172 [ - + ]: 42169 : Assert(total_nrels == 1);
1790 tomas.vondra@postgre 5173 : 42169 : resultRelInfo = mtstate->resultRelInfo;
1715 tgl@sss.pgh.pa.us 5174 [ + - ]: 42169 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5175 [ + + ]: 42169 : resultRelInfo->ri_FdwRoutine != NULL &&
5176 [ + - ]: 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5177 [ + - ]: 88 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5178 : : {
5179 : 88 : resultRelInfo->ri_BatchSize =
5180 : 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
1790 tomas.vondra@postgre 5181 [ - + ]: 88 : Assert(resultRelInfo->ri_BatchSize >= 1);
5182 : : }
5183 : : else
1715 tgl@sss.pgh.pa.us 5184 : 42081 : resultRelInfo->ri_BatchSize = 1;
5185 : : }
5186 : :
5187 : : /*
5188 : : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5189 : : * to estate->es_auxmodifytables so that it will be run to completion by
5190 : : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5191 : : * ModifyTable node too, but there's no need.) Note the use of lcons not
5192 : : * lappend: we need later-initialized ModifyTable nodes to be shut down
5193 : : * before earlier ones. This ensures that we don't throw away RETURNING
5194 : : * rows that need to be seen by a later CTE subplan.
5195 : : */
5408 5196 [ + + ]: 56560 : if (!mtstate->canSetTag)
5197 : 484 : estate->es_auxmodifytables = lcons(mtstate,
5198 : : estate->es_auxmodifytables);
5199 : :
5911 5200 : 56560 : return mtstate;
5201 : : }
5202 : :
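Editor's sketch: the ri_BatchSize chosen above drives FDW batch inserts; rows accumulate until a batch fills, and any leftovers are flushed afterward (cf. the ExecPendingInserts() call in ExecModifyTable). A minimal sketch of that buffering discipline, with invented types rather than the FDW API:

    /* batch_insert.c: buffer up to batch_size rows, then flush */
    #include <stdio.h>

    #define MAX_BATCH 8

    typedef struct Batch
    {
        int rows[MAX_BATCH];
        int nrows;
        int batch_size; /* plays the role of ri_BatchSize (1 = no batching) */
    } Batch;

    static void
    flush(Batch *b)
    {
        if (b->nrows == 0)
            return;
        printf("flushing %d row(s):", b->nrows);
        for (int i = 0; i < b->nrows; i++)
            printf(" %d", b->rows[i]);
        printf("\n");
        b->nrows = 0;
    }

    static void
    insert_row(Batch *b, int row)
    {
        b->rows[b->nrows++] = row;
        if (b->nrows >= b->batch_size)
            flush(b);
    }

    int
    main(void)
    {
        Batch b = {.nrows = 0, .batch_size = 3};

        for (int row = 1; row <= 7; row++)
            insert_row(&b, row);
        flush(&b);      /* final flush, cf. ExecPendingInserts() */
        return 0;
    }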
5203 : : /* ----------------------------------------------------------------
5204 : : * ExecEndModifyTable
5205 : : *
5206 : : * Shuts down the plan.
5207 : : *
5208 : : * Returns nothing of interest.
5209 : : * ----------------------------------------------------------------
5210 : : */
5211 : : void
5212 : 54331 : ExecEndModifyTable(ModifyTableState *node)
5213 : : {
5214 : : int i;
5215 : :
5216 : : /*
5217 : : * Allow any FDWs to shut down
5218 : : */
1721 5219 [ + + ]: 109710 : for (i = 0; i < node->mt_nrels; i++)
5220 : : {
5221 : : int j;
4664 5222 : 55379 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5223 : :
3560 rhaas@postgresql.org 5224 [ + + ]: 55379 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5225 [ + + ]: 55283 : resultRelInfo->ri_FdwRoutine != NULL &&
4664 tgl@sss.pgh.pa.us 5226 [ + - ]: 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5227 : 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5228 : : resultRelInfo);
5229 : :
5230 : : /*
5231 : : * Cleanup the initialized batch slots. This only matters for FDWs
5232 : : * with batching, but the other cases will have ri_NumSlotsInitialized
5233 : : * == 0.
5234 : : */
1649 tomas.vondra@postgre 5235 [ + + ]: 55407 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5236 : : {
5237 : 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5238 : 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5239 : : }
5240 : : }
5241 : :
5242 : : /*
5243 : : * Close all the partitioned tables, leaf partitions, and their indices
5244 : : * and release the slot used for tuple routing, if set.
5245 : : */
2903 rhaas@postgresql.org 5246 [ + + ]: 54331 : if (node->mt_partition_tuple_routing)
5247 : : {
2811 5248 : 2276 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5249 : :
2587 alvherre@alvh.no-ip. 5250 [ + + ]: 2276 : if (node->mt_root_tuple_slot)
5251 : 333 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5252 : : }
5253 : :
5254 : : /*
5255 : : * Terminate EPQ execution if active
5256 : : */
5895 tgl@sss.pgh.pa.us 5257 : 54331 : EvalPlanQualEnd(&node->mt_epqstate);
5258 : :
5259 : : /*
5260 : : * shut down subplan
5261 : : */
1721 5262 : 54331 : ExecEndNode(outerPlanState(node));
5911 5263 : 54331 : }
5264 : :
5265 : : void
5636 tgl@sss.pgh.pa.us 5266 :UBC 0 : ExecReScanModifyTable(ModifyTableState *node)
5267 : : {
5268 : : /*
5269 : : * Currently, we don't need to support rescan on ModifyTable nodes. The
5270 : : * semantics of that would be a bit debatable anyway.
5271 : : */
5911 5272 [ # # ]: 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5273 : : }
|