/*-------------------------------------------------------------------------
 *
 * nodeModifyTable.c
 *    routines to handle ModifyTable nodes.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeModifyTable.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *    ExecInitModifyTable - initialize the ModifyTable node
 *    ExecModifyTable - retrieve the next tuple from the node
 *    ExecEndModifyTable - shut down the ModifyTable node
 *    ExecReScanModifyTable - rescan the ModifyTable node
 *
 * NOTES
 *    The ModifyTable node receives input from its outerPlan, which is
 *    the data to insert for INSERT cases, the changed columns' new
 *    values plus row-locating info for UPDATE and MERGE cases, or just the
 *    row-locating info for DELETE cases.
 *
 *    The relation to modify can be an ordinary table, a foreign table, or a
 *    view.  If it's a view, either it has sufficient INSTEAD OF triggers or
 *    this node executes only MERGE ... DO NOTHING.  If the original MERGE
 *    targeted a view not in one of those two categories, earlier processing
 *    already pointed the ModifyTable result relation to an underlying
 *    relation of that other view.  This node does process
 *    ri_WithCheckOptions, which may have expressions from those other,
 *    automatically updatable views.
 *
 *    MERGE runs a join between the source relation and the target table.
 *    If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
 *    is an outer join that might output tuples without a matching target
 *    tuple.  In this case, any unmatched target tuples will have NULL
 *    row-locating info, and only INSERT can be run.  But for matched target
 *    tuples, the row-locating info is used to determine the tuple to UPDATE
 *    or DELETE.  When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
 *    SOURCE, all tuples produced by the join will include a matching target
 *    tuple, so all tuples contain row-locating info.
 *
 *    If the query specifies RETURNING, then the ModifyTable returns a
 *    RETURNING tuple after completing each row insert, update, or delete.
 *    It must be called again to continue the operation.  Without RETURNING,
 *    we just loop within the node until all the work is done, then
 *    return NULL.  This avoids useless call/return overhead.
 */

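/*
 * For example (illustrative only; table and column names here are
 * hypothetical), a MERGE whose WHEN NOT MATCHED clause makes the underlying
 * join an outer join:
 *
 *    MERGE INTO target t
 *    USING source s ON t.id = s.id
 *    WHEN MATCHED THEN UPDATE SET val = s.val
 *    WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val);
 *
 * Source rows with no matching target row reach this node with NULL
 * row-locating info, so only the INSERT action can apply to them.
 */
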
#include "postgres.h"

#include "access/htup_details.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "commands/trigger.h"
#include "executor/execPartition.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "rewrite/rewriteHandler.h"
#include "rewrite/rewriteManip.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"


typedef struct MTTargetRelLookup
{
    Oid         relationOid;    /* hash key, must be first */
    int         relationIndex;  /* rel's index in resultRelInfo[] array */
} MTTargetRelLookup;

/*
 * Context struct for a ModifyTable operation, containing basic execution
 * state and some output variables populated by ExecUpdateAct() and
 * ExecDeleteAct() to report the result of their actions to callers.
 */
typedef struct ModifyTableContext
{
    /* Operation state */
    ModifyTableState *mtstate;
    EPQState   *epqstate;
    EState     *estate;

    /*
     * Slot containing tuple obtained from ModifyTable's subplan.  Used to
     * access "junk" columns that are not going to be stored.
     */
    TupleTableSlot *planSlot;

    /*
     * Information about the changes that were made concurrently to a tuple
     * being updated or deleted
     */
    TM_FailureData tmfd;

    /*
     * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
     * clause that refers to OLD columns (converted to the root's tuple
     * descriptor).
     */
    TupleTableSlot *cpDeletedSlot;

    /*
     * The tuple projected by the INSERT's RETURNING clause, when doing a
     * cross-partition UPDATE
     */
    TupleTableSlot *cpUpdateReturningSlot;
} ModifyTableContext;

/*
 * Context struct containing output data specific to UPDATE operations.
 */
typedef struct UpdateContext
{
    bool        crossPartUpdate;    /* was it a cross-partition update? */
    TU_UpdateIndexes updateIndexes; /* which index updates are required? */

    /*
     * Lock mode to acquire on the latest tuple version before performing
     * EvalPlanQual on it
     */
    LockTupleMode lockmode;
} UpdateContext;


static void ExecBatchInsert(ModifyTableState *mtstate,
                            ResultRelInfo *resultRelInfo,
                            TupleTableSlot **slots,
                            TupleTableSlot **planSlots,
                            int numSlots,
                            EState *estate,
                            bool canSetTag);
static void ExecPendingInserts(EState *estate);
static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
                                               ResultRelInfo *sourcePartInfo,
                                               ResultRelInfo *destPartInfo,
                                               ItemPointer tupleid,
                                               TupleTableSlot *oldslot,
                                               TupleTableSlot *newslot);
static bool ExecOnConflictUpdate(ModifyTableContext *context,
                                 ResultRelInfo *resultRelInfo,
                                 ItemPointer conflictTid,
                                 TupleTableSlot *excludedSlot,
                                 bool canSetTag,
                                 TupleTableSlot **returning);
static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
                                               EState *estate,
                                               PartitionTupleRouting *proute,
                                               ResultRelInfo *targetRelInfo,
                                               TupleTableSlot *slot,
                                               ResultRelInfo **partRelInfo);

static TupleTableSlot *ExecMerge(ModifyTableContext *context,
                                 ResultRelInfo *resultRelInfo,
                                 ItemPointer tupleid,
                                 HeapTuple oldtuple,
                                 bool canSetTag);
static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
                                        ResultRelInfo *resultRelInfo,
                                        ItemPointer tupleid,
                                        HeapTuple oldtuple,
                                        bool canSetTag,
                                        bool *matched);
static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
                                           ResultRelInfo *resultRelInfo,
                                           bool canSetTag);


/*
 * Verify that the tuples to be produced by INSERT match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 *
 * We used to use this for UPDATE as well, but now the equivalent checks
 * are done in ExecBuildUpdateProjection.
 */
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
    TupleDesc   resultDesc = RelationGetDescr(resultRel);
    int         attno = 0;
    ListCell   *lc;

    foreach(lc, targetList)
    {
        TargetEntry *tle = (TargetEntry *) lfirst(lc);
        Form_pg_attribute attr;

        Assert(!tle->resjunk);  /* caller removed junk items already */

        if (attno >= resultDesc->natts)
            ereport(ERROR,
                    (errcode(ERRCODE_DATATYPE_MISMATCH),
                     errmsg("table row type and query-specified row type do not match"),
                     errdetail("Query has too many columns.")));
        attr = TupleDescAttr(resultDesc, attno);
        attno++;

        /*
         * Special cases here should match planner's expand_insert_targetlist.
         */
        if (attr->attisdropped)
        {
            /*
             * For a dropped column, we can't check atttypid (it's likely 0).
             * In any case the planner has most likely inserted an INT4 null.
             * What we insist on is just *some* NULL constant.
             */
            if (!IsA(tle->expr, Const) ||
                !((Const *) tle->expr)->constisnull)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Query provides a value for a dropped column at ordinal position %d.",
                                   attno)));
        }
        else if (attr->attgenerated)
        {
            /*
             * For a generated column, the planner will have inserted a null
             * of the column's base type (to avoid possibly failing on domain
             * not-null constraints).  It doesn't seem worth insisting on that
             * exact type though, since a null value is type-independent.  As
             * above, just insist on *some* NULL constant.
             */
            if (!IsA(tle->expr, Const) ||
                !((Const *) tle->expr)->constisnull)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Query provides a value for a generated column at ordinal position %d.",
                                   attno)));
        }
        else
        {
            /* Normal case: demand type match */
            if (exprType((Node *) tle->expr) != attr->atttypid)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Table has type %s at ordinal position %d, but query expects %s.",
                                   format_type_be(attr->atttypid),
                                   attno,
                                   format_type_be(exprType((Node *) tle->expr)))));
        }
    }
    if (attno != resultDesc->natts)
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("table row type and query-specified row type do not match"),
                 errdetail("Query has too few columns.")));
}

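/*
 * For example (illustrative; "t" is a hypothetical table): after
 *
 *    ALTER TABLE t DROP COLUMN b;
 *
 * the tuple descriptor still carries a slot for "b" with attisdropped set,
 * and the planner's expand_insert_targetlist supplies a NULL Const in that
 * position, which is exactly what the dropped-column branch above insists
 * on seeing.
 */
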
/*
 * ExecProcessReturning --- evaluate a RETURNING list
 *
 * context: context for the ModifyTable operation
 * resultRelInfo: current result rel
 * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE)
 * oldSlot: slot holding old tuple deleted or updated
 * newSlot: slot holding new tuple inserted or updated
 * planSlot: slot holding tuple returned by top subplan node
 *
 * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
 * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
 * modify is disabled if the RETURNING list refers to any OLD/NEW values).
 *
 * Returns a slot holding the result tuple
 */
static TupleTableSlot *
ExecProcessReturning(ModifyTableContext *context,
                     ResultRelInfo *resultRelInfo,
                     CmdType cmdType,
                     TupleTableSlot *oldSlot,
                     TupleTableSlot *newSlot,
                     TupleTableSlot *planSlot)
{
    EState     *estate = context->estate;
    ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
    ExprContext *econtext = projectReturning->pi_exprContext;

    /* Make tuple and any needed join variables available to ExecProject */
    switch (cmdType)
    {
        case CMD_INSERT:
        case CMD_UPDATE:
            /* return new tuple by default */
            if (newSlot)
                econtext->ecxt_scantuple = newSlot;
            break;

        case CMD_DELETE:
            /* return old tuple by default */
            if (oldSlot)
                econtext->ecxt_scantuple = oldSlot;
            break;

        default:
            elog(ERROR, "unrecognized commandType: %d", (int) cmdType);
    }
    econtext->ecxt_outertuple = planSlot;

    /* Make old/new tuples available to ExecProject, if required */
    if (oldSlot)
        econtext->ecxt_oldtuple = oldSlot;
    else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
        econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
    else
        econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */

    if (newSlot)
        econtext->ecxt_newtuple = newSlot;
    else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
        econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
    else
        econtext->ecxt_newtuple = NULL; /* No references to NEW columns */

    /*
     * Tell ExecProject whether or not the OLD/NEW rows actually exist.  This
     * information is required to evaluate ReturningExpr nodes and also in
     * ExecEvalSysVar() and ExecEvalWholeRowVar().
     */
    if (oldSlot == NULL)
        projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
    else
        projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;

    if (newSlot == NULL)
        projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
    else
        projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;

    /* Compute the RETURNING expressions */
    return ExecProject(projectReturning);
}

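/*
 * For example (illustrative; table and column names are hypothetical), a
 * RETURNING list that refers to both tuple versions:
 *
 *    UPDATE accounts SET balance = balance - 100
 *        WHERE id = 1
 *        RETURNING old.balance AS before, new.balance AS after;
 *
 * Here both oldSlot and newSlot are supplied, and the projection above
 * evaluates old.balance against ecxt_oldtuple and new.balance against
 * ecxt_newtuple.
 */
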
/*
 * ExecCheckTupleVisible -- verify tuple is visible
 *
 * It would not be consistent with guarantees of the higher isolation levels to
 * proceed with avoiding insertion (taking speculative insertion's alternative
 * path) on the basis of another tuple that is not visible to the MVCC
 * snapshot.  Check for the need to raise a serialization failure, and do so
 * as necessary.
 */
static void
ExecCheckTupleVisible(EState *estate,
                      Relation rel,
                      TupleTableSlot *slot)
{
    if (!IsolationUsesXactSnapshot())
        return;

    if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
    {
        Datum       xminDatum;
        TransactionId xmin;
        bool        isnull;

        xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
        Assert(!isnull);
        xmin = DatumGetTransactionId(xminDatum);

        /*
         * We should not raise a serialization failure if the conflict is
         * against a tuple inserted by our own transaction, even if it's not
         * visible to our snapshot.  (This would happen, for example, if
         * conflicting keys are proposed for insertion in a single command.)
         */
        if (!TransactionIdIsCurrentTransactionId(xmin))
            ereport(ERROR,
                    (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                     errmsg("could not serialize access due to concurrent update")));
    }
}

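/*
 * For example (illustrative; names are hypothetical): in a REPEATABLE READ
 * transaction,
 *
 *    INSERT INTO t (k) VALUES (1) ON CONFLICT (k) DO NOTHING;
 *
 * may find a conflicting row that was committed after the transaction's
 * snapshot was taken.  Silently skipping the insert would contradict that
 * snapshot, so the check above raises a serialization failure instead.
 */
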
/*
 * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
 */
static void
ExecCheckTIDVisible(EState *estate,
                    ResultRelInfo *relinfo,
                    ItemPointer tid,
                    TupleTableSlot *tempSlot)
{
    Relation    rel = relinfo->ri_RelationDesc;

    /* Redundantly check isolation level */
    if (!IsolationUsesXactSnapshot())
        return;

    if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
        elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
    ExecCheckTupleVisible(estate, rel, tempSlot);
    ExecClearTuple(tempSlot);
}

/*
 * Initialize generated columns handling for a tuple
 *
 * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
 * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
 * This is used only for stored generated columns.
 *
 * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
 * This is used by both stored and virtual generated columns.
 *
 * Note: usually, a given query would need only one of ri_GeneratedExprsI and
 * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
 * cross-partition UPDATEs, since a partition might be the target of both
 * UPDATE and INSERT actions.
 */
void
ExecInitGenerated(ResultRelInfo *resultRelInfo,
                  EState *estate,
                  CmdType cmdtype)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    int         natts = tupdesc->natts;
    ExprState **ri_GeneratedExprs;
    int         ri_NumGeneratedNeeded;
    Bitmapset  *updatedCols;
    MemoryContext oldContext;

    /* Nothing to do if no generated columns */
    if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
        return;

    /*
     * In an UPDATE, we can skip computing any generated columns that do not
     * depend on any UPDATE target column.  But if there is a BEFORE ROW
     * UPDATE trigger, we cannot skip because the trigger might change more
     * columns.
     */
    if (cmdtype == CMD_UPDATE &&
        !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
        updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
    else
        updatedCols = NULL;

    /*
     * Make sure these data structures are built in the per-query memory
     * context so they'll survive throughout the query.
     */
    oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

    ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
    ri_NumGeneratedNeeded = 0;

    for (int i = 0; i < natts; i++)
    {
        char        attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;

        if (attgenerated)
        {
            Expr       *expr;

            /* Fetch the GENERATED AS expression tree */
            expr = (Expr *) build_column_default(rel, i + 1);
            if (expr == NULL)
                elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
                     i + 1, RelationGetRelationName(rel));

            /*
             * If it's an update with a known set of update target columns,
             * see if we can skip the computation.
             */
            if (updatedCols)
            {
                Bitmapset  *attrs_used = NULL;

                pull_varattnos((Node *) expr, 1, &attrs_used);

                if (!bms_overlap(updatedCols, attrs_used))
                    continue;   /* need not update this column */
            }

            /* No luck, so prepare the expression for execution */
            if (attgenerated == ATTRIBUTE_GENERATED_STORED)
            {
                ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
                ri_NumGeneratedNeeded++;
            }

            /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
            if (cmdtype == CMD_UPDATE)
                resultRelInfo->ri_extraUpdatedCols =
                    bms_add_member(resultRelInfo->ri_extraUpdatedCols,
                                   i + 1 - FirstLowInvalidHeapAttributeNumber);
        }
    }

    if (ri_NumGeneratedNeeded == 0)
    {
        /* didn't need it after all */
        pfree(ri_GeneratedExprs);
        ri_GeneratedExprs = NULL;
    }

    /* Save in appropriate set of fields */
    if (cmdtype == CMD_UPDATE)
    {
        /* Don't call twice */
        Assert(resultRelInfo->ri_GeneratedExprsU == NULL);

        resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
        resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;

        resultRelInfo->ri_extraUpdatedCols_valid = true;
    }
    else
    {
        /* Don't call twice */
        Assert(resultRelInfo->ri_GeneratedExprsI == NULL);

        resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
        resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
    }

    MemoryContextSwitchTo(oldContext);
}

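/*
 * For example (illustrative; names are hypothetical), given
 *
 *    CREATE TABLE t (
 *        a int,
 *        b int,
 *        c int GENERATED ALWAYS AS (a + 1) STORED
 *    );
 *
 * an "UPDATE t SET b = 0" with no BEFORE ROW UPDATE trigger need not
 * recompute c, because c's generation expression references only a; the
 * bms_overlap() test above detects that and skips the column.
 */
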
/*
 * Compute stored generated columns for a tuple
 */
void
ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
                           EState *estate, TupleTableSlot *slot,
                           CmdType cmdtype)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    int         natts = tupdesc->natts;
    ExprContext *econtext = GetPerTupleExprContext(estate);
    ExprState **ri_GeneratedExprs;
    MemoryContext oldContext;
    Datum      *values;
    bool       *nulls;

    /* We should not be called unless this is true */
    Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);

    /*
     * Initialize the expressions if we didn't already, and check whether we
     * can exit early because nothing needs to be computed.
     */
    if (cmdtype == CMD_UPDATE)
    {
        if (resultRelInfo->ri_GeneratedExprsU == NULL)
            ExecInitGenerated(resultRelInfo, estate, cmdtype);
        if (resultRelInfo->ri_NumGeneratedNeededU == 0)
            return;
        ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
    }
    else
    {
        if (resultRelInfo->ri_GeneratedExprsI == NULL)
            ExecInitGenerated(resultRelInfo, estate, cmdtype);
        /* Early exit is impossible given the prior Assert */
        Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
        ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
    }

    oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

    values = palloc(sizeof(*values) * natts);
    nulls = palloc(sizeof(*nulls) * natts);

    slot_getallattrs(slot);
    memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);

    for (int i = 0; i < natts; i++)
    {
        CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);

        if (ri_GeneratedExprs[i])
        {
            Datum       val;
            bool        isnull;

            Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);

            econtext->ecxt_scantuple = slot;

            val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);

            /*
             * We must make a copy of val as we have no guarantees about where
             * memory for a pass-by-reference Datum is located.
             */
            if (!isnull)
                val = datumCopy(val, attr->attbyval, attr->attlen);

            values[i] = val;
            nulls[i] = isnull;
        }
        else
        {
            if (!nulls[i])
                values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
        }
    }

    ExecClearTuple(slot);
    memcpy(slot->tts_values, values, sizeof(*values) * natts);
    memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
    ExecStoreVirtualTuple(slot);
    ExecMaterializeSlot(slot);

    MemoryContextSwitchTo(oldContext);
}

/*
 * ExecInitInsertProjection
 *    Do one-time initialization of projection data for INSERT tuples.
 *
 * INSERT queries may need a projection to filter out junk attrs in the tlist.
 *
 * This is also a convenient place to verify that the
 * output of an INSERT matches the target table.
 */
static void
ExecInitInsertProjection(ModifyTableState *mtstate,
                         ResultRelInfo *resultRelInfo)
{
    ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
    Plan       *subplan = outerPlan(node);
    EState     *estate = mtstate->ps.state;
    List       *insertTargetList = NIL;
    bool        need_projection = false;
    ListCell   *l;

    /* Extract non-junk columns of the subplan's result tlist. */
    foreach(l, subplan->targetlist)
    {
        TargetEntry *tle = (TargetEntry *) lfirst(l);

        if (!tle->resjunk)
            insertTargetList = lappend(insertTargetList, tle);
        else
            need_projection = true;
    }

    /*
     * The junk-free list must produce a tuple suitable for the result
     * relation.
     */
    ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);

    /* We'll need a slot matching the table's format. */
    resultRelInfo->ri_newTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);

    /* Build ProjectionInfo if needed (it probably isn't). */
    if (need_projection)
    {
        TupleDesc   relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);

        /* need an expression context to do the projection */
        if (mtstate->ps.ps_ExprContext == NULL)
            ExecAssignExprContext(estate, &mtstate->ps);

        resultRelInfo->ri_projectNew =
            ExecBuildProjectionInfo(insertTargetList,
                                    mtstate->ps.ps_ExprContext,
                                    resultRelInfo->ri_newTupleSlot,
                                    &mtstate->ps,
                                    relDesc);
    }

    resultRelInfo->ri_projectNewInfoValid = true;
}

/*
 * ExecInitUpdateProjection
 *    Do one-time initialization of projection data for UPDATE tuples.
 *
 * UPDATE always needs a projection, because (1) there's always some junk
 * attrs, and (2) we may need to merge values of not-updated columns from
 * the old tuple into the final tuple.  In UPDATE, the tuple arriving from
 * the subplan contains only new values for the changed columns, plus row
 * identity info in the junk attrs.
 *
 * This is "one-time" for any given result rel, but we might touch more than
 * one result rel in the course of an inherited UPDATE, and each one needs
 * its own projection due to possible column order variation.
 *
 * This is also a convenient place to verify that the output of an UPDATE
 * matches the target table (ExecBuildUpdateProjection does that).
 */
static void
ExecInitUpdateProjection(ModifyTableState *mtstate,
                         ResultRelInfo *resultRelInfo)
{
    ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
    Plan       *subplan = outerPlan(node);
    EState     *estate = mtstate->ps.state;
    TupleDesc   relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
    int         whichrel;
    List       *updateColnos;

    /*
     * Usually, mt_lastResultIndex matches the target rel.  If it happens not
     * to, we can get the index the hard way with an integer division.
     */
    whichrel = mtstate->mt_lastResultIndex;
    if (resultRelInfo != mtstate->resultRelInfo + whichrel)
    {
        whichrel = resultRelInfo - mtstate->resultRelInfo;
        Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
    }

    updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);

    /*
     * For UPDATE, we use the old tuple to fill up missing values in the tuple
     * produced by the subplan to get the new tuple.  We need two slots, both
     * matching the table's desired format.
     */
    resultRelInfo->ri_oldTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);
    resultRelInfo->ri_newTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);

    /* need an expression context to do the projection */
    if (mtstate->ps.ps_ExprContext == NULL)
        ExecAssignExprContext(estate, &mtstate->ps);

    resultRelInfo->ri_projectNew =
        ExecBuildUpdateProjection(subplan->targetlist,
                                  false,    /* subplan did the evaluation */
                                  updateColnos,
                                  relDesc,
                                  mtstate->ps.ps_ExprContext,
                                  resultRelInfo->ri_newTupleSlot,
                                  &mtstate->ps);

    resultRelInfo->ri_projectNewInfoValid = true;
}

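/*
 * For example (illustrative; names are hypothetical), for
 *
 *    UPDATE t SET b = b + 1 WHERE a = 1;
 *
 * the subplan emits only the new value of b plus junk row-identity columns
 * (e.g. ctid for a heap table); the projection built above merges that with
 * the unchanged columns of the old tuple to form the complete new tuple.
 */
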
761 : : /*
762 : : * ExecGetInsertNewTuple
763 : : * This prepares a "new" tuple ready to be inserted into given result
764 : : * relation, by removing any junk columns of the plan's output tuple
765 : : * and (if necessary) coercing the tuple to the right tuple format.
766 : : */
767 : : static TupleTableSlot *
1620 768 : 6146424 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
769 : : TupleTableSlot *planSlot)
770 : : {
771 : 6146424 : ProjectionInfo *newProj = relinfo->ri_projectNew;
772 : : ExprContext *econtext;
773 : :
774 : : /*
775 : : * If there's no projection to be done, just make sure the slot is of the
776 : : * right type for the target rel. If the planSlot is the right type we
777 : : * can use it as-is, else copy the data into ri_newTupleSlot.
778 : : */
779 [ + - ]: 6146424 : if (newProj == NULL)
780 : : {
781 [ + + ]: 6146424 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
782 : : {
783 : 5752546 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
784 : 5752546 : return relinfo->ri_newTupleSlot;
785 : : }
786 : : else
787 : 393878 : return planSlot;
788 : : }
789 : :
790 : : /*
791 : : * Else project; since the projection output slot is ri_newTupleSlot, this
792 : : * will also fix any slot-type problem.
793 : : *
794 : : * Note: currently, this is dead code, because INSERT cases don't receive
795 : : * any junk columns so there's never a projection to be done.
796 : : */
1620 tgl@sss.pgh.pa.us 797 :UBC 0 : econtext = newProj->pi_exprContext;
798 : 0 : econtext->ecxt_outertuple = planSlot;
799 : 0 : return ExecProject(newProj);
800 : : }
801 : :
802 : : /*
803 : : * ExecGetUpdateNewTuple
804 : : * This prepares a "new" tuple by combining an UPDATE subplan's output
805 : : * tuple (which contains values of changed columns) with unchanged
806 : : * columns taken from the old tuple.
807 : : *
808 : : * The subplan tuple might also contain junk columns, which are ignored.
809 : : * Note that the projection also ensures we have a slot of the right type.
810 : : */
811 : : TupleTableSlot *
1620 tgl@sss.pgh.pa.us 812 :CBC 158834 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
813 : : TupleTableSlot *planSlot,
814 : : TupleTableSlot *oldSlot)
815 : : {
908 dean.a.rasheed@gmail 816 : 158834 : ProjectionInfo *newProj = relinfo->ri_projectNew;
817 : : ExprContext *econtext;
818 : :
819 : : /* Use a few extra Asserts to protect against outside callers */
1614 tgl@sss.pgh.pa.us 820 [ - + ]: 158834 : Assert(relinfo->ri_projectNewInfoValid);
1620 821 [ + - - + ]: 158834 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
822 [ + - - + ]: 158834 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
823 : :
824 : 158834 : econtext = newProj->pi_exprContext;
825 : 158834 : econtext->ecxt_outertuple = planSlot;
826 : 158834 : econtext->ecxt_scantuple = oldSlot;
827 : 158834 : return ExecProject(newProj);
828 : : }
829 : :
830 : : /* ----------------------------------------------------------------
831 : : * ExecInsert
832 : : *
833 : : * For INSERT, we have to insert the tuple into the target relation
834 : : * (or partition thereof) and insert appropriate tuples into the index
835 : : * relations.
836 : : *
837 : : * slot contains the new tuple value to be stored.
838 : : *
839 : : * Returns RETURNING result if any, otherwise NULL.
840 : : * *inserted_tuple is the tuple that's effectively inserted;
841 : : * *insert_destrel is the relation where it was inserted.
842 : : * These are only set on success.
843 : : *
844 : : * This may change the currently active tuple conversion map in
845 : : * mtstate->mt_transition_capture, so the callers must take care to
846 : : * save the previous value to avoid losing track of it.
847 : : * ----------------------------------------------------------------
848 : : */
849 : : static TupleTableSlot *
1269 alvherre@alvh.no-ip. 850 : 6147817 : ExecInsert(ModifyTableContext *context,
851 : : ResultRelInfo *resultRelInfo,
852 : : TupleTableSlot *slot,
853 : : bool canSetTag,
854 : : TupleTableSlot **inserted_tuple,
855 : : ResultRelInfo **insert_destrel)
856 : : {
857 : 6147817 : ModifyTableState *mtstate = context->mtstate;
858 : 6147817 : EState *estate = context->estate;
859 : : Relation resultRelationDesc;
5810 tgl@sss.pgh.pa.us 860 : 6147817 : List *recheckIndexes = NIL;
1269 alvherre@alvh.no-ip. 861 : 6147817 : TupleTableSlot *planSlot = context->planSlot;
3071 rhaas@postgresql.org 862 : 6147817 : TupleTableSlot *result = NULL;
863 : : TransitionCaptureState *ar_insert_trig_tcs;
2728 alvherre@alvh.no-ip. 864 : 6147817 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
865 : 6147817 : OnConflictAction onconflict = node->onConflictAction;
1788 heikki.linnakangas@i 866 : 6147817 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
867 : : MemoryContext oldContext;
868 : :
869 : : /*
870 : : * If the input result relation is a partitioned table, find the leaf
871 : : * partition to insert the tuple into.
872 : : */
873 [ + + ]: 6147817 : if (proute)
874 : : {
875 : : ResultRelInfo *partRelInfo;
876 : :
877 : 369498 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
878 : : resultRelInfo, slot,
879 : : &partRelInfo);
880 : 369387 : resultRelInfo = partRelInfo;
881 : : }
882 : :
883 : 6147706 : ExecMaterializeSlot(slot);
884 : :
5810 tgl@sss.pgh.pa.us 885 : 6147706 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
886 : :
887 : : /*
888 : : * Open the table's indexes, if we have not done so already, so that we
889 : : * can add new index entries for the inserted tuple.
890 : : */
1614 891 [ + + ]: 6147706 : if (resultRelationDesc->rd_rel->relhasindex &&
892 [ + + ]: 1459574 : resultRelInfo->ri_IndexRelationDescs == NULL)
893 : 14604 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
894 : :
895 : : /*
896 : : * BEFORE ROW INSERT Triggers.
897 : : *
898 : : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
899 : : * INSERT ... ON CONFLICT statement. We cannot check for constraint
900 : : * violations before firing these triggers, because they can change the
901 : : * values to insert. Also, they can run arbitrary user-defined code with
902 : : * side-effects that we can't cancel by just not inserting the tuple.
903 : : */
5810 904 [ + + ]: 6147706 : if (resultRelInfo->ri_TrigDesc &&
5445 905 [ + + ]: 37590 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
906 : : {
907 : : /* Flush any pending inserts, so rows are visible to the triggers */
1016 efujita@postgresql.o 908 [ + + ]: 1031 : if (estate->es_insert_pending_result_relations != NIL)
909 : 3 : ExecPendingInserts(estate);
910 : :
2384 andres@anarazel.de 911 [ + + ]: 1031 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
912 : 100 : return NULL; /* "do nothing" */
913 : : }
914 : :
915 : : /* INSTEAD OF ROW INSERT Triggers */
5445 tgl@sss.pgh.pa.us 916 [ + + ]: 6147557 : if (resultRelInfo->ri_TrigDesc &&
917 [ + + ]: 37441 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
918 : : {
2384 andres@anarazel.de 919 [ + + ]: 84 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
920 : 3 : return NULL; /* "do nothing" */
921 : : }
4563 tgl@sss.pgh.pa.us 922 [ + + ]: 6147473 : else if (resultRelInfo->ri_FdwRoutine)
923 : : {
924 : : /*
925 : : * GENERATED expressions might reference the tableoid column, so
926 : : * (re-)initialize tts_tableOid before evaluating them.
927 : : */
1569 928 : 1010 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
929 : :
930 : : /*
931 : : * Compute stored generated columns
932 : : */
2352 peter@eisentraut.org 933 [ + + ]: 1010 : if (resultRelationDesc->rd_att->constr &&
934 [ + + ]: 183 : resultRelationDesc->rd_att->constr->has_generated_stored)
1788 heikki.linnakangas@i 935 : 4 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
936 : : CMD_INSERT);
937 : :
938 : : /*
939 : : * If the FDW supports batching, and batching is requested, accumulate
940 : : * rows and insert them in batches. Otherwise use the per-row inserts.
941 : : */
1690 tomas.vondra@postgre 942 [ + + ]: 1010 : if (resultRelInfo->ri_BatchSize > 1)
943 : : {
1016 efujita@postgresql.o 944 : 145 : bool flushed = false;
945 : :
946 : : /*
947 : : * When we've reached the desired batch size, perform the
948 : : * insertion.
949 : : */
1690 tomas.vondra@postgre 950 [ + + ]: 145 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
951 : : {
952 : 10 : ExecBatchInsert(mtstate, resultRelInfo,
953 : : resultRelInfo->ri_Slots,
954 : : resultRelInfo->ri_PlanSlots,
955 : : resultRelInfo->ri_NumSlots,
956 : : estate, canSetTag);
1016 efujita@postgresql.o 957 : 10 : flushed = true;
958 : : }
959 : :
1690 tomas.vondra@postgre 960 : 145 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
961 : :
962 [ + + ]: 145 : if (resultRelInfo->ri_Slots == NULL)
963 : : {
964 : 30 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
1578 tgl@sss.pgh.pa.us 965 : 15 : resultRelInfo->ri_BatchSize);
1690 tomas.vondra@postgre 966 : 15 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
1578 tgl@sss.pgh.pa.us 967 : 15 : resultRelInfo->ri_BatchSize);
968 : : }
969 : :
970 : : /*
971 : : * Initialize the batch slots. We don't know how many slots will
972 : : * be needed, so we initialize them as the batch grows, and we
973 : : * keep them across batches. To mitigate an inefficiency in how
974 : : * resource owner handles objects with many references (as with
975 : : * many slots all referencing the same tuple descriptor) we copy
976 : : * the appropriate tuple descriptor for each slot.
977 : : */
1548 tomas.vondra@postgre 978 [ + + ]: 145 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
979 : : {
1531 andrew@dunslane.net 980 : 72 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
981 : : TupleDesc plan_tdesc =
841 tgl@sss.pgh.pa.us 982 : 72 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
983 : :
1548 tomas.vondra@postgre 984 : 144 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
985 : 72 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
986 : :
987 : 144 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
1486 988 : 72 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
989 : :
990 : : /* remember how many batch slots we initialized */
1548 991 : 72 : resultRelInfo->ri_NumSlotsInitialized++;
992 : : }
993 : :
1543 994 : 145 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
995 : : slot);
996 : :
997 : 145 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
998 : : planSlot);
999 : :
1000 : : /*
1001 : : * If these are the first tuples stored in the buffers, add the
1002 : : * target rel and the mtstate to the
1003 : : * es_insert_pending_result_relations and
1004 : : * es_insert_pending_modifytables lists respectively, except in
1005 : : * the case where flushing was done above, in which case they
1006 : : * would already have been added to the lists, so no need to do
1007 : : * this.
1008 : : */
1016 efujita@postgresql.o 1009 [ + + + + ]: 145 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
1010 : : {
1011 [ - + ]: 19 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
1012 : : resultRelInfo));
1013 : 19 : estate->es_insert_pending_result_relations =
1014 : 19 : lappend(estate->es_insert_pending_result_relations,
1015 : : resultRelInfo);
1003 1016 : 19 : estate->es_insert_pending_modifytables =
1017 : 19 : lappend(estate->es_insert_pending_modifytables, mtstate);
1018 : : }
1016 1019 [ - + ]: 145 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
1020 : : resultRelInfo));
1021 : :
1690 tomas.vondra@postgre 1022 : 145 : resultRelInfo->ri_NumSlots++;
1023 : :
1024 : 145 : MemoryContextSwitchTo(oldContext);
1025 : :
1026 : 145 : return NULL;
1027 : : }
1028 : :
1029 : : /*
1030 : : * insert into foreign table: let the FDW do it
1031 : : */
4563 tgl@sss.pgh.pa.us 1032 : 865 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
1033 : : resultRelInfo,
1034 : : slot,
1035 : : planSlot);
1036 : :
1037 [ + + ]: 862 : if (slot == NULL) /* "do nothing" */
1038 : 2 : return NULL;
1039 : :
1040 : : /*
1041 : : * AFTER ROW Triggers or RETURNING expressions might reference the
1042 : : * tableoid column, so (re-)initialize tts_tableOid before evaluating
1043 : : * them. (This covers the case where the FDW replaced the slot.)
1044 : : */
2384 andres@anarazel.de 1045 : 860 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1046 : : }
1047 : : else
1048 : : {
1049 : : WCOKind wco_kind;
1050 : :
1051 : : /*
1052 : : * Constraints and GENERATED expressions might reference the tableoid
1053 : : * column, so (re-)initialize tts_tableOid before evaluating them.
1054 : : */
1055 : 6146463 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1056 : :
1057 : : /*
1058 : : * Compute stored generated columns
1059 : : */
2352 peter@eisentraut.org 1060 [ + + ]: 6146463 : if (resultRelationDesc->rd_att->constr &&
1061 [ + + ]: 1870367 : resultRelationDesc->rd_att->constr->has_generated_stored)
1788 heikki.linnakangas@i 1062 : 529 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1063 : : CMD_INSERT);
1064 : :
1065 : : /*
1066 : : * Check any RLS WITH CHECK policies.
1067 : : *
1068 : : * Normally we should check INSERT policies. But if the insert is the
1069 : : * result of a partition key update that moved the tuple to a new
1070 : : * partition, we should instead check UPDATE policies, because we are
1071 : : * executing policies defined on the target table, and not those
1072 : : * defined on the child partitions.
1073 : : *
1074 : : * If we're running MERGE, we refer to the action that we're executing
1075 : : * to know if we're doing an INSERT or UPDATE to a partition table.
1076 : : */
1258 alvherre@alvh.no-ip. 1077 [ + + ]: 6146448 : if (mtstate->operation == CMD_UPDATE)
1078 : 393 : wco_kind = WCO_RLS_UPDATE_CHECK;
1079 [ + + ]: 6146055 : else if (mtstate->operation == CMD_MERGE)
538 dean.a.rasheed@gmail 1080 : 880 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
1258 alvherre@alvh.no-ip. 1081 [ + + ]: 880 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
1082 : : else
1083 : 6145175 : wco_kind = WCO_RLS_INSERT_CHECK;
1084 : :
1085 : : /*
1086 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1087 : : * we are looking for at this point.
1088 : : */
3788 sfrost@snowman.net 1089 [ + + ]: 6146448 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2787 rhaas@postgresql.org 1090 : 300 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1091 : :
1092 : : /*
1093 : : * Check the constraints of the tuple.
1094 : : */
2644 alvherre@alvh.no-ip. 1095 [ + + ]: 6146352 : if (resultRelationDesc->rd_att->constr)
1096 : 1870301 : ExecConstraints(resultRelInfo, slot, estate);
1097 : :
1098 : : /*
1099 : : * Also check the tuple against the partition constraint, if there is
1100 : : * one; except that if we got here via tuple-routing, we don't need to
1101 : : * if there's no BR trigger defined on the partition.
1102 : : */
1816 tgl@sss.pgh.pa.us 1103 [ + + ]: 6145999 : if (resultRelationDesc->rd_rel->relispartition &&
1671 heikki.linnakangas@i 1104 [ + + ]: 370499 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
2644 alvherre@alvh.no-ip. 1105 [ + + ]: 369090 : (resultRelInfo->ri_TrigDesc &&
1106 [ + + ]: 787 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1107 : 1507 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1108 : :
3774 andres@anarazel.de 1109 [ + + + - ]: 6145915 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1110 : 2062 : {
1111 : : /* Perform a speculative insertion. */
1112 : : uint32 specToken;
1113 : : ItemPointerData conflictTid;
1114 : : ItemPointerData invalidItemPtr;
1115 : : bool specConflict;
1116 : : List *arbiterIndexes;
1117 : :
382 akapila@postgresql.o 1118 : 4776 : ItemPointerSetInvalid(&invalidItemPtr);
2721 alvherre@alvh.no-ip. 1119 : 4776 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1120 : :
1121 : : /*
1122 : : * Do a non-conclusive check for conflicts first.
1123 : : *
1124 : : * We're not holding any locks yet, so this doesn't guarantee that
1125 : : * the later insert won't conflict. But it avoids leaving behind
1126 : : * a lot of canceled speculative insertions, if you run a lot of
1127 : : * INSERT ON CONFLICT statements that do conflict.
1128 : : *
1129 : : * We loop back here if we find a conflict below, either during
1130 : : * the pre-check, or when we re-check after inserting the tuple
1131 : : * speculatively. Better allow interrupts in case some bug makes
1132 : : * this an infinite loop.
1133 : : */
3774 andres@anarazel.de 1134 : 5 : vlock:
1129 tgl@sss.pgh.pa.us 1135 [ - + ]: 4781 : CHECK_FOR_INTERRUPTS();
3774 andres@anarazel.de 1136 : 4781 : specConflict = false;
1788 heikki.linnakangas@i 1137 [ + + ]: 4781 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1138 : : &conflictTid, &invalidItemPtr,
1139 : : arbiterIndexes))
1140 : : {
1141 : : /* committed conflict tuple found */
3774 andres@anarazel.de 1142 [ + + ]: 2708 : if (onconflict == ONCONFLICT_UPDATE)
1143 : : {
1144 : : /*
1145 : : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1146 : : * part. Be prepared to retry if the UPDATE fails because
1147 : : * of another concurrent UPDATE/DELETE to the conflict
1148 : : * tuple.
1149 : : */
1150 : 2602 : TupleTableSlot *returning = NULL;
1151 : :
1269 alvherre@alvh.no-ip. 1152 [ + - ]: 2602 : if (ExecOnConflictUpdate(context, resultRelInfo,
1153 : : &conflictTid, slot, canSetTag,
1154 : : &returning))
1155 : : {
2706 1156 [ - + ]: 2563 : InstrCountTuples2(&mtstate->ps, 1);
3774 andres@anarazel.de 1157 : 2563 : return returning;
1158 : : }
1159 : : else
3774 andres@anarazel.de 1160 :UBC 0 : goto vlock;
1161 : : }
1162 : : else
1163 : : {
1164 : : /*
1165 : : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1166 : : * verify that the tuple is visible to the executor's MVCC
1167 : : * snapshot at higher isolation levels.
1168 : : *
1169 : : * Using ExecGetReturningSlot() to store the tuple for the
1170 : : * recheck isn't that pretty, but we can't trivially use
1171 : : * the input slot, because it might not be of a compatible
1172 : : * type. As there's no conflicting usage of
1173 : : * ExecGetReturningSlot() in the DO NOTHING case...
1174 : : */
3774 andres@anarazel.de 1175 [ - + ]:CBC 106 : Assert(onconflict == ONCONFLICT_NOTHING);
2359 1176 : 106 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1177 : : ExecGetReturningSlot(estate, resultRelInfo));
2706 alvherre@alvh.no-ip. 1178 [ - + ]: 96 : InstrCountTuples2(&mtstate->ps, 1);
3774 andres@anarazel.de 1179 : 96 : return NULL;
1180 : : }
1181 : : }
1182 : :
1183 : : /*
1184 : : * Before we start insertion proper, acquire our "speculative
1185 : : * insertion lock". Others can use that to wait for us to decide
1186 : : * if we're going to go ahead with the insertion, instead of
1187 : : * waiting for the whole transaction to complete.
1188 : : */
1189 : 2070 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1190 : :
1191 : : /* insert the tuple, with the speculative token */
2298 1192 : 2070 : table_tuple_insert_speculative(resultRelationDesc, slot,
1193 : : estate->es_output_cid,
1194 : : 0,
1195 : : NULL,
1196 : : specToken);
1197 : :
1198 : : /* insert index entries for tuple */
1788 heikki.linnakangas@i 1199 : 2070 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1200 : : slot, estate, false, true,
1201 : : &specConflict,
1202 : : arbiterIndexes,
1203 : : false);
1204 : :
1205 : : /* adjust the tuple's state accordingly */
2298 andres@anarazel.de 1206 : 2067 : table_tuple_complete_speculative(resultRelationDesc, slot,
1207 : 2067 : specToken, !specConflict);
1208 : :
1209 : : /*
1210 : : * Wake up anyone waiting for our decision. They will re-check
1211 : : * the tuple, see that it's no longer speculative, and wait on our
1212 : : * XID as if this was a regularly inserted tuple all along. Or if
1213 : : * we killed the tuple, they will see it's dead, and proceed as if
1214 : : * the tuple never existed.
1215 : : */
3774 1216 : 2067 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1217 : :
1218 : : /*
1219 : : * If there was a conflict, start from the beginning. We'll do
1220 : : * the pre-check again, which will now find the conflicting tuple
1221 : : * (unless it aborts before we get there).
1222 : : */
1223 [ + + ]: 2067 : if (specConflict)
1224 : : {
1225 : 5 : list_free(recheckIndexes);
1226 : 5 : goto vlock;
1227 : : }
1228 : :
1229 : : /* Since there was no insertion conflict, we're done */
1230 : : }
1231 : : else
1232 : : {
1233 : : /* insert the tuple normally */
513 akorotkov@postgresql 1234 : 6141139 : table_tuple_insert(resultRelationDesc, slot,
1235 : : estate->es_output_cid,
1236 : : 0, NULL);
1237 : :
1238 : : /* insert index entries for tuple */
1239 [ + + ]: 6141127 : if (resultRelInfo->ri_NumIndices > 0)
1788 heikki.linnakangas@i 1240 : 1454514 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1241 : : slot, estate, false,
1242 : : false, NULL, NIL,
1243 : : false);
1244 : : }
1245 : : }
1246 : :
5307 tgl@sss.pgh.pa.us 1247 [ + + ]: 6143841 : if (canSetTag)
1248 : 6143252 : (estate->es_processed)++;
1249 : :
1250 : : /*
1251 : : * If this insert is the result of a partition key update that moved the
1252 : : * tuple to a new partition, put this row into the transition NEW TABLE,
1253 : : * if there is one. We need to do this separately for DELETE and INSERT
1254 : : * because they happen on different tables.
1255 : : */
2787 rhaas@postgresql.org 1256 : 6143841 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1257 [ + + + + ]: 6143841 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1258 [ + + ]: 27 : && mtstate->mt_transition_capture->tcs_update_new_table)
1259 : : {
1266 alvherre@alvh.no-ip. 1260 : 24 : ExecARUpdateTriggers(estate, resultRelInfo,
1261 : : NULL, NULL,
1262 : : NULL,
1263 : : NULL,
1264 : : slot,
1265 : : NULL,
1266 : 24 : mtstate->mt_transition_capture,
1267 : : false);
1268 : :
1269 : : /*
1270 : : * We've already captured the NEW TABLE row, so make sure any AR
1271 : : * INSERT trigger fired below doesn't capture it again.
1272 : : */
2787 rhaas@postgresql.org 1273 : 24 : ar_insert_trig_tcs = NULL;
1274 : : }
1275 : :
1276 : : /* AFTER ROW INSERT Triggers */
2384 andres@anarazel.de 1277 : 6143841 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1278 : : ar_insert_trig_tcs);
1279 : :
5697 tgl@sss.pgh.pa.us 1280 : 6143840 : list_free(recheckIndexes);
1281 : :
1282 : : /*
1283 : : * Check any WITH CHECK OPTION constraints from parent views. We are
1284 : : * required to do this after testing all constraints and uniqueness
1285 : : * violations per the SQL spec, so we do it after actually inserting the
1286 : : * record into the heap and all indexes.
1287 : : *
1288 : : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1289 : : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1290 : : *
1291 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1292 : : * are looking for at this point.
1293 : : */
4433 sfrost@snowman.net 1294 [ + + ]: 6143840 : if (resultRelInfo->ri_WithCheckOptions != NIL)
3788 1295 : 197 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1296 : :
1297 : : /* Process RETURNING if present */
5810 tgl@sss.pgh.pa.us 1298 [ + + ]: 6143767 : if (resultRelInfo->ri_projectReturning)
1299 : : {
233 dean.a.rasheed@gmail 1300 : 1801 : TupleTableSlot *oldSlot = NULL;
1301 : :
1302 : : /*
1303 : : * If this is part of a cross-partition UPDATE, and the RETURNING list
1304 : : * refers to any OLD columns, ExecDelete() will have saved the tuple
1305 : : * deleted from the original partition, which we must use here to
1306 : : * compute the OLD column values. Otherwise, all OLD column values
1307 : : * will be NULL.
1308 : : */
1309 [ + + ]: 1801 : if (context->cpDeletedSlot)
1310 : : {
1311 : : TupleConversionMap *tupconv_map;
1312 : :
1313 : : /*
1314 : : * Convert the OLD tuple to the new partition's format/slot, if
1315 : : * needed. Note that ExecDelete() already converted it to the
1316 : : * root's partition's format/slot.
1317 : : */
1318 : 22 : oldSlot = context->cpDeletedSlot;
1319 : 22 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1320 [ + + ]: 22 : if (tupconv_map != NULL)
1321 : : {
1322 : 7 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1323 : : oldSlot,
1324 : : ExecGetReturningSlot(estate,
1325 : : resultRelInfo));
1326 : :
1327 : 7 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1328 : 7 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1329 : : }
1330 : : }
1331 : :
1332 : 1801 : result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT,
1333 : : oldSlot, slot, planSlot);
1334 : :
1335 : : /*
1336 : : * For a cross-partition UPDATE, release the old tuple, first making
1337 : : * sure that the result slot has a local copy of any pass-by-reference
1338 : : * values.
1339 : : */
1340 [ + + ]: 1795 : if (context->cpDeletedSlot)
1341 : : {
1342 : 22 : ExecMaterializeSlot(result);
1343 : 22 : ExecClearTuple(oldSlot);
1344 [ + + ]: 22 : if (context->cpDeletedSlot != oldSlot)
1345 : 7 : ExecClearTuple(context->cpDeletedSlot);
1346 : 22 : context->cpDeletedSlot = NULL;
1347 : : }
1348 : : }
1349 : :
1266 alvherre@alvh.no-ip. 1350 [ + + ]: 6143761 : if (inserted_tuple)
1351 : 406 : *inserted_tuple = slot;
1352 [ + + ]: 6143761 : if (insert_destrel)
1353 : 406 : *insert_destrel = resultRelInfo;
1354 : :
3152 rhaas@postgresql.org 1355 : 6143761 : return result;
1356 : : }
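: :
: : /*
: : * Illustrative sketch (not from the original source; table and column
: : * names are hypothetical): the cpDeletedSlot handling above supports
: : * cross-partition row movement under queries such as
: : *
: : * UPDATE parted SET pkey = pkey + 10 RETURNING old.pkey, new.pkey;
: : *
: : * ExecDelete() saves the tuple removed from the source partition so
: : * that the RETURNING projection run here can still compute the old.*
: : * columns after the row has been re-inserted elsewhere.
: : */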
1357 : :
1358 : : /* ----------------------------------------------------------------
1359 : : * ExecBatchInsert
1360 : : *
1361 : : * Insert multiple tuples in an efficient way.
1362 : : * Currently, this handles inserting into a foreign table without a
1363 : : * RETURNING clause.
1364 : : * ----------------------------------------------------------------
1365 : : */
1366 : : static void
1690 tomas.vondra@postgre 1367 : 29 : ExecBatchInsert(ModifyTableState *mtstate,
1368 : : ResultRelInfo *resultRelInfo,
1369 : : TupleTableSlot **slots,
1370 : : TupleTableSlot **planSlots,
1371 : : int numSlots,
1372 : : EState *estate,
1373 : : bool canSetTag)
1374 : : {
1375 : : int i;
1376 : 29 : int numInserted = numSlots;
1377 : 29 : TupleTableSlot *slot = NULL;
1378 : : TupleTableSlot **rslots;
1379 : :
1380 : : /*
1381 : : * insert into foreign table: let the FDW do it
1382 : : */
1383 : 29 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1384 : : resultRelInfo,
1385 : : slots,
1386 : : planSlots,
1387 : : &numInserted);
1388 : :
1389 [ + + ]: 173 : for (i = 0; i < numInserted; i++)
1390 : : {
1391 : 145 : slot = rslots[i];
1392 : :
1393 : : /*
1394 : : * AFTER ROW Triggers might reference the tableoid column, so
1395 : : * (re-)initialize tts_tableOid before evaluating them.
1396 : : */
1397 : 145 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1398 : :
1399 : : /* AFTER ROW INSERT Triggers */
1400 : 145 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1401 : 145 : mtstate->mt_transition_capture);
1402 : :
1403 : : /*
1404 : : * Check any WITH CHECK OPTION constraints from parent views. See the
1405 : : * comment in ExecInsert.
1406 : : */
1407 [ - + ]: 144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1690 tomas.vondra@postgre 1408 :UBC 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1409 : : }
1410 : :
1690 tomas.vondra@postgre 1411 [ + - + - ]:CBC 28 : if (canSetTag && numInserted > 0)
1412 : 28 : estate->es_processed += numInserted;
1413 : :
1414 : : /* Clean up all the slots, ready for the next batch */
865 michael@paquier.xyz 1415 [ + + ]: 172 : for (i = 0; i < numSlots; i++)
1416 : : {
1417 : 144 : ExecClearTuple(slots[i]);
1418 : 144 : ExecClearTuple(planSlots[i]);
1419 : : }
1420 : 28 : resultRelInfo->ri_NumSlots = 0;
1690 tomas.vondra@postgre 1421 : 28 : }
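: :
: : /*
: : * Illustrative sketch (not from the original source): an FDW opts into
: : * this batching path by providing ExecForeignBatchInsert and
: : * GetForeignModifyBatchSize in its FdwRoutine. With postgres_fdw, for
: : * example, the batch size can be set per table (the table name here is
: : * hypothetical):
: : *
: : * ALTER FOREIGN TABLE ft OPTIONS (ADD batch_size '100');
: : */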
1422 : :
1423 : : /*
1424 : : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1425 : : */
1426 : : static void
1016 efujita@postgresql.o 1427 : 18 : ExecPendingInserts(EState *estate)
1428 : : {
1429 : : ListCell *l1,
1430 : : *l2;
1431 : :
1003 1432 [ + - + + : 36 : forboth(l1, estate->es_insert_pending_result_relations,
+ - + + +
+ + - +
+ ]
1433 : : l2, estate->es_insert_pending_modifytables)
1434 : : {
1435 : 19 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1436 : 19 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1437 : :
1016 1438 [ - + ]: 19 : Assert(mtstate);
1439 : 19 : ExecBatchInsert(mtstate, resultRelInfo,
1440 : : resultRelInfo->ri_Slots,
1441 : : resultRelInfo->ri_PlanSlots,
1442 : : resultRelInfo->ri_NumSlots,
1443 : 19 : estate, mtstate->canSetTag);
1444 : : }
1445 : :
1446 : 17 : list_free(estate->es_insert_pending_result_relations);
1003 1447 : 17 : list_free(estate->es_insert_pending_modifytables);
1016 1448 : 17 : estate->es_insert_pending_result_relations = NIL;
1003 1449 : 17 : estate->es_insert_pending_modifytables = NIL;
1016 1450 : 17 : }
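: :
: : /*
: : * Illustrative sketch (not from the original source; all objects are
: : * hypothetical): flushing matters when later work in the same query
: : * must see rows still queued in a batch. For instance, a BEFORE DELETE
: : * trigger that runs
: : *
: : * SELECT count(*) FROM ft;
: : *
: : * has to observe rows previously queued for insertion into ft, which is
: : * why ExecDeletePrologue() and ExecUpdatePrologue() call this function
: : * before firing BEFORE ROW triggers.
: : */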
1451 : :
1452 : : /*
1453 : : * ExecDeletePrologue -- subroutine for ExecDelete
1454 : : *
1455 : : * Prepare executor state for DELETE. Actually, the only thing we have to do
1456 : : * here is execute BEFORE ROW triggers. We return false if one of them makes
1457 : : * the delete a no-op; otherwise, return true.
1458 : : */
1459 : : static bool
1269 alvherre@alvh.no-ip. 1460 : 769212 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1461 : : ItemPointer tupleid, HeapTuple oldtuple,
1462 : : TupleTableSlot **epqreturnslot, TM_Result *result)
1463 : : {
908 dean.a.rasheed@gmail 1464 [ + + ]: 769212 : if (result)
1465 : 783 : *result = TM_Ok;
1466 : :
1467 : : /* BEFORE ROW DELETE triggers */
1269 alvherre@alvh.no-ip. 1468 [ + + ]: 769212 : if (resultRelInfo->ri_TrigDesc &&
1469 [ + + ]: 3501 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1470 : : {
1471 : : /* Flush any pending inserts, so rows are visible to the triggers */
1016 efujita@postgresql.o 1472 [ + + ]: 173 : if (context->estate->es_insert_pending_result_relations != NIL)
1473 : 1 : ExecPendingInserts(context->estate);
1474 : :
1269 alvherre@alvh.no-ip. 1475 : 165 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1476 : : resultRelInfo, tupleid, oldtuple,
1477 : : epqreturnslot, result, &context->tmfd,
50 dean.a.rasheed@gmail 1478 : 173 : context->mtstate->operation == CMD_MERGE);
1479 : : }
1480 : :
1269 alvherre@alvh.no-ip. 1481 : 769039 : return true;
1482 : : }
1483 : :
1484 : : /*
1485 : : * ExecDeleteAct -- subroutine for ExecDelete
1486 : : *
1487 : : * Actually delete the tuple from a plain table.
1488 : : *
1489 : : * Caller is in charge of doing EvalPlanQual as necessary
1490 : : */
1491 : : static TM_Result
1492 : 769121 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1493 : : ItemPointer tupleid, bool changingPart)
1494 : : {
1495 : 769121 : EState *estate = context->estate;
1496 : :
1497 : 769121 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1498 : : estate->es_output_cid,
1499 : : estate->es_snapshot,
1500 : : estate->es_crosscheck_snapshot,
1501 : : true /* wait for commit */ ,
1502 : : &context->tmfd,
1503 : : changingPart);
1504 : : }
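: :
: : /*
: : * Note (sketch, not from the original source): the TM_Result returned
: : * here drives the switch in ExecDelete(): TM_Ok means the row was
: : * deleted, while TM_SelfModified, TM_Updated and TM_Deleted report
: : * self-inflicted or concurrent changes that the caller resolves by
: : * erroring out, rechecking with EvalPlanQual, or skipping the row.
: : */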
1505 : :
1506 : : /*
1507 : : * ExecDeleteEpilogue -- subroutine for ExecDelete
1508 : : *
1509 : : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1510 : : * including the UPDATE triggers if the deletion is being done as part of a
1511 : : * cross-partition tuple move.
1512 : : */
1513 : : static void
1514 : 769094 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1515 : : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1516 : : {
1517 : 769094 : ModifyTableState *mtstate = context->mtstate;
1518 : 769094 : EState *estate = context->estate;
1519 : : TransitionCaptureState *ar_delete_trig_tcs;
1520 : :
1521 : : /*
1522 : : * If this delete is the result of a partition key update that moved the
1523 : : * tuple to a new partition, put this row into the transition OLD TABLE,
1524 : : * if there is one. We need to do this separately for DELETE and INSERT
1525 : : * because they happen on different tables.
1526 : : */
1527 : 769094 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1528 [ + + + + ]: 769094 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1529 [ + + ]: 27 : mtstate->mt_transition_capture->tcs_update_old_table)
1530 : : {
1266 1531 : 24 : ExecARUpdateTriggers(estate, resultRelInfo,
1532 : : NULL, NULL,
1533 : : tupleid, oldtuple,
513 akorotkov@postgresql 1534 : 24 : NULL, NULL, mtstate->mt_transition_capture,
1535 : : false);
1536 : :
1537 : : /*
1538 : : * We've already captured the OLD TABLE row, so make sure any AR
1539 : : * DELETE trigger fired below doesn't capture it again.
1540 : : */
1269 alvherre@alvh.no-ip. 1541 : 24 : ar_delete_trig_tcs = NULL;
1542 : : }
1543 : :
1544 : : /* AFTER ROW DELETE Triggers */
513 akorotkov@postgresql 1545 : 769094 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1546 : : ar_delete_trig_tcs, changingPart);
1269 alvherre@alvh.no-ip. 1547 : 769092 : }
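: :
: : /*
: : * Illustrative note (sketch, not from the original source; names are
: : * hypothetical): for a statement-level UPDATE trigger on the root table
: : * declared with REFERENCING OLD TABLE AS ot NEW TABLE AS nt, a
: : * cross-partition UPDATE still contributes exactly one row to each
: : * transition table: the ExecARUpdateTriggers() call above captures the
: : * old row, ExecInsert() captures the new one, and clearing
: : * ar_delete_trig_tcs keeps the AR DELETE trigger from capturing the
: : * old row a second time.
: : */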
1548 : :
1549 : : /* ----------------------------------------------------------------
1550 : : * ExecDelete
1551 : : *
1552 : : * DELETE is like UPDATE, except that we delete the tuple and no
1553 : : * index modifications are needed.
1554 : : *
1555 : : * When deleting from a table, tupleid identifies the tuple to delete and
1556 : : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1557 : : * oldtuple is passed to the triggers and identifies what to delete, and
1558 : : * tupleid is invalid. When deleting from a foreign table, tupleid is
1559 : : * invalid; the FDW has to figure out which row to delete using data from
1560 : : * the planSlot. oldtuple is passed to foreign table triggers; it is
1561 : : * NULL when the foreign table has no relevant triggers. We use
1562 : : * tupleDeleted to indicate whether the tuple is actually deleted;
1563 : : * callers can use it to decide whether to continue the operation. When
1564 : : * this DELETE is part of an UPDATE of the partition key, the slot
1565 : : * returned by EvalPlanQual() is passed back using the output parameter
1566 : : * epqreturnslot.
1567 : : *
1568 : : * Returns RETURNING result if any, otherwise NULL.
1569 : : * ----------------------------------------------------------------
1570 : : */
1571 : : static TupleTableSlot *
1572 : 768955 : ExecDelete(ModifyTableContext *context,
1573 : : ResultRelInfo *resultRelInfo,
1574 : : ItemPointer tupleid,
1575 : : HeapTuple oldtuple,
1576 : : bool processReturning,
1577 : : bool changingPart,
1578 : : bool canSetTag,
1579 : : TM_Result *tmresult,
1580 : : bool *tupleDeleted,
1581 : : TupleTableSlot **epqreturnslot)
1582 : : {
1583 : 768955 : EState *estate = context->estate;
1788 heikki.linnakangas@i 1584 : 768955 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
4563 tgl@sss.pgh.pa.us 1585 : 768955 : TupleTableSlot *slot = NULL;
1586 : : TM_Result result;
1587 : : bool saveOld;
1588 : :
2787 rhaas@postgresql.org 1589 [ + + ]: 768955 : if (tupleDeleted)
1590 : 526 : *tupleDeleted = false;
1591 : :
1592 : : /*
1593 : : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1594 : : * done if it says we are.
1595 : : */
1269 alvherre@alvh.no-ip. 1596 [ + + ]: 768955 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1597 : : epqreturnslot, tmresult))
1598 : 26 : return NULL;
1599 : :
1600 : : /* INSTEAD OF ROW DELETE Triggers */
5445 tgl@sss.pgh.pa.us 1601 [ + + ]: 768921 : if (resultRelInfo->ri_TrigDesc &&
1602 [ + + ]: 3439 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
5810 1603 : 24 : {
1604 : : bool dodelete;
1605 : :
5445 1606 [ - + ]: 27 : Assert(oldtuple != NULL);
4185 noah@leadboat.com 1607 : 27 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1608 : :
5445 tgl@sss.pgh.pa.us 1609 [ + + ]: 27 : if (!dodelete) /* "do nothing" */
5810 1610 : 3 : return NULL;
1611 : : }
4563 1612 [ + + ]: 768894 : else if (resultRelInfo->ri_FdwRoutine)
1613 : : {
1614 : : /*
1615 : : * delete from foreign table: let the FDW do it
1616 : : *
1617 : : * We offer the returning slot as a place to store RETURNING data,
1618 : : * although the FDW can return some other slot if it wants.
1619 : : */
2384 andres@anarazel.de 1620 : 23 : slot = ExecGetReturningSlot(estate, resultRelInfo);
4563 tgl@sss.pgh.pa.us 1621 : 23 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1622 : : resultRelInfo,
1623 : : slot,
1624 : : context->planSlot);
1625 : :
1626 [ - + ]: 23 : if (slot == NULL) /* "do nothing" */
4563 tgl@sss.pgh.pa.us 1627 :UBC 0 : return NULL;
1628 : :
1629 : : /*
1630 : : * RETURNING expressions might reference the tableoid column, so
1631 : : * (re)initialize tts_tableOid before evaluating them.
1632 : : */
2518 andres@anarazel.de 1633 [ + + ]:CBC 23 : if (TTS_EMPTY(slot))
3502 rhaas@postgresql.org 1634 : 5 : ExecStoreAllNullTuple(slot);
1635 : :
2384 andres@anarazel.de 1636 : 23 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1637 : : }
1638 : : else
1639 : : {
1640 : : /*
1641 : : * delete the tuple
1642 : : *
1643 : : * Note: if context->estate->es_crosscheck_snapshot isn't
1644 : : * InvalidSnapshot, we check that the row to be deleted is visible to
1645 : : * that snapshot, and throw a can't-serialize error if not. This is a
1646 : : * special-case behavior needed for referential integrity updates in
1647 : : * transaction-snapshot mode transactions.
1648 : : */
1062 john.naylor@postgres 1649 : 768871 : ldelete:
513 akorotkov@postgresql 1650 : 768873 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1651 : :
625 dean.a.rasheed@gmail 1652 [ + + ]: 768855 : if (tmresult)
1653 : 509 : *tmresult = result;
1654 : :
5445 tgl@sss.pgh.pa.us 1655 [ + + + + : 768855 : switch (result)
- ]
1656 : : {
2359 andres@anarazel.de 1657 : 15 : case TM_SelfModified:
1658 : :
1659 : : /*
1660 : : * The target tuple was already updated or deleted by the
1661 : : * current command, or by a later command in the current
1662 : : * transaction. The former case is possible in a join DELETE
1663 : : * where multiple tuples join to the same target tuple. This
1664 : : * is somewhat questionable, but Postgres has always allowed
1665 : : * it: we just ignore additional deletion attempts.
1666 : : *
1667 : : * The latter case arises if the tuple is modified by a
1668 : : * command in a BEFORE trigger, or perhaps by a command in a
1669 : : * volatile function used in the query. In such situations we
1670 : : * should not ignore the deletion, but it is equally unsafe to
1671 : : * proceed. We don't want to discard the original DELETE
1672 : : * while keeping the triggered actions based on its deletion;
1673 : : * and it would be no better to allow the original DELETE
1674 : : * while discarding updates that it triggered. The row update
1675 : : * carries some information that might be important according
1676 : : * to business rules; so throwing an error is the only safe
1677 : : * course.
1678 : : *
1679 : : * If a trigger actually intends this type of interaction, it
1680 : : * can re-execute the DELETE and then return NULL to cancel
1681 : : * the outer delete.
1682 : : */
1269 alvherre@alvh.no-ip. 1683 [ + + ]: 15 : if (context->tmfd.cmax != estate->es_output_cid)
4698 kgrittn@postgresql.o 1684 [ + - ]: 3 : ereport(ERROR,
1685 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1686 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1687 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1688 : :
1689 : : /* Else, already deleted by self; nothing to do */
5445 tgl@sss.pgh.pa.us 1690 : 12 : return NULL;
1691 : :
2359 andres@anarazel.de 1692 : 768805 : case TM_Ok:
5445 tgl@sss.pgh.pa.us 1693 : 768805 : break;
1694 : :
2359 andres@anarazel.de 1695 : 32 : case TM_Updated:
1696 : : {
1697 : : TupleTableSlot *inputslot;
1698 : : TupleTableSlot *epqslot;
1699 : :
1700 [ + + ]: 32 : if (IsolationUsesXactSnapshot())
1701 [ + - ]: 1 : ereport(ERROR,
1702 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1703 : : errmsg("could not serialize access due to concurrent update")));
1704 : :
1705 : : /*
1706 : : * Already know that we're going to need to do EPQ, so
1707 : : * fetch tuple directly into the right slot.
1708 : : */
513 akorotkov@postgresql 1709 : 31 : EvalPlanQualBegin(context->epqstate);
1710 : 31 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1711 : : resultRelInfo->ri_RangeTableIndex);
1712 : :
1713 : 31 : result = table_tuple_lock(resultRelationDesc, tupleid,
1714 : : estate->es_snapshot,
1715 : : inputslot, estate->es_output_cid,
1716 : : LockTupleExclusive, LockWaitBlock,
1717 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1718 : : &context->tmfd);
1719 : :
1720 [ + + + - ]: 27 : switch (result)
1721 : : {
1722 : 24 : case TM_Ok:
1723 [ - + ]: 24 : Assert(context->tmfd.traversed);
1724 : 24 : epqslot = EvalPlanQual(context->epqstate,
1725 : : resultRelationDesc,
1726 : : resultRelInfo->ri_RangeTableIndex,
1727 : : inputslot);
1728 [ + - + + ]: 24 : if (TupIsNull(epqslot))
1729 : : /* Tuple not passing quals anymore, exiting... */
1730 : 15 : return NULL;
1731 : :
1732 : : /*
1733 : : * If requested, skip delete and pass back the
1734 : : * updated row.
1735 : : */
1736 [ + + ]: 9 : if (epqreturnslot)
1737 : : {
1738 : 7 : *epqreturnslot = epqslot;
1739 : 7 : return NULL;
1740 : : }
1741 : : else
1742 : 2 : goto ldelete;
1743 : :
1744 : 2 : case TM_SelfModified:
1745 : :
1746 : : /*
1747 : : * This can be reached when following an update
1748 : : * chain from a tuple updated by another session,
1749 : : * reaching a tuple that was already updated in
1750 : : * this transaction. If previously updated by this
1751 : : * command, ignore the delete; otherwise error
1752 : : * out.
1753 : : *
1754 : : * See also TM_SelfModified response to
1755 : : * table_tuple_delete() above.
1756 : : */
1757 [ + + ]: 2 : if (context->tmfd.cmax != estate->es_output_cid)
1758 [ + - ]: 1 : ereport(ERROR,
1759 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1760 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1761 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1762 : 1 : return NULL;
1763 : :
1764 : 1 : case TM_Deleted:
1765 : : /* tuple already deleted; nothing to do */
1766 : 1 : return NULL;
1767 : :
513 akorotkov@postgresql 1768 :UBC 0 : default:
1769 : :
1770 : : /*
1771 : : * TM_Invisible should be impossible because we're
1772 : : * waiting for updated row versions, and would
1773 : : * already have errored out if the first version
1774 : : * is invisible.
1775 : : *
1776 : : * TM_Updated should be impossible, because we're
1777 : : * locking the latest version via
1778 : : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1779 : : */
1780 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1781 : : result);
1782 : : return NULL;
1783 : : }
1784 : :
1785 : : Assert(false);
1786 : : break;
1787 : : }
1788 : :
2359 andres@anarazel.de 1789 :CBC 3 : case TM_Deleted:
1790 [ - + ]: 3 : if (IsolationUsesXactSnapshot())
2359 andres@anarazel.de 1791 [ # # ]:UBC 0 : ereport(ERROR,
1792 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1793 : : errmsg("could not serialize access due to concurrent delete")));
1794 : : /* tuple already deleted; nothing to do */
5445 tgl@sss.pgh.pa.us 1795 :CBC 3 : return NULL;
1796 : :
5445 tgl@sss.pgh.pa.us 1797 :UBC 0 : default:
2298 andres@anarazel.de 1798 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1799 : : result);
1800 : : return NULL;
1801 : : }
1802 : :
1803 : : /*
1804 : : * Note: Normally one would think that we have to delete index tuples
1805 : : * associated with the heap tuple now...
1806 : : *
1807 : : * ... but in POSTGRES, we have no need to do this because VACUUM will
1808 : : * take care of it later. We can't delete index tuples immediately
1809 : : * anyway, since the tuple is still visible to other transactions.
1810 : : */
1811 : : }
1812 : :
5307 tgl@sss.pgh.pa.us 1813 [ + + ]:CBC 768852 : if (canSetTag)
1814 : 768254 : (estate->es_processed)++;
1815 : :
1816 : : /* Tell caller that the delete actually happened. */
2787 rhaas@postgresql.org 1817 [ + + ]: 768852 : if (tupleDeleted)
1818 : 484 : *tupleDeleted = true;
1819 : :
513 akorotkov@postgresql 1820 : 768852 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1821 : :
1822 : : /*
1823 : : * Process RETURNING if present and if requested.
1824 : : *
1825 : : * If this is part of a cross-partition UPDATE, and the RETURNING list
1826 : : * refers to any OLD column values, save the old tuple here for later
1827 : : * processing of the RETURNING list by ExecInsert().
1828 : : */
233 dean.a.rasheed@gmail 1829 [ + + + + ]: 768923 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1830 [ + + ]: 73 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1831 : :
1832 [ + + + + : 768850 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
+ + ]
1833 : : {
1834 : : /*
1835 : : * We have to put the target tuple into a slot, which means first we
1836 : : * have to fetch it. We can use the trigger tuple slot.
1837 : : */
1838 : : TupleTableSlot *rslot;
1839 : :
4563 tgl@sss.pgh.pa.us 1840 [ + + ]: 501 : if (resultRelInfo->ri_FdwRoutine)
1841 : : {
1842 : : /* FDW must have provided a slot containing the deleted row */
1843 [ + - - + ]: 7 : Assert(!TupIsNull(slot));
1844 : : }
1845 : : else
1846 : : {
2384 andres@anarazel.de 1847 : 494 : slot = ExecGetReturningSlot(estate, resultRelInfo);
4563 tgl@sss.pgh.pa.us 1848 [ + + ]: 494 : if (oldtuple != NULL)
1849 : : {
2332 andres@anarazel.de 1850 : 12 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1851 : : }
1852 : : else
1853 : : {
513 akorotkov@postgresql 1854 [ - + ]: 482 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1855 : : SnapshotAny, slot))
513 akorotkov@postgresql 1856 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1857 : : }
1858 : : }
1859 : :
1860 : : /*
1861 : : * If required, save the old tuple for later processing of the
1862 : : * RETURNING list by ExecInsert().
1863 : : */
233 dean.a.rasheed@gmail 1864 [ + + ]:CBC 501 : if (saveOld)
1865 : : {
1866 : : TupleConversionMap *tupconv_map;
1867 : :
1868 : : /*
1869 : : * Convert the tuple into the root partition's format/slot, if
1870 : : * needed. ExecInsert() will then convert it to the new
1871 : : * partition's format/slot, if necessary.
1872 : : */
1873 : 22 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1874 [ + + ]: 22 : if (tupconv_map != NULL)
1875 : : {
1876 : 9 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1877 : 9 : TupleTableSlot *oldSlot = slot;
1878 : :
1879 : 9 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1880 : : slot,
1881 : : ExecGetReturningSlot(estate,
1882 : : rootRelInfo));
1883 : :
1884 : 9 : slot->tts_tableOid = oldSlot->tts_tableOid;
1885 : 9 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1886 : : }
1887 : :
1888 : 22 : context->cpDeletedSlot = slot;
1889 : :
1890 : 22 : return NULL;
1891 : : }
1892 : :
1893 : 479 : rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
1894 : : slot, NULL, context->planSlot);
1895 : :
1896 : : /*
1897 : : * Before releasing the target tuple again, make sure rslot has a
1898 : : * local copy of any pass-by-reference values.
1899 : : */
4563 tgl@sss.pgh.pa.us 1900 : 479 : ExecMaterializeSlot(rslot);
1901 : :
5810 1902 : 479 : ExecClearTuple(slot);
1903 : :
1904 : 479 : return rslot;
1905 : : }
1906 : :
1907 : 768349 : return NULL;
1908 : : }
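: :
: : /*
: : * Illustrative sketch (not from the original source; all objects are
: : * hypothetical): the TM_SelfModified error path above can be reached by
: : * a BEFORE trigger that modifies the row being deleted, e.g.
: : *
: : * CREATE FUNCTION touch() RETURNS trigger LANGUAGE plpgsql AS
: : * $$BEGIN UPDATE t SET n = n + 1 WHERE id = OLD.id; RETURN OLD; END$$;
: : * CREATE TRIGGER t_bd BEFORE DELETE ON t
: : * FOR EACH ROW EXECUTE FUNCTION touch();
: : *
: : * A subsequent DELETE FROM t then fails with "tuple to be deleted was
: : * already modified by an operation triggered by the current command".
: : */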
1909 : :
1910 : : /*
1911 : : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1912 : : *
1913 : : * This works by first deleting the old tuple from the current partition,
1914 : : * followed by inserting the new tuple into the root parent table, that is,
1915 : : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1916 : : * correct partition.
1917 : : *
1918 : : * Returns true if the tuple has been successfully moved, or if it's found
1919 : : * that the tuple was concurrently deleted so there's nothing more to do
1920 : : * for the caller.
1921 : : *
1922 : : * False is returned if the tuple we're trying to move is found to have been
1923 : : * concurrently updated. In that case, the caller must check if the updated
1924 : : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1925 : : * this function again or perform a regular update accordingly. For MERGE,
1926 : : * the updated tuple is not returned in *retry_slot; it has its own retry
1927 : : * logic.
1928 : : */
1929 : : static bool
1269 alvherre@alvh.no-ip. 1930 : 550 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1931 : : ResultRelInfo *resultRelInfo,
1932 : : ItemPointer tupleid, HeapTuple oldtuple,
1933 : : TupleTableSlot *slot,
1934 : : bool canSetTag,
1935 : : UpdateContext *updateCxt,
1936 : : TM_Result *tmresult,
1937 : : TupleTableSlot **retry_slot,
1938 : : TupleTableSlot **inserted_tuple,
1939 : : ResultRelInfo **insert_destrel)
1940 : : {
1941 : 550 : ModifyTableState *mtstate = context->mtstate;
1787 heikki.linnakangas@i 1942 : 550 : EState *estate = mtstate->ps.state;
1943 : : TupleConversionMap *tupconv_map;
1944 : : bool tuple_deleted;
1945 : 550 : TupleTableSlot *epqslot = NULL;
1946 : :
233 dean.a.rasheed@gmail 1947 : 550 : context->cpDeletedSlot = NULL;
1269 alvherre@alvh.no-ip. 1948 : 550 : context->cpUpdateReturningSlot = NULL;
908 dean.a.rasheed@gmail 1949 : 550 : *retry_slot = NULL;
1950 : :
1951 : : /*
1952 : : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1953 : : * to migrate to a different partition. Maybe this can be implemented
1954 : : * some day, but it seems a fringe feature with little redeeming value.
1955 : : */
1787 heikki.linnakangas@i 1956 [ - + ]: 550 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1787 heikki.linnakangas@i 1957 [ # # ]:UBC 0 : ereport(ERROR,
1958 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1959 : : errmsg("invalid ON UPDATE specification"),
1960 : : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1961 : :
1962 : : /*
1963 : : * When an UPDATE is run directly on a leaf partition, simply fail with a
1964 : : * partition constraint violation error.
1965 : : */
1614 tgl@sss.pgh.pa.us 1966 [ + + ]:CBC 550 : if (resultRelInfo == mtstate->rootResultRelInfo)
1787 heikki.linnakangas@i 1967 : 24 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1968 : :
1969 : : /* Initialize tuple routing info if not already done. */
1614 tgl@sss.pgh.pa.us 1970 [ + + ]: 526 : if (mtstate->mt_partition_tuple_routing == NULL)
1971 : : {
1972 : 336 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1973 : : MemoryContext oldcxt;
1974 : :
1975 : : /* Things built here have to last for the query duration. */
1976 : 336 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1977 : :
1978 : 336 : mtstate->mt_partition_tuple_routing =
1979 : 336 : ExecSetupPartitionTupleRouting(estate, rootRel);
1980 : :
1981 : : /*
1982 : : * Before a partition's tuple can be re-routed, it must first be
1983 : : * converted to the root's format, so we'll need a slot for storing
1984 : : * such tuples.
1985 : : */
1986 [ - + ]: 336 : Assert(mtstate->mt_root_tuple_slot == NULL);
1987 : 336 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1988 : :
1989 : 336 : MemoryContextSwitchTo(oldcxt);
1990 : : }
1991 : :
1992 : : /*
1993 : : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1994 : : * We want to return rows from INSERT.
1995 : : */
1269 alvherre@alvh.no-ip. 1996 : 526 : ExecDelete(context, resultRelInfo,
1997 : : tupleid, oldtuple,
1998 : : false, /* processReturning */
1999 : : true, /* changingPart */
2000 : : false, /* canSetTag */
2001 : : tmresult, &tuple_deleted, &epqslot);
2002 : :
2003 : : /*
2004 : : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
2005 : : * it was already deleted by self, or it was concurrently deleted by
2006 : : * another transaction), then we should skip the insert as well;
2007 : : * otherwise, an UPDATE could cause an increase in the total number of
2008 : : * rows across all partitions, which is clearly wrong.
2009 : : *
2010 : : * For a normal UPDATE, the case where the tuple has been the subject of a
2011 : : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
2012 : : * machinery, but for an UPDATE that we've translated into a DELETE from
2013 : : * this partition and an INSERT into some other partition, that's not
2014 : : * available, because CTID chains can't span relation boundaries. We
2015 : : * mimic the semantics to a limited extent by skipping the INSERT if the
2016 : : * DELETE fails to find a tuple. This ensures that two concurrent
2017 : : * attempts to UPDATE the same tuple at the same time can't turn one tuple
2018 : : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
2019 : : * it.
2020 : : */
1787 heikki.linnakangas@i 2021 [ + + ]: 523 : if (!tuple_deleted)
2022 : : {
2023 : : /*
2024 : : * epqslot will typically be NULL. But when ExecDelete() finds that
2025 : : * another transaction has concurrently updated the same row, it
2026 : : * re-fetches the row, skips the delete, and epqslot is set to the
2027 : : * re-fetched tuple slot. In that case, we need to do all the checks
2028 : : * again. For MERGE, we leave everything to the caller (it must do
2029 : : * additional rechecking, and might end up executing a different
2030 : : * action entirely).
2031 : : */
538 dean.a.rasheed@gmail 2032 [ + + ]: 39 : if (mtstate->operation == CMD_MERGE)
625 2033 : 18 : return *tmresult == TM_Ok;
908 2034 [ + + - + ]: 21 : else if (TupIsNull(epqslot))
1787 heikki.linnakangas@i 2035 : 18 : return true;
2036 : : else
2037 : : {
2038 : : /* Fetch the most recent version of old tuple. */
2039 : : TupleTableSlot *oldSlot;
2040 : :
2041 : : /* ... but first, make sure ri_oldTupleSlot is initialized. */
513 akorotkov@postgresql 2042 [ - + ]: 3 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
513 akorotkov@postgresql 2043 :UBC 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
513 akorotkov@postgresql 2044 :CBC 3 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2045 [ - + ]: 3 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2046 : : tupleid,
2047 : : SnapshotAny,
2048 : : oldSlot))
513 akorotkov@postgresql 2049 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
2050 : : /* and project the new tuple to retry the UPDATE with */
908 dean.a.rasheed@gmail 2051 :CBC 3 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2052 : : oldSlot);
1787 heikki.linnakangas@i 2053 : 3 : return false;
2054 : : }
2055 : : }
2056 : :
2057 : : /*
2058 : : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2059 : : * convert the tuple into root's tuple descriptor if needed, since
2060 : : * ExecInsert() starts the search from root.
2061 : : */
1614 tgl@sss.pgh.pa.us 2062 : 484 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1787 heikki.linnakangas@i 2063 [ + + ]: 484 : if (tupconv_map != NULL)
2064 : 157 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2065 : : slot,
2066 : : mtstate->mt_root_tuple_slot);
2067 : :
2068 : : /* Tuple routing starts from the root table. */
1269 alvherre@alvh.no-ip. 2069 : 420 : context->cpUpdateReturningSlot =
1266 2070 : 484 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2071 : : inserted_tuple, insert_destrel);
2072 : :
2073 : : /*
2074 : : * Reset the transition state that may possibly have been written by
2075 : : * INSERT.
2076 : : */
1787 heikki.linnakangas@i 2077 [ + + ]: 420 : if (mtstate->mt_transition_capture)
2078 : 27 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2079 : :
2080 : : /* We're done moving. */
2081 : 420 : return true;
2082 : : }
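: :
: : /*
: : * Illustrative sketch (not from the original source; table names are
: : * hypothetical): the delete-then-reinsert implemented above is what
: : * makes row movement work:
: : *
: : * CREATE TABLE p (a int) PARTITION BY RANGE (a);
: : * CREATE TABLE p1 PARTITION OF p FOR VALUES FROM (0) TO (10);
: : * CREATE TABLE p2 PARTITION OF p FOR VALUES FROM (10) TO (20);
: : * INSERT INTO p VALUES (5);
: : * UPDATE p SET a = 15; -- DELETE from p1 plus re-routed INSERT into p2
: : *
: : * Running the same UPDATE directly on p1 instead raises a partition
: : * constraint violation, per the check at the top of this function.
: : */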
2083 : :
2084 : : /*
2085 : : * ExecUpdatePrologue -- subroutine for ExecUpdate
2086 : : *
2087 : : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2088 : : * triggers. We return false if one of them makes the update a no-op;
2089 : : * otherwise, return true.
2090 : : */
2091 : : static bool
1269 alvherre@alvh.no-ip. 2092 : 162418 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2093 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2094 : : TM_Result *result)
2095 : : {
2096 : 162418 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2097 : :
908 dean.a.rasheed@gmail 2098 [ + + ]: 162418 : if (result)
2099 : 1077 : *result = TM_Ok;
2100 : :
1269 alvherre@alvh.no-ip. 2101 : 162418 : ExecMaterializeSlot(slot);
2102 : :
2103 : : /*
2104 : : * Open the table's indexes, if we have not done so already, so that we
2105 : : * can add new index entries for the updated tuple.
2106 : : */
2107 [ + + ]: 162418 : if (resultRelationDesc->rd_rel->relhasindex &&
2108 [ + + ]: 116928 : resultRelInfo->ri_IndexRelationDescs == NULL)
2109 : 4417 : ExecOpenIndices(resultRelInfo, false);
2110 : :
2111 : : /* BEFORE ROW UPDATE triggers */
2112 [ + + ]: 162418 : if (resultRelInfo->ri_TrigDesc &&
2113 [ + + ]: 3131 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2114 : : {
2115 : : /* Flush any pending inserts, so rows are visible to the triggers */
1016 efujita@postgresql.o 2116 [ + + ]: 1284 : if (context->estate->es_insert_pending_result_relations != NIL)
2117 : 1 : ExecPendingInserts(context->estate);
2118 : :
1269 alvherre@alvh.no-ip. 2119 : 1272 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2120 : : resultRelInfo, tupleid, oldtuple, slot,
2121 : : result, &context->tmfd,
50 dean.a.rasheed@gmail 2122 : 1284 : context->mtstate->operation == CMD_MERGE);
2123 : : }
2124 : :
1269 alvherre@alvh.no-ip. 2125 : 161134 : return true;
2126 : : }
2127 : :
2128 : : /*
2129 : : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2130 : : *
2131 : : * Apply the final modifications to the tuple slot before the update.
2132 : : * (This is split out because we also need it in the foreign-table code path.)
2133 : : */
2134 : : static void
2135 : 162280 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2136 : : TupleTableSlot *slot,
2137 : : EState *estate)
2138 : : {
2139 : 162280 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2140 : :
2141 : : /*
2142 : : * Constraints and GENERATED expressions might reference the tableoid
2143 : : * column, so (re-)initialize tts_tableOid before evaluating them.
2144 : : */
2145 : 162280 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2146 : :
2147 : : /*
2148 : : * Compute stored generated columns
2149 : : */
2150 [ + + ]: 162280 : if (resultRelationDesc->rd_att->constr &&
2151 [ + + ]: 98489 : resultRelationDesc->rd_att->constr->has_generated_stored)
2152 : 129 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2153 : : CMD_UPDATE);
2154 : 162280 : }
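: :
: : /*
: : * Illustrative sketch (not from the original source; the table is
: : * hypothetical): recomputing stored generated columns here is what
: : * keeps them consistent on UPDATE:
: : *
: : * CREATE TABLE t (w int, area int GENERATED ALWAYS AS (w * w) STORED);
: : * UPDATE t SET w = 3; -- area is recomputed to 9 before the update
: : */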
2155 : :
2156 : : /*
2157 : : * ExecUpdateAct -- subroutine for ExecUpdate
2158 : : *
2159 : : * Actually update the tuple, when operating on a plain table. If the
2160 : : * table is a partition, and the command was called referencing an ancestor
2161 : : * partitioned table, this routine migrates the resulting tuple to another
2162 : : * partition.
2163 : : *
2164 : : * The caller is in charge of keeping indexes current as necessary. The
2165 : : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2166 : : * be concurrently updated. However, in case of a cross-partition update,
2167 : : * this routine does it.
2168 : : */
2169 : : static TM_Result
2170 : 162182 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2171 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2172 : : bool canSetTag, UpdateContext *updateCxt)
2173 : : {
2174 : 162182 : EState *estate = context->estate;
2175 : 162182 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2176 : : bool partition_constraint_failed;
2177 : : TM_Result result;
2178 : :
2179 : 162182 : updateCxt->crossPartUpdate = false;
2180 : :
2181 : : /*
2182 : : * If we move the tuple to a new partition, we loop back here to recompute
2183 : : * GENERATED values (which are allowed to be different across partitions)
2184 : : * and recheck any RLS policies and constraints. We do not fire any
2185 : : * BEFORE triggers of the new partition, however.
2186 : : */
1062 john.naylor@postgres 2187 : 162185 : lreplace:
2188 : : /* Fill in GENERATEd columns */
915 tgl@sss.pgh.pa.us 2189 : 162185 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2190 : :
2191 : : /* ensure slot is independent, consider e.g. EPQ */
1269 alvherre@alvh.no-ip. 2192 : 162185 : ExecMaterializeSlot(slot);
2193 : :
2194 : : /*
2195 : : * If partition constraint fails, this row might get moved to another
2196 : : * partition, in which case we should check the RLS CHECK policy just
2197 : : * before inserting into the new partition, rather than doing it here.
2198 : : * This is because a trigger on that partition might again change the row.
2199 : : * So skip the WCO checks if the partition constraint fails.
2200 : : */
2201 : 162185 : partition_constraint_failed =
2202 [ + + ]: 163551 : resultRelationDesc->rd_rel->relispartition &&
2203 [ + + ]: 1366 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2204 : :
2205 : : /* Check any RLS UPDATE WITH CHECK policies */
2206 [ + + ]: 162185 : if (!partition_constraint_failed &&
2207 [ + + ]: 161635 : resultRelInfo->ri_WithCheckOptions != NIL)
2208 : : {
2209 : : /*
2210 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2211 : : * we are looking for at this point.
2212 : : */
2213 : 246 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2214 : : resultRelInfo, slot, estate);
2215 : : }
2216 : :
2217 : : /*
2218 : : * If a partition check failed, try to move the row into the right
2219 : : * partition.
2220 : : */
2221 [ + + ]: 162158 : if (partition_constraint_failed)
2222 : : {
2223 : : TupleTableSlot *inserted_tuple,
2224 : : *retry_slot;
1266 2225 : 550 : ResultRelInfo *insert_destrel = NULL;
2226 : :
2227 : : /*
2228 : : * ExecCrossPartitionUpdate will first DELETE the row from the
2229 : : * partition it's currently in and then insert it back into the root
2230 : : * table, which will re-route it to the correct partition. However,
2231 : : * if the tuple has been concurrently updated, a retry is needed.
2232 : : */
1269 2233 [ + + ]: 550 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2234 : : tupleid, oldtuple, slot,
2235 : : canSetTag, updateCxt,
2236 : : &result,
2237 : : &retry_slot,
2238 : : &inserted_tuple,
2239 : : &insert_destrel))
2240 : : {
2241 : : /* success! */
2242 : 450 : updateCxt->crossPartUpdate = true;
2243 : :
2244 : : /*
2245 : : * If the partitioned table being updated is referenced in foreign
2246 : : * keys, queue up trigger events to check that none of them were
2247 : : * violated. No special treatment is needed in
2248 : : * non-cross-partition update situations, because the leaf
2249 : : * partition's AR update triggers will take care of that. During
2250 : : * cross-partition updates implemented as delete on the source
2251 : : * partition followed by insert on the destination partition,
2252 : : * AR-UPDATE triggers of the root table (that is, the table
2253 : : * mentioned in the query) must be fired.
2254 : : *
2255 : : * NULL insert_destrel means that the move failed to occur, that
2256 : : * is, the update failed, so no need to do anything in that case.
2257 : : */
1266 2258 [ + + ]: 450 : if (insert_destrel &&
2259 [ + + ]: 406 : resultRelInfo->ri_TrigDesc &&
2260 [ + + ]: 181 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2261 : 150 : ExecCrossPartitionUpdateForeignKey(context,
2262 : : resultRelInfo,
2263 : : insert_destrel,
2264 : : tupleid, slot,
2265 : : inserted_tuple);
2266 : :
1269 2267 : 453 : return TM_Ok;
2268 : : }
2269 : :
2270 : : /*
2271 : : * No luck, a retry is needed. If running MERGE, we do not do so
2272 : : * here; instead let it handle that under its own rules.
2273 : : */
538 dean.a.rasheed@gmail 2274 [ + + ]: 9 : if (context->mtstate->operation == CMD_MERGE)
625 2275 : 6 : return result;
2276 : :
2277 : : /*
2278 : : * ExecCrossPartitionUpdate installed an updated version of the new
2279 : : * tuple in the retry slot; start over.
2280 : : */
908 2281 : 3 : slot = retry_slot;
1269 alvherre@alvh.no-ip. 2282 : 3 : goto lreplace;
2283 : : }
2284 : :
2285 : : /*
2286 : : * Check the constraints of the tuple. We've already checked the
2287 : : * partition constraint above; however, we must still ensure the tuple
2288 : : * passes all other constraints, so we will call ExecConstraints() and
2289 : : * have it validate all remaining checks.
2290 : : */
2291 [ + + ]: 161608 : if (resultRelationDesc->rd_att->constr)
2292 : 98177 : ExecConstraints(resultRelInfo, slot, estate);
2293 : :
2294 : : /*
2295 : : * replace the heap tuple
2296 : : *
2297 : : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2298 : : * the row to be updated is visible to that snapshot, and throw a
2299 : : * can't-serialize error if not. This is a special-case behavior needed
2300 : : * for referential integrity updates in transaction-snapshot mode
2301 : : * transactions.
2302 : : */
2303 : 161571 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2304 : : estate->es_output_cid,
2305 : : estate->es_snapshot,
2306 : : estate->es_crosscheck_snapshot,
2307 : : true /* wait for commit */ ,
2308 : : &context->tmfd, &updateCxt->lockmode,
2309 : : &updateCxt->updateIndexes);
2310 : :
2311 : 161559 : return result;
2312 : : }
2313 : :
2314 : : /*
2315 : : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2316 : : *
2317 : : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2318 : : * returns indicating that the tuple was updated.
2319 : : */
2320 : : static void
2321 : 161586 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2322 : : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2323 : : HeapTuple oldtuple, TupleTableSlot *slot)
2324 : : {
2325 : 161586 : ModifyTableState *mtstate = context->mtstate;
908 dean.a.rasheed@gmail 2326 : 161586 : List *recheckIndexes = NIL;
2327 : :
2328 : : /* insert index entries for tuple if necessary */
901 tomas.vondra@postgre 2329 [ + + + + ]: 161586 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
1269 alvherre@alvh.no-ip. 2330 : 88559 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2331 : : slot, context->estate,
2332 : : true, false,
2333 : : NULL, NIL,
901 tomas.vondra@postgre 2334 : 88559 : (updateCxt->updateIndexes == TU_Summarizing));
2335 : :
2336 : : /* AFTER ROW UPDATE Triggers */
1269 alvherre@alvh.no-ip. 2337 : 161540 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2338 : : NULL, NULL,
2339 : : tupleid, oldtuple, slot,
2340 : : recheckIndexes,
2341 [ + + ]: 161540 : mtstate->operation == CMD_INSERT ?
2342 : : mtstate->mt_oc_transition_capture :
2343 : : mtstate->mt_transition_capture,
2344 : : false);
2345 : :
908 dean.a.rasheed@gmail 2346 : 161538 : list_free(recheckIndexes);
2347 : :
2348 : : /*
2349 : : * Check any WITH CHECK OPTION constraints from parent views. We are
2350 : : * required to do this after testing all constraints and uniqueness
2351 : : * violations per the SQL spec, so we do it after actually updating the
2352 : : * record in the heap and all indexes.
2353 : : *
2354 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2355 : : * are looking for at this point.
2356 : : */
1269 alvherre@alvh.no-ip. 2357 [ + + ]: 161538 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2358 : 233 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2359 : : slot, context->estate);
2360 : 161497 : }
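: :
: : /*
: : * Note (sketch, not from the original source): updateCxt->updateIndexes
: : * comes from table_tuple_update(). TU_None indicates a HOT update that
: : * needs no index insertions at all, while TU_Summarizing means only
: : * summarizing indexes (e.g. BRIN) need new entries, which is why that
: : * flag is forwarded to ExecInsertIndexTuples() above.
: : */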
2361 : :
2362 : : /*
2363 : : * Queues up an update event using the target root partitioned table's
2364 : : * trigger to check that a cross-partition update hasn't broken any foreign
2365 : : * keys pointing into it.
2366 : : */
2367 : : static void
1266 2368 : 150 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2369 : : ResultRelInfo *sourcePartInfo,
2370 : : ResultRelInfo *destPartInfo,
2371 : : ItemPointer tupleid,
2372 : : TupleTableSlot *oldslot,
2373 : : TupleTableSlot *newslot)
2374 : : {
2375 : : ListCell *lc;
2376 : : ResultRelInfo *rootRelInfo;
2377 : : List *ancestorRels;
2378 : :
2379 : 150 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2380 : 150 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2381 : :
2382 : : /*
2383 : : * For any foreign keys that point directly into non-root ancestors of
2384 : : * the source partition, we can in theory fire an update event to enforce
2385 : : * those constraints using their triggers, if we could tell that both the
2386 : : * source and the destination partitions are under the same ancestor. But
2387 : : * for now, we simply report an error that those cannot be enforced.
2388 : : */
2389 [ + - + + : 327 : foreach(lc, ancestorRels)
+ + ]
2390 : : {
2391 : 180 : ResultRelInfo *rInfo = lfirst(lc);
2392 : 180 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2393 : 180 : bool has_noncloned_fkey = false;
2394 : :
2395 : : /* Root ancestor's triggers will be processed. */
2396 [ + + ]: 180 : if (rInfo == rootRelInfo)
2397 : 147 : continue;
2398 : :
2399 [ + - + - ]: 33 : if (trigdesc && trigdesc->trig_update_after_row)
2400 : : {
2401 [ + + ]: 114 : for (int i = 0; i < trigdesc->numtriggers; i++)
2402 : : {
2403 : 84 : Trigger *trig = &trigdesc->triggers[i];
2404 : :
2405 [ + + + - ]: 87 : if (!trig->tgisclone &&
2406 : 3 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2407 : : {
2408 : 3 : has_noncloned_fkey = true;
2409 : 3 : break;
2410 : : }
2411 : : }
2412 : : }
2413 : :
2414 [ + + ]: 33 : if (has_noncloned_fkey)
2415 [ + - ]: 3 : ereport(ERROR,
2416 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2417 : : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2418 : : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2419 : : RelationGetRelationName(rInfo->ri_RelationDesc),
2420 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2421 : : errhint("Consider defining the foreign key on table \"%s\".",
2422 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2423 : : }
2424 : :
2425 : : /* Perform the root table's triggers. */
2426 : 147 : ExecARUpdateTriggers(context->estate,
2427 : : rootRelInfo, sourcePartInfo, destPartInfo,
2428 : : tupleid, NULL, newslot, NIL, NULL, true);
2429 : 147 : }
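: :
: : /*
: : * Illustrative sketch (not from the original source; table names are
: : * hypothetical): the "non-root ancestor" error above fires for setups
: : * such as
: : *
: : * CREATE TABLE root (a int PRIMARY KEY) PARTITION BY RANGE (a);
: : * CREATE TABLE mid PARTITION OF root FOR VALUES FROM (0) TO (100)
: : * PARTITION BY RANGE (a);
: : * CREATE TABLE leaf PARTITION OF mid FOR VALUES FROM (0) TO (100);
: : * CREATE TABLE m2 PARTITION OF root FOR VALUES FROM (100) TO (200);
: : * CREATE TABLE fk (a int REFERENCES mid);
: : *
: : * An UPDATE on root that moves a row from leaf into m2 is rejected,
: : * because mid's own update triggers cannot be used to verify fk.
: : */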
2430 : :
2431 : : /* ----------------------------------------------------------------
2432 : : * ExecUpdate
2433 : : *
2434 : : * note: we can't run UPDATE queries with transactions
2435 : : * off because UPDATEs are actually INSERTs and our
2436 : : * scan will mistakenly loop forever, updating the tuple
2437 : : * it just inserted. This should be fixed but until it
2438 : : * is, we don't want to get stuck in an infinite loop
2439 : : * which corrupts your database.
2440 : : *
2441 : : * When updating a table, tupleid identifies the tuple to update and
2442 : : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2443 : : * oldtuple is passed to the triggers and identifies what to update, and
2444 : : * tupleid is invalid. When updating a foreign table, tupleid is
2445 : : * invalid; the FDW has to figure out which row to update using data from
2446 : : * the planSlot. oldtuple is passed to foreign table triggers; it is
2447 : : * NULL when the foreign table has no relevant triggers.
2448 : : *
2449 : : * oldSlot contains the old tuple value.
2450 : : * slot contains the new tuple value to be stored.
2451 : : * planSlot is the output of the ModifyTable's subplan; we use it
2452 : : * to access values from other input tables (for RETURNING),
2453 : : * row-ID junk columns, etc.
2454 : : *
2455 : : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2456 : : * had identified the tuple to update, it will identify the tuple
2457 : : * actually updated after EvalPlanQual.
2458 : : * ----------------------------------------------------------------
2459 : : */
2460 : : static TupleTableSlot *
1269 2461 : 161341 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2462 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2463 : : TupleTableSlot *slot, bool canSetTag)
2464 : : {
2465 : 161341 : EState *estate = context->estate;
1788 heikki.linnakangas@i 2466 : 161341 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1269 alvherre@alvh.no-ip. 2467 : 161341 : UpdateContext updateCxt = {0};
2468 : : TM_Result result;
2469 : :
2470 : : /*
2471 : : * abort the operation if not running transactions
2472 : : */
5810 tgl@sss.pgh.pa.us 2473 [ - + ]: 161341 : if (IsBootstrapProcessingMode())
5810 tgl@sss.pgh.pa.us 2474 [ # # ]:UBC 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2475 : :
2476 : : /*
2477 : : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2478 : : * done if it says we are.
2479 : : */
908 dean.a.rasheed@gmail 2480 [ + + ]:CBC 161341 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
1269 alvherre@alvh.no-ip. 2481 : 66 : return NULL;
2482 : :
2483 : : /* INSTEAD OF ROW UPDATE Triggers */
5445 tgl@sss.pgh.pa.us 2484 [ + + ]: 161263 : if (resultRelInfo->ri_TrigDesc &&
2485 [ + + ]: 2875 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2486 : : {
2384 andres@anarazel.de 2487 [ + + ]: 63 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2488 : : oldtuple, slot))
2299 tgl@sss.pgh.pa.us 2489 : 9 : return NULL; /* "do nothing" */
2490 : : }
4563 2491 [ + + ]: 161200 : else if (resultRelInfo->ri_FdwRoutine)
2492 : : {
2493 : : /* Fill in GENERATEd columns */
1269 alvherre@alvh.no-ip. 2494 : 95 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2495 : :
2496 : : /*
2497 : : * update in foreign table: let the FDW do it
2498 : : */
4563 tgl@sss.pgh.pa.us 2499 : 95 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2500 : : resultRelInfo,
2501 : : slot,
2502 : : context->planSlot);
2503 : :
2504 [ + + ]: 95 : if (slot == NULL) /* "do nothing" */
2505 : 1 : return NULL;
2506 : :
2507 : : /*
2508 : : * AFTER ROW Triggers or RETURNING expressions might reference the
2509 : : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2510 : : * them. (This covers the case where the FDW replaced the slot.)
2511 : : */
2384 andres@anarazel.de 2512 : 94 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2513 : : }
2514 : : else
2515 : : {
2516 : : ItemPointerData lockedtid;
2517 : :
2518 : : /*
2519 : : * If we generate a new candidate tuple after EvalPlanQual testing, we
2520 : : * must loop back here to try again. (We don't need to redo triggers,
2521 : : * however. If there are any BEFORE triggers then trigger.c will have
2522 : : * done table_tuple_lock to lock the correct tuple, so there's no need
2523 : : * to do them again.)
2524 : : */
1269 alvherre@alvh.no-ip. 2525 : 161105 : redo_act:
347 noah@leadboat.com 2526 : 161154 : lockedtid = *tupleid;
1269 alvherre@alvh.no-ip. 2527 : 161154 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2528 : : canSetTag, &updateCxt);
2529 : :
2530 : : /*
2531 : : * If ExecUpdateAct reports that a cross-partition update was done,
2532 : : * then the RETURNING tuple (if any) has been projected and there's
2533 : : * nothing else for us to do.
2534 : : */
2535 [ + + ]: 160996 : if (updateCxt.crossPartUpdate)
2536 : 444 : return context->cpUpdateReturningSlot;
2537 : :
5445 tgl@sss.pgh.pa.us 2538 [ + + + + : 160616 : switch (result)
- ]
2539 : : {
2359 andres@anarazel.de 2540 : 42 : case TM_SelfModified:
2541 : :
2542 : : /*
2543 : : * The target tuple was already updated or deleted by the
2544 : : * current command, or by a later command in the current
2545 : : * transaction. The former case is possible in a join UPDATE
2546 : : * where multiple tuples join to the same target tuple. This
2547 : : * is pretty questionable, but Postgres has always allowed it:
2548 : : * we just execute the first update action and ignore
2549 : : * additional update attempts.
2550 : : *
2551 : : * The latter case arises if the tuple is modified by a
2552 : : * command in a BEFORE trigger, or perhaps by a command in a
2553 : : * volatile function used in the query. In such situations we
2554 : : * should not ignore the update, but it is equally unsafe to
2555 : : * proceed. We don't want to discard the original UPDATE
2556 : : * while keeping the triggered actions based on it; and we
2557 : : * have no principled way to merge this update with the
2558 : : * previous ones. So throwing an error is the only safe
2559 : : * course.
2560 : : *
2561 : : * If a trigger actually intends this type of interaction, it
2562 : : * can re-execute the UPDATE (assuming it can figure out how)
2563 : : * and then return NULL to cancel the outer update.
2564 : : */
1269 alvherre@alvh.no-ip. 2565 [ + + ]: 42 : if (context->tmfd.cmax != estate->es_output_cid)
4698 kgrittn@postgresql.o 2566 [ + - ]: 3 : ereport(ERROR,
2567 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2568 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2569 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2570 : :
2571 : : /* Else, already updated by self; nothing to do */
5445 tgl@sss.pgh.pa.us 2572 : 39 : return NULL;
2573 : :
2359 andres@anarazel.de 2574 : 160495 : case TM_Ok:
5445 tgl@sss.pgh.pa.us 2575 : 160495 : break;
2576 : :
2359 andres@anarazel.de 2577 : 75 : case TM_Updated:
2578 : : {
2579 : : TupleTableSlot *inputslot;
2580 : : TupleTableSlot *epqslot;
2581 : :
2582 [ + + ]: 75 : if (IsolationUsesXactSnapshot())
2583 [ + - ]: 2 : ereport(ERROR,
2584 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2585 : : errmsg("could not serialize access due to concurrent update")));
2586 : :
2587 : : /*
2588 : : * Already know that we're going to need to do EPQ, so
2589 : : * fetch tuple directly into the right slot.
2590 : : */
513 akorotkov@postgresql 2591 : 73 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2592 : : resultRelInfo->ri_RangeTableIndex);
2593 : :
2594 : 73 : result = table_tuple_lock(resultRelationDesc, tupleid,
2595 : : estate->es_snapshot,
2596 : : inputslot, estate->es_output_cid,
2597 : : updateCxt.lockmode, LockWaitBlock,
2598 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2599 : : &context->tmfd);
2600 : :
2601 [ + + + - ]: 71 : switch (result)
2602 : : {
2603 : 66 : case TM_Ok:
2604 [ - + ]: 66 : Assert(context->tmfd.traversed);
2605 : :
2606 : 66 : epqslot = EvalPlanQual(context->epqstate,
2607 : : resultRelationDesc,
2608 : : resultRelInfo->ri_RangeTableIndex,
2609 : : inputslot);
2610 [ + + + + ]: 66 : if (TupIsNull(epqslot))
2611 : : /* Tuple not passing quals anymore, exiting... */
2612 : 17 : return NULL;
2613 : :
2614 : : /* Make sure ri_oldTupleSlot is initialized. */
2615 [ - + ]: 49 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
513 akorotkov@postgresql 2616 :UBC 0 : ExecInitUpdateProjection(context->mtstate,
2617 : : resultRelInfo);
2618 : :
347 noah@leadboat.com 2619 [ + + ]:CBC 49 : if (resultRelInfo->ri_needLockTagTuple)
2620 : : {
2621 : 1 : UnlockTuple(resultRelationDesc,
2622 : : &lockedtid, InplaceUpdateTupleLock);
2623 : 1 : LockTuple(resultRelationDesc,
2624 : : tupleid, InplaceUpdateTupleLock);
2625 : : }
2626 : :
2627 : : /* Fetch the most recent version of old tuple. */
513 akorotkov@postgresql 2628 : 49 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2629 [ - + ]: 49 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2630 : : tupleid,
2631 : : SnapshotAny,
2632 : : oldSlot))
513 akorotkov@postgresql 2633 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
513 akorotkov@postgresql 2634 :CBC 49 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2635 : : epqslot, oldSlot);
2636 : 49 : goto redo_act;
2637 : :
2638 : 1 : case TM_Deleted:
2639 : : /* tuple already deleted; nothing to do */
2640 : 1 : return NULL;
2641 : :
2642 : 4 : case TM_SelfModified:
2643 : :
2644 : : /*
2645 : : * This can be reached when following an update
2646 : : * chain from a tuple updated by another session,
2647 : : * reaching a tuple that was already updated in
2648 : : * this transaction. If previously modified by
2649 : : * this command, ignore the redundant update,
2650 : : * otherwise error out.
2651 : : *
2652 : : * See also TM_SelfModified response to
2653 : : * table_tuple_update() above.
2654 : : */
2655 [ + + ]: 4 : if (context->tmfd.cmax != estate->es_output_cid)
2656 [ + - ]: 1 : ereport(ERROR,
2657 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2658 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2659 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2660 : 3 : return NULL;
2661 : :
513 akorotkov@postgresql 2662 :UBC 0 : default:
2663 : : /* see table_tuple_lock call in ExecDelete() */
2664 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2665 : : result);
2666 : : return NULL;
2667 : : }
2668 : : }
2669 : :
2670 : : break;
2671 : :
2359 andres@anarazel.de 2672 :CBC 4 : case TM_Deleted:
2673 [ - + ]: 4 : if (IsolationUsesXactSnapshot())
2359 andres@anarazel.de 2674 [ # # ]:UBC 0 : ereport(ERROR,
2675 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2676 : : errmsg("could not serialize access due to concurrent delete")));
2677 : : /* tuple already deleted; nothing to do */
5445 tgl@sss.pgh.pa.us 2678 :CBC 4 : return NULL;
2679 : :
5445 tgl@sss.pgh.pa.us 2680 :UBC 0 : default:
2298 andres@anarazel.de 2681 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2682 : : result);
2683 : : return NULL;
2684 : : }
2685 : : }
2686 : :
5307 tgl@sss.pgh.pa.us 2687 [ + + ]:CBC 160637 : if (canSetTag)
2688 : 160338 : (estate->es_processed)++;
2689 : :
1269 alvherre@alvh.no-ip. 2690 : 160637 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2691 : : slot);
2692 : :
2693 : : /* Process RETURNING if present */
5810 tgl@sss.pgh.pa.us 2694 [ + + ]: 160554 : if (resultRelInfo->ri_projectReturning)
233 dean.a.rasheed@gmail 2695 : 1194 : return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE,
2696 : : oldSlot, slot, context->planSlot);
2697 : :
5810 tgl@sss.pgh.pa.us 2698 : 159360 : return NULL;
2699 : : }
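
The TM_Updated arm above is the standard READ COMMITTED recovery path: lock the latest version of the row, re-evaluate the query's quals with EvalPlanQual(), rebuild the new tuple from the surviving version, and jump back to redo_act. A rough standalone model of that decision, with invented stand-ins (lock_latest_version for table_tuple_lock(), recheck_quals for EvalPlanQual()) rather than real executor APIs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the tableam TM_* result codes used above. */
    typedef enum { TM_OK, TM_UPDATED, TM_DELETED, TM_SELF_MODIFIED } TmResult;

    /* Hypothetical: lock the newest row version; always succeeds here. */
    static TmResult lock_latest_version(int rowid) { (void) rowid; return TM_OK; }

    /* Hypothetical EPQ verdict: pretend only even row ids still qualify. */
    static bool recheck_quals(int rowid) { return rowid % 2 == 0; }

    /* true = redo the UPDATE against the newest version ("goto redo_act"). */
    static bool update_after_concurrent_change(int rowid)
    {
        switch (lock_latest_version(rowid))
        {
            case TM_OK:
                return recheck_quals(rowid);   /* EPQ decides */
            case TM_DELETED:                   /* row is gone: skip quietly */
            case TM_SELF_MODIFIED:             /* already handled, or error */
            default:
                return false;
        }
    }

    int main(void)
    {
        printf("row 4: %s\n", update_after_concurrent_change(4) ? "redo" : "skip");
        printf("row 5: %s\n", update_after_concurrent_change(5) ? "redo" : "skip");
        return 0;
    }

In the real code the TM_SelfModified case additionally distinguishes "modified by this command" (silently skip) from "modified by an operation triggered by this command" (error), via the cmax test seen above.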
2700 : :
2701 : : /*
2702 : : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2703 : : *
2704 : : * Try to lock tuple for update as part of speculative insertion. If
2705 : : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2706 : : * (but still lock row, even though it may not satisfy estate's
2707 : : * snapshot).
2708 : : *
2709 : : * Returns true if we're done (with or without an update), or false if
2710 : : * the caller must retry the INSERT from scratch.
2711 : : */
2712 : : static bool
1269 alvherre@alvh.no-ip. 2713 : 2602 : ExecOnConflictUpdate(ModifyTableContext *context,
2714 : : ResultRelInfo *resultRelInfo,
2715 : : ItemPointer conflictTid,
2716 : : TupleTableSlot *excludedSlot,
2717 : : bool canSetTag,
2718 : : TupleTableSlot **returning)
2719 : : {
2720 : 2602 : ModifyTableState *mtstate = context->mtstate;
3774 andres@anarazel.de 2721 : 2602 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2722 : 2602 : Relation relation = resultRelInfo->ri_RelationDesc;
2721 alvherre@alvh.no-ip. 2723 : 2602 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2376 andres@anarazel.de 2724 : 2602 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2725 : : TM_FailureData tmfd;
2726 : : LockTupleMode lockmode;
2727 : : TM_Result test;
2728 : : Datum xminDatum;
2729 : : TransactionId xmin;
2730 : : bool isnull;
2731 : :
2732 : : /*
2733 : : * Parse analysis should have blocked ON CONFLICT for all system
2734 : : * relations, which includes these. There's no fundamental obstacle to
2735 : : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2736 : : * ExecUpdate() caller.
2737 : : */
347 noah@leadboat.com 2738 [ - + ]: 2602 : Assert(!resultRelInfo->ri_needLockTagTuple);
2739 : :
2740 : : /* Determine lock mode to use */
1269 alvherre@alvh.no-ip. 2741 : 2602 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2742 : :
2743 : : /*
2744 : : * Lock the tuple for update. Don't follow the update chain if the
2745 : : * tuple cannot be locked without doing so. A row locking conflict
2746 : : * here means our previous conclusion that the tuple is conclusively
2747 : : * committed no longer holds.
2748 : : */
2298 andres@anarazel.de 2749 : 2602 : test = table_tuple_lock(relation, conflictTid,
1269 alvherre@alvh.no-ip. 2750 : 2602 : context->estate->es_snapshot,
2751 : 2602 : existing, context->estate->es_output_cid,
2752 : : lockmode, LockWaitBlock, 0,
2753 : : &tmfd);
3774 andres@anarazel.de 2754 [ + + - - : 2602 : switch (test)
- - ]
2755 : : {
2359 2756 : 2590 : case TM_Ok:
2757 : : /* success! */
3774 2758 : 2590 : break;
2759 : :
2359 2760 : 12 : case TM_Invisible:
2761 : :
2762 : : /*
2763 : : * This can occur when a just-inserted tuple is updated again in
2764 : : * the same command, e.g. because multiple rows with the same
2765 : : * conflicting key values are inserted.
2766 : : *
2767 : : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2768 : : * case. We do not want to proceed because it would lead to the
2769 : : * same row being updated a second time in some unspecified order,
2770 : : * and in contrast to plain UPDATEs there's no historical behavior
2771 : : * to break.
2772 : : *
2773 : : * It is the user's responsibility to prevent this situation from
2774 : : * occurring. These problems are why the SQL standard similarly
2775 : : * specifies that for SQL MERGE, an exception must be raised in
2776 : : * the event of an attempt to update the same row twice.
2777 : : */
2778 : 12 : xminDatum = slot_getsysattr(existing,
2779 : : MinTransactionIdAttributeNumber,
2780 : : &isnull);
2781 [ - + ]: 12 : Assert(!isnull);
2782 : 12 : xmin = DatumGetTransactionId(xminDatum);
2783 : :
2784 [ + - ]: 12 : if (TransactionIdIsCurrentTransactionId(xmin))
3774 2785 [ + - ]: 12 : ereport(ERROR,
2786 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2787 : : /* translator: %s is a SQL command name */
2788 : : errmsg("%s command cannot affect row a second time",
2789 : : "ON CONFLICT DO UPDATE"),
2790 : : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2791 : :
2792 : : /* This shouldn't happen */
3774 andres@anarazel.de 2793 [ # # ]:UBC 0 : elog(ERROR, "attempted to lock invisible tuple");
2794 : : break;
2795 : :
2359 2796 : 0 : case TM_SelfModified:
2797 : :
2798 : : /*
2799 : : * This state should never be reached. Since a dirty snapshot is
2800 : : * used to find conflicting tuples, speculative insertion could not
2801 : : * have found this row to conflict with.
2802 : : */
3774 2803 [ # # ]: 0 : elog(ERROR, "unexpected self-updated tuple");
2804 : : break;
2805 : :
2359 2806 : 0 : case TM_Updated:
3774 2807 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2808 [ # # ]: 0 : ereport(ERROR,
2809 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2810 : : errmsg("could not serialize access due to concurrent update")));
2811 : :
2812 : : /*
2813 : : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2814 : : * a partitioned table, we shouldn't reach a case where the tuple
2815 : : * to be locked has been moved to another partition by a concurrent
2816 : : * update of the partition key.
2817 : : */
2359 2818 [ # # ]: 0 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2819 : :
2820 : : /*
2821 : : * Tell caller to try again from the very start.
2822 : : *
2823 : : * It does not make sense to use the usual EvalPlanQual() style
2824 : : * loop here, as the new version of the row might not conflict
2825 : : * anymore, or the conflicting tuple has actually been deleted.
2826 : : */
2827 : 0 : ExecClearTuple(existing);
2828 : 0 : return false;
2829 : :
2830 : 0 : case TM_Deleted:
2831 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2832 [ # # ]: 0 : ereport(ERROR,
2833 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2834 : : errmsg("could not serialize access due to concurrent delete")));
2835 : :
2836 : : /* see TM_Updated case */
2837 [ # # ]: 0 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2838 : 0 : ExecClearTuple(existing);
3774 2839 : 0 : return false;
2840 : :
2841 : 0 : default:
2298 2842 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2843 : : }
2844 : :
2845 : : /* Success, the tuple is locked. */
2846 : :
2847 : : /*
2848 : : * Verify that the tuple is visible to our MVCC snapshot if the current
2849 : : * isolation level mandates that.
2850 : : *
2851 : : * It's not sufficient to rely on the check within ExecUpdate(), as
2852 : : * e.g. an ON CONFLICT ... WHERE clause may prevent us from reaching it.
2853 : : *
2854 : : * This means we only ever continue when a new command in the current
2855 : : * transaction could see the row, even though in READ COMMITTED mode the
2856 : : * tuple will not be visible according to the current statement's
2857 : : * snapshot. This is in line with the way UPDATE deals with newer tuple
2858 : : * versions.
2859 : : */
1269 alvherre@alvh.no-ip. 2860 :CBC 2590 : ExecCheckTupleVisible(context->estate, relation, existing);
2861 : :
2862 : : /*
2863 : : * Make tuple and any needed join variables available to ExecQual and
2864 : : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2865 : : * the target's existing tuple is installed in the scantuple. EXCLUDED
2866 : : * has been made to reference INNER_VAR in setrefs.c, but there is no
2867 : : * other redirection.
2868 : : */
2376 andres@anarazel.de 2869 : 2590 : econtext->ecxt_scantuple = existing;
3774 2870 : 2590 : econtext->ecxt_innertuple = excludedSlot;
2871 : 2590 : econtext->ecxt_outertuple = NULL;
2872 : :
3098 2873 [ + + ]: 2590 : if (!ExecQual(onConflictSetWhere, econtext))
2874 : : {
2376 2875 : 16 : ExecClearTuple(existing); /* see return below */
3774 2876 [ - + ]: 16 : InstrCountFiltered1(&mtstate->ps, 1);
2877 : 16 : return true; /* done with the tuple */
2878 : : }
2879 : :
2880 [ + + ]: 2574 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2881 : : {
2882 : : /*
2883 : : * Check target's existing tuple against UPDATE-applicable USING
2884 : : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2885 : : *
2886 : : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2887 : : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2888 : : * but that's almost the extent of its special handling for ON
2889 : : * CONFLICT DO UPDATE.
2890 : : *
2891 : : * The rewriter will also have associated UPDATE applicable straight
2892 : : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2893 : : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2894 : : * kinds, so there is no danger of spurious over-enforcement in the
2895 : : * INSERT or UPDATE path.
2896 : : */
2897 : 30 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2898 : : existing,
2899 : : mtstate->ps.state);
2900 : : }
2901 : :
2902 : : /* Project the new tuple version */
2721 alvherre@alvh.no-ip. 2903 : 2562 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2904 : :
2905 : : /*
2906 : : * Note that it is possible that the target tuple has been modified in
2907 : : * this session, after the above table_tuple_lock. We choose to not error
2908 : : * out in that case, in line with ExecUpdate's treatment of similar cases.
2909 : : * This can happen if an UPDATE is triggered from within ExecQual(),
2910 : : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2911 : : * wCTE in the ON CONFLICT's SET.
2912 : : */
2913 : :
2914 : : /* Execute UPDATE with projection */
1269 2915 : 5109 : *returning = ExecUpdate(context, resultRelInfo,
2916 : : conflictTid, NULL, existing,
2376 andres@anarazel.de 2917 : 2562 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2918 : : canSetTag);
2919 : :
2920 : : /*
2921 : : * Clear out the existing tuple, as there might not be another conflict
2922 : : * among the next input rows; we don't want to hold resources till the
2923 : : * end of the query. First, though, make sure that the returning slot,
2924 : : * if any, has a local copy of any OLD pass-by-reference values, if it
2925 : : * refers to any OLD columns.
2926 : : */
233 dean.a.rasheed@gmail 2927 [ + + ]: 2547 : if (*returning != NULL &&
2928 [ + + ]: 110 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2929 : 3 : ExecMaterializeSlot(*returning);
2930 : :
2376 andres@anarazel.de 2931 : 2547 : ExecClearTuple(existing);
2932 : :
3774 2933 : 2547 : return true;
2934 : : }
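
ExecOnConflictUpdate()'s boolean result encodes a retry protocol with its caller: true means this input row is fully dealt with (updated, filtered out by the WHERE clause, or skipped), while false means the conflicting tuple was concurrently updated or deleted, so the caller must restart the speculative insertion from scratch. A toy rendering of that contract, with an invented try_conflict_update() standing in for the real function:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in: false = lost a race, redo the conflict check. */
    static bool try_conflict_update(int attempt)
    {
        return attempt > 0;          /* pretend the first try loses the race */
    }

    int main(void)
    {
        for (int attempt = 0;; attempt++)   /* models the caller's retry loop */
        {
            /* ... speculative conflict check would go here ... */
            if (try_conflict_update(attempt))
                break;               /* done, with or without an update */
            /* else loop: re-run the conflict check against the new state */
        }
        puts("row handled");
        return 0;
    }

Retrying from the top is deliberate: after a concurrent update the new row version might not conflict at all, so an EvalPlanQual-style chase of the update chain would be the wrong tool, as the TM_Updated comment above explains.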
2935 : :
2936 : : /*
2937 : : * Perform MERGE.
2938 : : */
2939 : : static TupleTableSlot *
1258 alvherre@alvh.no-ip. 2940 : 7626 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2941 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2942 : : {
538 dean.a.rasheed@gmail 2943 : 7626 : TupleTableSlot *rslot = NULL;
2944 : : bool matched;
2945 : :
2946 : : /*-----
2947 : : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2948 : : * valid, depending on whether the result relation is a table or a view.
2949 : : * We execute the first action for which the additional WHEN MATCHED AND
2950 : : * quals pass. If an action without quals is found, that action is
2951 : : * executed.
2952 : : *
2953 : : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2954 : : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2955 : : * in sequence until one passes. This is almost identical to the WHEN
2956 : : * MATCHED case, and both cases are handled by ExecMergeMatched().
2957 : : *
2958 : : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2959 : : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2960 : : * TARGET] actions in sequence until one passes.
2961 : : *
2962 : : * Things get interesting in case of concurrent update/delete of the
2963 : : * target tuple. Such concurrent update/delete is detected while we are
2964 : : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2965 : : *
2966 : : * A concurrent update can:
2967 : : *
2968 : : * 1. modify the target tuple so that the results from checking any
2969 : : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2970 : : * SOURCE actions potentially change, but the result from the join
2971 : : * quals does not change.
2972 : : *
2973 : : * In this case, we are still dealing with the same kind of match
2974 : : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2975 : : * actions from the start and choose the first one that satisfies the
2976 : : * new target tuple.
2977 : : *
2978 : : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2979 : : * quals no longer pass and hence the source and target tuples no
2980 : : * longer match.
2981 : : *
2982 : : * In this case, we are now dealing with a NOT MATCHED case, and we
2983 : : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2984 : : * TARGET] actions. First ExecMergeMatched() processes the list of
2985 : : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2986 : : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2987 : : * TARGET] actions in sequence until one passes. Thus we may execute
2988 : : * two actions; one of each kind.
2989 : : *
2990 : : * Thus we support concurrent updates that turn MATCHED candidate rows
2991 : : * into NOT MATCHED rows. However, we do not attempt to support cases
2992 : : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2993 : : * cause a target row to match a different source row.
2994 : : *
2995 : : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2996 : : * [BY TARGET].
2997 : : *
2998 : : * ExecMergeMatched() takes care of following the update chain and
2999 : : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
3000 : : * action, as long as the target tuple still exists. If the target tuple
3001 : : * gets deleted or a concurrent update causes the join quals to fail, it
3002 : : * returns a matched status of false and we call ExecMergeNotMatched().
3003 : : * Given that ExecMergeMatched() always makes progress by following the
3004 : : * update chain and we never switch from ExecMergeNotMatched() to
3005 : : * ExecMergeMatched(), there is no risk of a livelock.
3006 : : */
555 3007 [ + + + + ]: 7626 : matched = tupleid != NULL || oldtuple != NULL;
1258 alvherre@alvh.no-ip. 3008 [ + + ]: 7626 : if (matched)
538 dean.a.rasheed@gmail 3009 : 6290 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3010 : : canSetTag, &matched);
3011 : :
3012 : : /*
3013 : : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3014 : : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3015 : : * "matched" to false, indicating that it no longer matches).
3016 : : */
1258 alvherre@alvh.no-ip. 3017 [ + + ]: 7579 : if (!matched)
3018 : : {
3019 : : /*
3020 : : * If a concurrent update turned a MATCHED case into a NOT MATCHED
3021 : : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3022 : : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3023 : : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3024 : : * SOURCE action, and computed the row to return. If so, we cannot
3025 : : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3026 : : * pending (to be processed on the next call to ExecModifyTable()).
3027 : : * Otherwise, just process the action now.
3028 : : */
525 dean.a.rasheed@gmail 3029 [ + + ]: 1344 : if (rslot == NULL)
3030 : 1343 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3031 : : else
3032 : 1 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3033 : : }
3034 : :
538 3035 : 7549 : return rslot;
3036 : : }
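
The comment above amounts to a small one-way state machine: a concurrent change can demote a row from MATCHED to NOT MATCHED BY SOURCE (join quals now fail) or to NOT MATCHED [BY TARGET] (row deleted), but never the other way around, which is what rules out a livelock. A standalone sketch of the allowed transitions (names invented, though they mirror the MergeMatchKind constants):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { MATCHED, NOT_MATCHED_BY_SOURCE, NOT_MATCHED_BY_TARGET } MatchKind;

    static MatchKind
    after_concurrent_change(MatchKind cur, bool deleted, bool join_still_passes)
    {
        if (cur != MATCHED)
            return cur;                   /* demotions only, never back to MATCHED */
        if (deleted)
            return NOT_MATCHED_BY_TARGET; /* caller runs NOT MATCHED [BY TARGET] */
        if (!join_still_passes)
            return NOT_MATCHED_BY_SOURCE; /* rescan the BY SOURCE action list */
        return MATCHED;                   /* rescan the MATCHED action list */
    }

    int main(void)
    {
        /* a concurrent update broke the join qual: 0 (MATCHED) becomes 1 */
        printf("%d\n", after_concurrent_change(MATCHED, false, false));
        return 0;
    }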
3037 : :
3038 : : /*
3039 : : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3040 : : * action, depending on whether the join quals are satisfied. If the target
3041 : : * relation is a table, the current target tuple is identified by tupleid.
3042 : : * Otherwise, if the target relation is a view, oldtuple is the current target
3043 : : * tuple from the view.
3044 : : *
3045 : : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3046 : : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3047 : : * action do not pass, we check the second, then the third and so on. If we
3048 : : * reach the end without finding a qualifying action, we return NULL.
3049 : : * Otherwise, we execute the qualifying action and return its RETURNING
3050 : : * result, if any, or NULL.
3051 : : *
3052 : : * On entry, "*matched" is assumed to be true. If a concurrent update or
3053 : : * delete is detected that causes the join quals to no longer pass, we set it
3054 : : * to false, indicating that the caller should process any NOT MATCHED [BY
3055 : : * TARGET] actions.
3056 : : *
3057 : : * After a concurrent update, we restart from the first action to look for a
3058 : : * new qualifying action to execute. If the join quals originally passed, and
3059 : : * the concurrent update caused them to no longer pass, then we switch from
3060 : : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3061 : : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3062 : : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3063 : : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3064 : : */
3065 : : static TupleTableSlot *
1258 alvherre@alvh.no-ip. 3066 : 6290 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3067 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3068 : : bool *matched)
3069 : : {
3070 : 6290 : ModifyTableState *mtstate = context->mtstate;
525 dean.a.rasheed@gmail 3071 : 6290 : List **mergeActions = resultRelInfo->ri_MergeActions;
3072 : : ItemPointerData lockedtid;
3073 : : List *actionStates;
538 3074 : 6290 : TupleTableSlot *newslot = NULL;
3075 : 6290 : TupleTableSlot *rslot = NULL;
1258 alvherre@alvh.no-ip. 3076 : 6290 : EState *estate = context->estate;
3077 : 6290 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3078 : : bool isNull;
3079 : 6290 : EPQState *epqstate = &mtstate->mt_epqstate;
3080 : : ListCell *l;
3081 : :
3082 : : /* Expect matched to be true on entry */
525 dean.a.rasheed@gmail 3083 [ - + ]: 6290 : Assert(*matched);
3084 : :
3085 : : /*
3086 : : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3087 : : * are done.
3088 : : */
3089 [ + + ]: 6290 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3090 [ + + ]: 600 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
538 3091 : 264 : return NULL;
3092 : :
3093 : : /*
3094 : : * Make tuple and any needed join variables available to ExecQual and
3095 : : * ExecProject. The target's existing tuple is installed in the scantuple.
3096 : : * This target relation's slot is required only in the case of a MATCHED
3097 : : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3098 : : */
1258 alvherre@alvh.no-ip. 3099 : 6026 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3100 : 6026 : econtext->ecxt_innertuple = context->planSlot;
3101 : 6026 : econtext->ecxt_outertuple = NULL;
3102 : :
3103 : : /*
3104 : : * This routine is only invoked for matched target rows, so we should
3105 : : * either have the tupleid of the target row, or an old tuple from the
3106 : : * target wholerow junk attr.
3107 : : */
555 dean.a.rasheed@gmail 3108 [ + + - + ]: 6026 : Assert(tupleid != NULL || oldtuple != NULL);
347 noah@leadboat.com 3109 : 6026 : ItemPointerSetInvalid(&lockedtid);
555 dean.a.rasheed@gmail 3110 [ + + ]: 6026 : if (oldtuple != NULL)
3111 : : {
347 noah@leadboat.com 3112 [ - + ]: 48 : Assert(!resultRelInfo->ri_needLockTagTuple);
555 dean.a.rasheed@gmail 3113 : 48 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3114 : : false);
3115 : : }
3116 : : else
3117 : : {
347 noah@leadboat.com 3118 [ + + ]: 5978 : if (resultRelInfo->ri_needLockTagTuple)
3119 : : {
3120 : : /*
3121 : : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3122 : : * that don't match mas_whenqual. MERGE on system catalogs is a
3123 : : * minor use case, so don't bother optimizing those.
3124 : : */
3125 : 4007 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3126 : : InplaceUpdateTupleLock);
3127 : 4007 : lockedtid = *tupleid;
3128 : : }
3129 [ - + ]: 5978 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3130 : : tupleid,
3131 : : SnapshotAny,
3132 : : resultRelInfo->ri_oldTupleSlot))
347 noah@leadboat.com 3133 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch the target tuple");
3134 : : }
3135 : :
3136 : : /*
3137 : : * Test the join condition. If it's satisfied, perform a MATCHED action.
3138 : : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3139 : : *
3140 : : * Note that this join condition will be NULL if there are no NOT MATCHED
3141 : : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3142 : : * need only consider MATCHED actions here.
3143 : : */
525 dean.a.rasheed@gmail 3144 [ + + ]:CBC 6026 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3145 : 5935 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3146 : : else
3147 : 91 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3148 : :
3149 : 6026 : lmerge_matched:
3150 : :
3151 [ + + + + : 10803 : foreach(l, actionStates)
+ + ]
3152 : : {
1258 alvherre@alvh.no-ip. 3153 : 6102 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3154 : 6102 : CmdType commandType = relaction->mas_action->commandType;
3155 : : TM_Result result;
3156 : 6102 : UpdateContext updateCxt = {0};
3157 : :
3158 : : /*
3159 : : * Test condition, if any.
3160 : : *
3161 : : * In the absence of any condition, we perform the action
3162 : : * unconditionally (no need to check separately since ExecQual() will
3163 : : * return true if there are no conditions to evaluate).
3164 : : */
3165 [ + + ]: 6102 : if (!ExecQual(relaction->mas_whenqual, econtext))
3166 : 4741 : continue;
3167 : :
3168 : : /*
3169 : : * Check if the existing target tuple meets the USING checks of
3170 : : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3171 : : * error.
3172 : : *
3173 : : * The WITH CHECK quals for UPDATE RLS policies are applied in
3174 : : * ExecUpdateAct() and hence we need not do anything special to handle
3175 : : * them.
3176 : : *
3177 : : * NOTE: We must do this after WHEN quals are evaluated, so that we
3178 : : * check policies only when they matter.
3179 : : */
761 dean.a.rasheed@gmail 3180 [ + + + + ]: 1361 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3181 : : {
1258 alvherre@alvh.no-ip. 3182 : 45 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3183 : : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3184 : : resultRelInfo,
3185 : : resultRelInfo->ri_oldTupleSlot,
3186 [ + + ]: 45 : context->mtstate->ps.state);
3187 : : }
3188 : :
3189 : : /* Perform stated action */
3190 [ + + + - ]: 1349 : switch (commandType)
3191 : : {
3192 : 1077 : case CMD_UPDATE:
3193 : :
3194 : : /*
3195 : : * Project the output tuple, and use that to update the table.
3196 : : * We don't need to filter out junk attributes, because the
3197 : : * UPDATE action's targetlist doesn't have any.
3198 : : */
3199 : 1077 : newslot = ExecProject(relaction->mas_proj);
3200 : :
538 dean.a.rasheed@gmail 3201 : 1077 : mtstate->mt_merge_action = relaction;
1258 alvherre@alvh.no-ip. 3202 [ + + ]: 1077 : if (!ExecUpdatePrologue(context, resultRelInfo,
3203 : : tupleid, NULL, newslot, &result))
3204 : : {
908 dean.a.rasheed@gmail 3205 [ + + ]: 10 : if (result == TM_Ok)
347 noah@leadboat.com 3206 : 78 : goto out; /* "do nothing" */
3207 : :
908 dean.a.rasheed@gmail 3208 : 7 : break; /* concurrent update/delete */
3209 : : }
3210 : :
3211 : : /* INSTEAD OF ROW UPDATE Triggers */
555 3212 [ + + ]: 1067 : if (resultRelInfo->ri_TrigDesc &&
3213 [ + + ]: 168 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3214 : : {
3215 [ - + ]: 39 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3216 : : oldtuple, newslot))
347 noah@leadboat.com 3217 :UBC 0 : goto out; /* "do nothing" */
3218 : : }
3219 : : else
3220 : : {
3221 : : /* checked ri_needLockTagTuple above */
420 noah@leadboat.com 3222 [ - + ]:CBC 1028 : Assert(oldtuple == NULL);
3223 : :
555 dean.a.rasheed@gmail 3224 : 1028 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3225 : : NULL, newslot, canSetTag,
3226 : : &updateCxt);
3227 : :
3228 : : /*
3229 : : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3230 : : * cross-partition update was done, then there's nothing
3231 : : * else for us to do --- the UPDATE has been turned into a
3232 : : * DELETE and an INSERT, and we must not perform any of
3233 : : * the usual post-update tasks. Also, the RETURNING tuple
3234 : : * (if any) has been projected, so we can just return
3235 : : * that.
3236 : : */
3237 [ + + ]: 1016 : if (updateCxt.crossPartUpdate)
3238 : : {
3239 : 67 : mtstate->mt_merge_updated += 1;
347 noah@leadboat.com 3240 : 67 : rslot = context->cpUpdateReturningSlot;
3241 : 67 : goto out;
3242 : : }
3243 : : }
3244 : :
555 dean.a.rasheed@gmail 3245 [ + + ]: 988 : if (result == TM_Ok)
3246 : : {
1258 alvherre@alvh.no-ip. 3247 : 949 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3248 : : tupleid, NULL, newslot);
3249 : 943 : mtstate->mt_merge_updated += 1;
3250 : : }
3251 : 982 : break;
3252 : :
3253 : 257 : case CMD_DELETE:
538 dean.a.rasheed@gmail 3254 : 257 : mtstate->mt_merge_action = relaction;
1258 alvherre@alvh.no-ip. 3255 [ + + ]: 257 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3256 : : NULL, NULL, &result))
3257 : : {
908 dean.a.rasheed@gmail 3258 [ + + ]: 6 : if (result == TM_Ok)
347 noah@leadboat.com 3259 : 3 : goto out; /* "do nothing" */
3260 : :
908 dean.a.rasheed@gmail 3261 : 3 : break; /* concurrent update/delete */
3262 : : }
3263 : :
3264 : : /* INSTEAD OF ROW DELETE Triggers */
555 3265 [ + + ]: 251 : if (resultRelInfo->ri_TrigDesc &&
3266 [ + + ]: 22 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3267 : : {
3268 [ - + ]: 3 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3269 : : oldtuple))
347 noah@leadboat.com 3270 :UBC 0 : goto out; /* "do nothing" */
3271 : : }
3272 : : else
3273 : : {
3274 : : /* checked ri_needLockTagTuple above */
420 noah@leadboat.com 3275 [ - + ]:CBC 248 : Assert(oldtuple == NULL);
3276 : :
555 dean.a.rasheed@gmail 3277 : 248 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3278 : : false);
3279 : : }
3280 : :
1258 alvherre@alvh.no-ip. 3281 [ + + ]: 251 : if (result == TM_Ok)
3282 : : {
3283 : 242 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3284 : : false);
3285 : 242 : mtstate->mt_merge_deleted += 1;
3286 : : }
3287 : 251 : break;
3288 : :
3289 : 15 : case CMD_NOTHING:
3290 : : /* Doing nothing is always OK */
3291 : 15 : result = TM_Ok;
3292 : 15 : break;
3293 : :
1258 alvherre@alvh.no-ip. 3294 :UBC 0 : default:
525 dean.a.rasheed@gmail 3295 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3296 : : }
3297 : :
1258 alvherre@alvh.no-ip. 3298 [ + + + + :CBC 1258 : switch (result)
- - ]
3299 : : {
3300 : 1200 : case TM_Ok:
3301 : : /* all good; perform final actions */
1024 3302 [ + + + + ]: 1200 : if (canSetTag && commandType != CMD_NOTHING)
1258 3303 : 1174 : (estate->es_processed)++;
3304 : :
3305 : 1200 : break;
3306 : :
3307 : 16 : case TM_SelfModified:
3308 : :
3309 : : /*
3310 : : * The target tuple was already updated or deleted by the
3311 : : * current command, or by a later command in the current
3312 : : * transaction. The former case is explicitly disallowed by
3313 : : * the SQL standard for MERGE, which insists that the MERGE
3314 : : * join condition should not join a target row to more than
3315 : : * one source row.
3316 : : *
3317 : : * The latter case arises if the tuple is modified by a
3318 : : * command in a BEFORE trigger, or perhaps by a command in a
3319 : : * volatile function used in the query. In such situations we
3320 : : * should not ignore the MERGE action, but it is equally
3321 : : * unsafe to proceed. We don't want to discard the original
3322 : : * MERGE action while keeping the triggered actions based on
3323 : : * it; and it would be no better to allow the original MERGE
3324 : : * action while discarding the updates that it triggered. So
3325 : : * throwing an error is the only safe course.
3326 : : */
548 dean.a.rasheed@gmail 3327 [ + + ]: 16 : if (context->tmfd.cmax != estate->es_output_cid)
3328 [ + - ]: 6 : ereport(ERROR,
3329 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3330 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3331 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3332 : :
1258 alvherre@alvh.no-ip. 3333 [ + - ]: 10 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3334 [ + - ]: 10 : ereport(ERROR,
3335 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3336 : : /* translator: %s is a SQL command name */
3337 : : errmsg("%s command cannot affect row a second time",
3338 : : "MERGE"),
3339 : : errhint("Ensure that not more than one source row matches any one target row.")));
3340 : :
3341 : : /* This shouldn't happen */
1258 alvherre@alvh.no-ip. 3342 [ # # ]:UBC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3343 : : break;
3344 : :
1258 alvherre@alvh.no-ip. 3345 :CBC 5 : case TM_Deleted:
3346 [ - + ]: 5 : if (IsolationUsesXactSnapshot())
1258 alvherre@alvh.no-ip. 3347 [ # # ]:UBC 0 : ereport(ERROR,
3348 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3349 : : errmsg("could not serialize access due to concurrent delete")));
3350 : :
3351 : : /*
3352 : : * If the tuple was already deleted, set matched to false to
3353 : : * let the caller handle it under NOT MATCHED [BY TARGET] clauses.
3354 : : */
538 dean.a.rasheed@gmail 3355 :CBC 5 : *matched = false;
347 noah@leadboat.com 3356 : 5 : goto out;
3357 : :
1258 alvherre@alvh.no-ip. 3358 : 37 : case TM_Updated:
3359 : : {
3360 : : bool was_matched;
3361 : : Relation resultRelationDesc;
3362 : : TupleTableSlot *epqslot,
3363 : : *inputslot;
3364 : : LockTupleMode lockmode;
3365 : :
3366 : : /*
3367 : : * The target tuple was concurrently updated by some other
3368 : : * transaction. If we are currently processing a MATCHED
3369 : : * action, use EvalPlanQual() with the new version of the
3370 : : * tuple and recheck the join qual, to detect a change
3371 : : * from the MATCHED to the NOT MATCHED cases. If we are
3372 : : * already processing a NOT MATCHED BY SOURCE action, we
3373 : : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3374 : : * MATCHED).
3375 : : */
525 dean.a.rasheed@gmail 3376 : 37 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
1258 alvherre@alvh.no-ip. 3377 : 37 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3378 : 37 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3379 : :
525 dean.a.rasheed@gmail 3380 [ + - ]: 37 : if (was_matched)
3381 : 37 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3382 : : resultRelInfo->ri_RangeTableIndex);
3383 : : else
525 dean.a.rasheed@gmail 3384 :UBC 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3385 : :
1258 alvherre@alvh.no-ip. 3386 :CBC 37 : result = table_tuple_lock(resultRelationDesc, tupleid,
3387 : : estate->es_snapshot,
3388 : : inputslot, estate->es_output_cid,
3389 : : lockmode, LockWaitBlock,
3390 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3391 : : &context->tmfd);
3392 [ + - + - ]: 37 : switch (result)
3393 : : {
3394 : 36 : case TM_Ok:
3395 : :
3396 : : /*
3397 : : * If the tuple was updated and migrated to
3398 : : * another partition concurrently, the current
3399 : : * MERGE implementation can't follow. There's
3400 : : * probably a better way to handle this case, but
3401 : : * it'd require recognizing the relation to which
3402 : : * the tuple moved, and setting our current
3403 : : * resultRelInfo to that.
3404 : : */
1 dean.a.rasheed@gmail 3405 [ - + ]: 36 : if (ItemPointerIndicatesMovedPartitions(tupleid))
1258 alvherre@alvh.no-ip. 3406 [ # # ]:UBC 0 : ereport(ERROR,
3407 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3408 : : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3409 : :
3410 : : /*
3411 : : * If this was a MATCHED case, use EvalPlanQual()
3412 : : * to recheck the join condition.
3413 : : */
525 dean.a.rasheed@gmail 3414 [ + - ]:CBC 36 : if (was_matched)
3415 : : {
3416 : 36 : epqslot = EvalPlanQual(epqstate,
3417 : : resultRelationDesc,
3418 : : resultRelInfo->ri_RangeTableIndex,
3419 : : inputslot);
3420 : :
3421 : : /*
3422 : : * If the subplan didn't return a tuple, then
3423 : : * we must be dealing with an inner join for
3424 : : * which the join condition no longer matches.
3425 : : * This can only happen if there are no NOT
3426 : : * MATCHED actions, and so there is nothing
3427 : : * more to do.
3428 : : */
3429 [ + - - + ]: 36 : if (TupIsNull(epqslot))
347 noah@leadboat.com 3430 :UBC 0 : goto out;
3431 : :
3432 : : /*
3433 : : * If we got a NULL ctid from the subplan, the
3434 : : * join quals no longer pass and we switch to
3435 : : * the NOT MATCHED BY SOURCE case.
3436 : : */
525 dean.a.rasheed@gmail 3437 :CBC 36 : (void) ExecGetJunkAttribute(epqslot,
3438 : 36 : resultRelInfo->ri_RowIdAttNo,
3439 : : &isNull);
3440 [ + + ]: 36 : if (isNull)
3441 : 2 : *matched = false;
3442 : :
3443 : : /*
3444 : : * Otherwise, recheck the join quals to see if
3445 : : * we need to switch to the NOT MATCHED BY
3446 : : * SOURCE case.
3447 : : */
347 noah@leadboat.com 3448 [ + + ]: 36 : if (resultRelInfo->ri_needLockTagTuple)
3449 : : {
3450 [ + - ]: 1 : if (ItemPointerIsValid(&lockedtid))
3451 : 1 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3452 : : InplaceUpdateTupleLock);
1 dean.a.rasheed@gmail 3453 : 1 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3454 : : InplaceUpdateTupleLock);
3455 : 1 : lockedtid = *tupleid;
3456 : : }
3457 : :
525 3458 [ - + ]: 36 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3459 : : tupleid,
3460 : : SnapshotAny,
3461 : : resultRelInfo->ri_oldTupleSlot))
525 dean.a.rasheed@gmail 3462 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch the target tuple");
3463 : :
525 dean.a.rasheed@gmail 3464 [ + + ]:CBC 36 : if (*matched)
3465 : 34 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3466 : : econtext);
3467 : :
3468 : : /* Switch lists, if necessary */
3469 [ + + ]: 36 : if (!*matched)
3470 : 3 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3471 : : }
3472 : :
3473 : : /*
3474 : : * Loop back and process the MATCHED or NOT
3475 : : * MATCHED BY SOURCE actions from the start.
3476 : : */
1258 alvherre@alvh.no-ip. 3477 : 36 : goto lmerge_matched;
3478 : :
1258 alvherre@alvh.no-ip. 3479 :UBC 0 : case TM_Deleted:
3480 : :
3481 : : /*
3482 : : * tuple already deleted; tell caller to run NOT
3483 : : * MATCHED [BY TARGET] actions
3484 : : */
538 dean.a.rasheed@gmail 3485 : 0 : *matched = false;
347 noah@leadboat.com 3486 : 0 : goto out;
3487 : :
1258 alvherre@alvh.no-ip. 3488 :CBC 1 : case TM_SelfModified:
3489 : :
3490 : : /*
3491 : : * This can be reached when following an update
3492 : : * chain from a tuple updated by another session,
3493 : : * reaching a tuple that was already updated or
3494 : : * deleted by the current command, or by a later
3495 : : * command in the current transaction. As above,
3496 : : * this should always be treated as an error.
3497 : : */
3498 [ - + ]: 1 : if (context->tmfd.cmax != estate->es_output_cid)
1258 alvherre@alvh.no-ip. 3499 [ # # ]:UBC 0 : ereport(ERROR,
3500 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3501 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3502 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3503 : :
548 dean.a.rasheed@gmail 3504 [ + - ]:CBC 1 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3505 [ + - ]: 1 : ereport(ERROR,
3506 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3507 : : /* translator: %s is a SQL command name */
3508 : : errmsg("%s command cannot affect row a second time",
3509 : : "MERGE"),
3510 : : errhint("Ensure that not more than one source row matches any one target row.")));
3511 : :
3512 : : /* This shouldn't happen */
548 dean.a.rasheed@gmail 3513 [ # # ]:UBC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3514 : : goto out;
3515 : :
1258 alvherre@alvh.no-ip. 3516 : 0 : default:
3517 : : /* see table_tuple_lock call in ExecDelete() */
3518 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3519 : : result);
3520 : : goto out;
3521 : : }
3522 : : }
3523 : :
3524 : 0 : case TM_Invisible:
3525 : : case TM_WouldBlock:
3526 : : case TM_BeingModified:
3527 : : /* these should not occur */
3528 [ # # ]: 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3529 : : break;
3530 : : }
3531 : :
3532 : : /* Process RETURNING if present */
538 dean.a.rasheed@gmail 3533 [ + + ]:CBC 1200 : if (resultRelInfo->ri_projectReturning)
3534 : : {
3535 [ + + - - ]: 206 : switch (commandType)
3536 : : {
3537 : 89 : case CMD_UPDATE:
233 3538 : 89 : rslot = ExecProcessReturning(context,
3539 : : resultRelInfo,
3540 : : CMD_UPDATE,
3541 : : resultRelInfo->ri_oldTupleSlot,
3542 : : newslot,
3543 : : context->planSlot);
538 3544 : 89 : break;
3545 : :
3546 : 117 : case CMD_DELETE:
233 3547 : 117 : rslot = ExecProcessReturning(context,
3548 : : resultRelInfo,
3549 : : CMD_DELETE,
3550 : : resultRelInfo->ri_oldTupleSlot,
3551 : : NULL,
3552 : : context->planSlot);
538 3553 : 117 : break;
3554 : :
538 dean.a.rasheed@gmail 3555 :UBC 0 : case CMD_NOTHING:
3556 : 0 : break;
3557 : :
3558 : 0 : default:
3559 [ # # ]: 0 : elog(ERROR, "unrecognized commandType: %d",
3560 : : (int) commandType);
3561 : : }
3562 : : }
3563 : :
3564 : : /*
3565 : : * We've activated one of the WHEN clauses, so we don't search
3566 : : * further. This is required behaviour, not an optimization.
3567 : : */
1258 alvherre@alvh.no-ip. 3568 :CBC 1200 : break;
3569 : : }
3570 : :
3571 : : /*
3572 : : * Successfully executed an action or no qualifying action was found.
3573 : : */
347 noah@leadboat.com 3574 : 5979 : out:
3575 [ + + ]: 5979 : if (ItemPointerIsValid(&lockedtid))
3576 : 4007 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3577 : : InplaceUpdateTupleLock);
538 dean.a.rasheed@gmail 3578 : 5979 : return rslot;
3579 : : }
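
Both loops over actionStates implement strict first-match semantics: actions are tested in the order the WHEN clauses appear in the command, and the first one whose (possibly absent) qual passes is the one executed; as the comment says, that is required behaviour, not an optimization. Reduced to a standalone sketch with a hypothetical qual callback:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct { const char *name; bool (*when_qual)(int row); } Action;

    static bool always(int row)  { (void) row; return true; }  /* no AND qual */
    static bool is_even(int row) { return row % 2 == 0; }

    /* Return the first action whose WHEN qual passes, or NULL if none. */
    static const Action *
    first_qualifying(const Action *acts, size_t n, int row)
    {
        for (size_t i = 0; i < n; i++)
            if (acts[i].when_qual(row))
                return &acts[i];
        return NULL;
    }

    int main(void)
    {
        Action acts[] = {{"update-even-rows", is_even}, {"delete", always}};
        printf("%s\n", first_qualifying(acts, 2, 3)->name);  /* "delete" */
        return 0;
    }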
3580 : :
3581 : : /*
3582 : : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3583 : : */
3584 : : static TupleTableSlot *
1258 alvherre@alvh.no-ip. 3585 : 1344 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3586 : : bool canSetTag)
3587 : : {
3588 : 1344 : ModifyTableState *mtstate = context->mtstate;
3589 : 1344 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3590 : : List *actionStates;
538 dean.a.rasheed@gmail 3591 : 1344 : TupleTableSlot *rslot = NULL;
3592 : : ListCell *l;
3593 : :
3594 : : /*
3595 : : * For INSERT actions, the root relation's merge action is OK since the
3596 : : * INSERT's targetlist and the WHEN conditions can only refer to the
3597 : : * source relation, so it does not matter which result relation we
3598 : : * work with.
3599 : : *
3600 : : * XXX does this mean that we can avoid creating copies of actionStates on
3601 : : * partitioned tables, for not-matched actions?
3602 : : */
525 3603 : 1344 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3604 : :
3605 : : /*
3606 : : * Make source tuple available to ExecQual and ExecProject. We don't need
3607 : : * the target tuple, since the WHEN quals and targetlist can't refer to
3608 : : * the target columns.
3609 : : */
1258 alvherre@alvh.no-ip. 3610 : 1344 : econtext->ecxt_scantuple = NULL;
3611 : 1344 : econtext->ecxt_innertuple = context->planSlot;
3612 : 1344 : econtext->ecxt_outertuple = NULL;
3613 : :
3614 [ + - + + : 1779 : foreach(l, actionStates)
+ + ]
3615 : : {
3616 : 1344 : MergeActionState *action = (MergeActionState *) lfirst(l);
3617 : 1344 : CmdType commandType = action->mas_action->commandType;
3618 : : TupleTableSlot *newslot;
3619 : :
3620 : : /*
3621 : : * Test condition, if any.
3622 : : *
3623 : : * In the absence of any condition, we perform the action
3624 : : * unconditionally (no need to check separately since ExecQual() will
3625 : : * return true if there are no conditions to evaluate).
3626 : : */
3627 [ + + ]: 1344 : if (!ExecQual(action->mas_whenqual, econtext))
3628 : 435 : continue;
3629 : :
3630 : : /* Perform stated action */
3631 [ + - - ]: 909 : switch (commandType)
3632 : : {
3633 : 909 : case CMD_INSERT:
3634 : :
3635 : : /*
3636 : : * Project the tuple. In case of a partitioned table, the
3637 : : * projection was already built to use the root's descriptor,
3638 : : * so we don't need to map the tuple here.
3639 : : */
3640 : 909 : newslot = ExecProject(action->mas_proj);
538 dean.a.rasheed@gmail 3641 : 909 : mtstate->mt_merge_action = action;
3642 : :
3643 : 909 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3644 : : newslot, canSetTag, NULL, NULL);
1258 alvherre@alvh.no-ip. 3645 : 879 : mtstate->mt_merge_inserted += 1;
3646 : 879 : break;
1258 alvherre@alvh.no-ip. 3647 :UBC 0 : case CMD_NOTHING:
3648 : : /* Do nothing */
3649 : 0 : break;
3650 : 0 : default:
3651 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3652 : : }
3653 : :
3654 : : /*
3655 : : * We've activated one of the WHEN clauses, so we don't search
3656 : : * further. This is required behaviour, not an optimization.
3657 : : */
1258 alvherre@alvh.no-ip. 3658 :CBC 879 : break;
3659 : : }
3660 : :
538 dean.a.rasheed@gmail 3661 : 1314 : return rslot;
3662 : : }
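
As the comment in ExecMergeNotMatched() notes, INSERT actions always target the root result relation, because their targetlist and WHEN quals can only reference the source relation; UPDATE and DELETE, by contrast, act on whichever result relation the matched row was found in. A schematic of that routing choice (standalone, with invented types and table names):

    #include <stdio.h>

    typedef struct { const char *name; } Rel;

    /* INSERT goes through the root; UPDATE/DELETE stay on the current rel. */
    static const Rel *
    action_target(char action, const Rel *root, const Rel *current)
    {
        return action == 'I' ? root : current;
    }

    int main(void)
    {
        Rel root = {"measurement"}, part = {"measurement_y2025"};
        printf("INSERT -> %s\n", action_target('I', &root, &part)->name);
        printf("UPDATE -> %s\n", action_target('U', &root, &part)->name);
        return 0;
    }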
3663 : :
3664 : : /*
3665 : : * Initialize state for execution of MERGE.
3666 : : */
3667 : : void
1258 alvherre@alvh.no-ip. 3668 : 781 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3669 : : {
201 amitlan@postgresql.o 3670 : 781 : List *mergeActionLists = mtstate->mt_mergeActionLists;
3671 : 781 : List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
1258 alvherre@alvh.no-ip. 3672 : 781 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3673 : : ResultRelInfo *resultRelInfo;
3674 : : ExprContext *econtext;
3675 : : ListCell *lc;
3676 : : int i;
3677 : :
201 amitlan@postgresql.o 3678 [ - + ]: 781 : if (mergeActionLists == NIL)
1258 alvherre@alvh.no-ip. 3679 :UBC 0 : return;
3680 : :
1258 alvherre@alvh.no-ip. 3681 :CBC 781 : mtstate->mt_merge_subcommands = 0;
3682 : :
3683 [ + + ]: 781 : if (mtstate->ps.ps_ExprContext == NULL)
3684 : 649 : ExecAssignExprContext(estate, &mtstate->ps);
3685 : 781 : econtext = mtstate->ps.ps_ExprContext;
3686 : :
3687 : : /*
3688 : : * Create a MergeActionState for each action on the mergeActionList and
3689 : : * add it to either a list of matched actions or not-matched actions.
3690 : : *
3691 : : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3692 : : * anything here, do so there too.
3693 : : */
3694 : 781 : i = 0;
201 amitlan@postgresql.o 3695 [ + - + + : 1681 : foreach(lc, mergeActionLists)
+ + ]
3696 : : {
1258 alvherre@alvh.no-ip. 3697 : 900 : List *mergeActionList = lfirst(lc);
3698 : : Node *joinCondition;
3699 : : TupleDesc relationDesc;
3700 : : ListCell *l;
3701 : :
201 amitlan@postgresql.o 3702 : 900 : joinCondition = (Node *) list_nth(mergeJoinConditions, i);
1258 alvherre@alvh.no-ip. 3703 : 900 : resultRelInfo = mtstate->resultRelInfo + i;
3704 : 900 : i++;
3705 : 900 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3706 : :
3707 : : /* initialize slots for MERGE fetches from this rel */
3708 [ + - ]: 900 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3709 : 900 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3710 : :
3711 : : /* initialize state for join condition checking */
525 dean.a.rasheed@gmail 3712 : 900 : resultRelInfo->ri_MergeJoinCondition =
3713 : 900 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3714 : :
1258 alvherre@alvh.no-ip. 3715 [ + - + + : 2490 : foreach(l, mergeActionList)
+ + ]
3716 : : {
3717 : 1590 : MergeAction *action = (MergeAction *) lfirst(l);
3718 : : MergeActionState *action_state;
3719 : : TupleTableSlot *tgtslot;
3720 : : TupleDesc tgtdesc;
3721 : :
3722 : : /*
3723 : : * Build action merge state for this rel. (For partitions,
3724 : : * equivalent code exists in ExecInitPartitionInfo.)
3725 : : */
3726 : 1590 : action_state = makeNode(MergeActionState);
3727 : 1590 : action_state->mas_action = action;
3728 : 1590 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3729 : : &mtstate->ps);
3730 : :
3731 : : /*
3732 : : * We create three lists - one for each MergeMatchKind - and stick
3733 : : * the MergeActionState into the appropriate list.
3734 : : */
525 dean.a.rasheed@gmail 3735 : 3180 : resultRelInfo->ri_MergeActions[action->matchKind] =
3736 : 1590 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3737 : : action_state);
3738 : :
1258 alvherre@alvh.no-ip. 3739 [ + + + + : 1590 : switch (action->commandType)
- ]
3740 : : {
3741 : 526 : case CMD_INSERT:
3742 : : /* INSERT actions always use rootRelInfo */
3743 : 526 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3744 : : action->targetList);
3745 : :
3746 : : /*
3747 : : * If the MERGE targets a partitioned table, any INSERT
3748 : : * actions must be routed through it, not the child
3749 : : * relations. Initialize the routing struct and the root
3750 : : * table's "new" tuple slot for that, if not already done.
3751 : : * The projection we prepare, for all relations, uses the
3752 : : * root relation descriptor, and targets the plan's root
3753 : : * slot. (This is consistent with the fact that we
3754 : : * checked the plan output to match the root relation,
3755 : : * above.)
3756 : : */
3757 [ + + ]: 526 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3758 : : RELKIND_PARTITIONED_TABLE)
3759 : : {
3760 [ + + ]: 164 : if (mtstate->mt_partition_tuple_routing == NULL)
3761 : : {
3762 : : /*
3763 : : * Initialize planstate for routing if not already
3764 : : * done.
3765 : : *
3766 : : * Note that the slot is managed as a standalone
3767 : : * slot belonging to ModifyTableState, so we pass
3768 : : * NULL for the 2nd argument.
3769 : : */
3770 : 77 : mtstate->mt_root_tuple_slot =
3771 : 77 : table_slot_create(rootRelInfo->ri_RelationDesc,
3772 : : NULL);
3773 : 77 : mtstate->mt_partition_tuple_routing =
3774 : 77 : ExecSetupPartitionTupleRouting(estate,
3775 : : rootRelInfo->ri_RelationDesc);
3776 : : }
3777 : 164 : tgtslot = mtstate->mt_root_tuple_slot;
3778 : 164 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3779 : : }
3780 : : else
3781 : : {
3782 : : /*
3783 : : * If the MERGE targets an inherited table, we insert
3784 : : * into the root table, so we must initialize its
3785 : : * "new" tuple slot, if not already done, and use its
3786 : : * relation descriptor for the projection.
3787 : : *
3788 : : * For non-inherited tables, rootRelInfo and
3789 : : * resultRelInfo are the same, and the "new" tuple
3790 : : * slot will already have been initialized.
3791 : : */
98 dean.a.rasheed@gmail 3792 [ + + ]: 362 : if (rootRelInfo->ri_newTupleSlot == NULL)
3793 : 18 : rootRelInfo->ri_newTupleSlot =
3794 : 18 : table_slot_create(rootRelInfo->ri_RelationDesc,
3795 : : &estate->es_tupleTable);
3796 : :
3797 : 362 : tgtslot = rootRelInfo->ri_newTupleSlot;
3798 : 362 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3799 : : }
3800 : :
1258 alvherre@alvh.no-ip. 3801 : 526 : action_state->mas_proj =
3802 : 526 : ExecBuildProjectionInfo(action->targetList, econtext,
3803 : : tgtslot,
3804 : : &mtstate->ps,
3805 : : tgtdesc);
3806 : :
3807 : 526 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3808 : 526 : break;
3809 : 800 : case CMD_UPDATE:
3810 : 800 : action_state->mas_proj =
3811 : 800 : ExecBuildUpdateProjection(action->targetList,
3812 : : true,
3813 : : action->updateColnos,
3814 : : relationDesc,
3815 : : econtext,
3816 : : resultRelInfo->ri_newTupleSlot,
3817 : : &mtstate->ps);
3818 : 800 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3819 : 800 : break;
3820 : 229 : case CMD_DELETE:
3821 : 229 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3822 : 229 : break;
3823 : 35 : case CMD_NOTHING:
3824 : 35 : break;
1258 alvherre@alvh.no-ip. 3825 :UBC 0 : default:
161 dean.a.rasheed@gmail 3826 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3827 : : break;
3828 : : }
3829 : : }
3830 : : }
3831 : :
3832 : : /*
3833 : : * If the MERGE targets an inherited table, any INSERT actions will use
3834 : : * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
3835 : : * Therefore we must initialize its WITH CHECK OPTION constraints and
3836 : : * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
3837 : : * entries.
3838 : : *
3839 : : * Note that the planner does not build a withCheckOptionList or
3840 : : * returningList for the root relation, but as in ExecInitPartitionInfo,
3841 : : * we can use the first resultRelInfo entry as a reference to calculate
3842 : : * the attno's for the root table.
3843 : : */
98 dean.a.rasheed@gmail 3844 [ + + ]:CBC 781 : if (rootRelInfo != mtstate->resultRelInfo &&
3845 [ + + ]: 122 : rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
3846 [ + + ]: 24 : (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
3847 : : {
3848 : 18 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3849 : 18 : Relation rootRelation = rootRelInfo->ri_RelationDesc;
3850 : 18 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
3851 : 18 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
3852 : 18 : AttrMap *part_attmap = NULL;
3853 : : bool found_whole_row;
3854 : :
3855 [ + + ]: 18 : if (node->withCheckOptionLists != NIL)
3856 : : {
3857 : : List *wcoList;
3858 : 9 : List *wcoExprs = NIL;
3859 : :
3860 : : /* There should be as many WCO lists as result rels */
3861 [ - + ]: 9 : Assert(list_length(node->withCheckOptionLists) ==
3862 : : list_length(node->resultRelations));
3863 : :
3864 : : /*
3865 : : * Use the first WCO list as a reference. In the most common case,
3866 : : * this will be for the same relation as rootRelInfo, and so there
3867 : : * will be no need to adjust its attno's.
3868 : : */
3869 : 9 : wcoList = linitial(node->withCheckOptionLists);
3870 [ + - ]: 9 : if (rootRelation != firstResultRel)
3871 : : {
3872 : : /* Convert any Vars in it to contain the root's attno's */
3873 : : part_attmap =
3874 : 9 : build_attrmap_by_name(RelationGetDescr(rootRelation),
3875 : : RelationGetDescr(firstResultRel),
3876 : : false);
3877 : :
3878 : : wcoList = (List *)
3879 : 9 : map_variable_attnos((Node *) wcoList,
3880 : : firstVarno, 0,
3881 : : part_attmap,
3882 : 9 : RelationGetForm(rootRelation)->reltype,
3883 : : &found_whole_row);
3884 : : }
3885 : :
3886 [ + - + + : 45 : foreach(lc, wcoList)
+ + ]
3887 : : {
3888 : 36 : WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
3889 : 36 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
3890 : : &mtstate->ps);
3891 : :
3892 : 36 : wcoExprs = lappend(wcoExprs, wcoExpr);
3893 : : }
3894 : :
3895 : 9 : rootRelInfo->ri_WithCheckOptions = wcoList;
3896 : 9 : rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
3897 : : }
3898 : :
3899 [ + + ]: 18 : if (node->returningLists != NIL)
3900 : : {
3901 : : List *returningList;
3902 : :
3903 : : /* There should be as many returning lists as result rels */
3904 [ - + ]: 3 : Assert(list_length(node->returningLists) ==
3905 : : list_length(node->resultRelations));
3906 : :
3907 : : /*
3908 : : * Use the first returning list as a reference. In the most common
3909 : : * case, this will be for the same relation as rootRelInfo, and so
3910 : : * there will be no need to adjust its attno's.
3911 : : */
3912 : 3 : returningList = linitial(node->returningLists);
3913 [ + - ]: 3 : if (rootRelation != firstResultRel)
3914 : : {
3915 : : /* Convert any Vars in it to contain the root's attno's */
3916 [ - + ]: 3 : if (part_attmap == NULL)
3917 : : part_attmap =
98 dean.a.rasheed@gmail 3918 :UBC 0 : build_attrmap_by_name(RelationGetDescr(rootRelation),
3919 : : RelationGetDescr(firstResultRel),
3920 : : false);
3921 : :
3922 : : returningList = (List *)
98 dean.a.rasheed@gmail 3923 :CBC 3 : map_variable_attnos((Node *) returningList,
3924 : : firstVarno, 0,
3925 : : part_attmap,
3926 : 3 : RelationGetForm(rootRelation)->reltype,
3927 : : &found_whole_row);
3928 : : }
3929 : 3 : rootRelInfo->ri_returningList = returningList;
3930 : :
3931 : : /* Initialize the RETURNING projection */
3932 : 3 : rootRelInfo->ri_projectReturning =
3933 : 3 : ExecBuildProjectionInfo(returningList, econtext,
3934 : : mtstate->ps.ps_ResultTupleSlot,
3935 : : &mtstate->ps,
3936 : : RelationGetDescr(rootRelation));
3937 : : }
3938 : : }
3939 : : }
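
ExecInitMerge() buckets each relation's MergeActionStates into three lists indexed by the action's MergeMatchKind, so the executor can later grab exactly the list it needs (MATCHED, NOT MATCHED BY SOURCE, or NOT MATCHED [BY TARGET]). The shape of that structure, reduced to standalone C with invented node types (the real code uses PostgreSQL List cells and appends rather than prepends):

    #include <stdio.h>

    enum { WHEN_MATCHED, WHEN_NOT_MATCHED_BY_SOURCE,
           WHEN_NOT_MATCHED_BY_TARGET, N_MATCH_KINDS };

    typedef struct ActionNode { const char *cmd; struct ActionNode *next; } ActionNode;

    /* Models ResultRelInfo.ri_MergeActions: one action list per match kind. */
    typedef struct { ActionNode *actions[N_MATCH_KINDS]; } RelMergeState;

    static void add_action(RelMergeState *rel, int kind, ActionNode *a)
    {
        a->next = rel->actions[kind];   /* prepend, for brevity */
        rel->actions[kind] = a;
    }

    int main(void)
    {
        RelMergeState rel = {{NULL}};
        ActionNode upd = {"UPDATE", NULL}, ins = {"INSERT", NULL};
        add_action(&rel, WHEN_MATCHED, &upd);
        add_action(&rel, WHEN_NOT_MATCHED_BY_TARGET, &ins);
        printf("first MATCHED action: %s\n", rel.actions[WHEN_MATCHED]->cmd);
        return 0;
    }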
3940 : :
3941 : : /*
3942 : : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3943 : : *
3944 : : * We mark 'projectNewInfoValid' even though the projections themselves
3945 : : * are not initialized here.
3946 : : */
3947 : : void
1258 alvherre@alvh.no-ip. 3948 : 912 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3949 : : ResultRelInfo *resultRelInfo)
3950 : : {
3951 : 912 : EState *estate = mtstate->ps.state;
3952 : :
3953 [ - + ]: 912 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3954 : :
3955 : 912 : resultRelInfo->ri_oldTupleSlot =
3956 : 912 : table_slot_create(resultRelInfo->ri_RelationDesc,
3957 : : &estate->es_tupleTable);
3958 : 912 : resultRelInfo->ri_newTupleSlot =
3959 : 912 : table_slot_create(resultRelInfo->ri_RelationDesc,
3960 : : &estate->es_tupleTable);
3961 : 912 : resultRelInfo->ri_projectNewInfoValid = true;
3962 : 912 : }
3963 : :
3964 : : /*
3965 : : * Process BEFORE EACH STATEMENT triggers
3966 : : */
3967 : : static void
5810 tgl@sss.pgh.pa.us 3968 : 54862 : fireBSTriggers(ModifyTableState *node)
3969 : : {
2728 alvherre@alvh.no-ip. 3970 : 54862 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
1783 heikki.linnakangas@i 3971 : 54862 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3972 : :
5810 tgl@sss.pgh.pa.us 3973 [ + + + + : 54862 : switch (node->operation)
- ]
3974 : : {
3975 : 41305 : case CMD_INSERT:
3050 rhaas@postgresql.org 3976 : 41305 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
2728 alvherre@alvh.no-ip. 3977 [ + + ]: 41299 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3774 andres@anarazel.de 3978 : 419 : ExecBSUpdateTriggers(node->ps.state,
3979 : : resultRelInfo);
5810 tgl@sss.pgh.pa.us 3980 : 41299 : break;
3981 : 6801 : case CMD_UPDATE:
3050 rhaas@postgresql.org 3982 : 6801 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
5810 tgl@sss.pgh.pa.us 3983 : 6801 : break;
3984 : 6052 : case CMD_DELETE:
3050 rhaas@postgresql.org 3985 : 6052 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
5810 tgl@sss.pgh.pa.us 3986 : 6052 : break;
1258 alvherre@alvh.no-ip. 3987 : 704 : case CMD_MERGE:
3988 [ + + ]: 704 : if (node->mt_merge_subcommands & MERGE_INSERT)
3989 : 388 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3990 [ + + ]: 704 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3991 : 477 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3992 [ + + ]: 704 : if (node->mt_merge_subcommands & MERGE_DELETE)
3993 : 187 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3994 : 704 : break;
5810 tgl@sss.pgh.pa.us 3995 :UBC 0 : default:
3996 [ # # ]: 0 : elog(ERROR, "unknown operation");
3997 : : break;
3998 : : }
5810 tgl@sss.pgh.pa.us 3999 :CBC 54856 : }
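
/*
 * Editor's sketch: mt_merge_subcommands is a bitmask, which is what lets
 * the MERGE case above fire only the statement triggers that match the
 * actions actually present. A hedged illustration of how the mask might be
 * populated (the real assignments are made while building the per-action
 * state in ExecInitMerge):
 */
#if 0
	foreach(lc, mergeActionList)
	{
		MergeAction *action = lfirst_node(MergeAction, lc);

		switch (action->commandType)
		{
			case CMD_INSERT:
				mtstate->mt_merge_subcommands |= MERGE_INSERT;
				break;
			case CMD_UPDATE:
				mtstate->mt_merge_subcommands |= MERGE_UPDATE;
				break;
			case CMD_DELETE:
				mtstate->mt_merge_subcommands |= MERGE_DELETE;
				break;
			default:
				break;
		}
	}
#endif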
4000 : :
4001 : : /*
4002 : : * Process AFTER EACH STATEMENT triggers
4003 : : */
4004 : : static void
2992 rhodiumtoad@postgres 4005 : 53224 : fireASTriggers(ModifyTableState *node)
4006 : : {
2728 alvherre@alvh.no-ip. 4007 : 53224 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
1783 heikki.linnakangas@i 4008 : 53224 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4009 : :
5810 tgl@sss.pgh.pa.us 4010 [ + + + + - ]: 53224 : switch (node->operation)
4011 : : {
4012 : 40155 : case CMD_INSERT:
2728 alvherre@alvh.no-ip. 4013 [ + + ]: 40155 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3774 andres@anarazel.de 4014 : 365 : ExecASUpdateTriggers(node->ps.state,
4015 : : resultRelInfo,
2912 tgl@sss.pgh.pa.us 4016 : 365 : node->mt_oc_transition_capture);
2992 rhodiumtoad@postgres 4017 : 40155 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4018 : 40155 : node->mt_transition_capture);
5810 tgl@sss.pgh.pa.us 4019 : 40155 : break;
4020 : 6445 : case CMD_UPDATE:
2992 rhodiumtoad@postgres 4021 : 6445 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4022 : 6445 : node->mt_transition_capture);
5810 tgl@sss.pgh.pa.us 4023 : 6445 : break;
4024 : 5997 : case CMD_DELETE:
2992 rhodiumtoad@postgres 4025 : 5997 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4026 : 5997 : node->mt_transition_capture);
5810 tgl@sss.pgh.pa.us 4027 : 5997 : break;
1258 alvherre@alvh.no-ip. 4028 : 627 : case CMD_MERGE:
4029 [ + + ]: 627 : if (node->mt_merge_subcommands & MERGE_DELETE)
4030 : 169 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4031 : 169 : node->mt_transition_capture);
4032 [ + + ]: 627 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4033 : 427 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4034 : 427 : node->mt_transition_capture);
4035 [ + + ]: 627 : if (node->mt_merge_subcommands & MERGE_INSERT)
4036 : 354 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4037 : 354 : node->mt_transition_capture);
4038 : 627 : break;
5810 tgl@sss.pgh.pa.us 4039 :UBC 0 : default:
4040 [ # # ]: 0 : elog(ERROR, "unknown operation");
4041 : : break;
4042 : : }
5810 tgl@sss.pgh.pa.us 4043 :CBC 53224 : }
4044 : :
4045 : : /*
4046 : : * Set up the state needed for collecting transition tuples for AFTER
4047 : : * triggers.
4048 : : */
4049 : : static void
2992 rhodiumtoad@postgres 4050 : 55044 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
4051 : : {
2728 alvherre@alvh.no-ip. 4052 : 55044 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
1783 heikki.linnakangas@i 4053 : 55044 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4054 : :
4055 : : /* Check for transition tables on the directly targeted relation. */
2992 rhodiumtoad@postgres 4056 : 55044 : mtstate->mt_transition_capture =
2912 tgl@sss.pgh.pa.us 4057 : 55044 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4058 : 55044 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4059 : : mtstate->operation);
2728 alvherre@alvh.no-ip. 4060 [ + + ]: 55044 : if (plan->operation == CMD_INSERT &&
4061 [ + + ]: 41309 : plan->onConflictAction == ONCONFLICT_UPDATE)
2912 tgl@sss.pgh.pa.us 4062 : 422 : mtstate->mt_oc_transition_capture =
4063 : 422 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4064 : 422 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4065 : : CMD_UPDATE);
2787 rhaas@postgresql.org 4066 : 55044 : }
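
/*
 * Editor's note: MakeTransitionCaptureState returns NULL when no AFTER
 * trigger with transition tables applies, so the fields set above may well
 * be NULL. A hedged sketch of the consumer side, where the capture state
 * is simply threaded through the AFTER ROW trigger calls:
 */
#if 0
	ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
						 mtstate->mt_transition_capture);
#endif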
4067 : :
4068 : : /*
4069 : : * ExecPrepareTupleRouting --- prepare for routing one tuple
4070 : : *
4071 : : * Determine the partition in which the tuple in slot is to be inserted,
4072 : : * and return its ResultRelInfo in *partRelInfo. The return value is
4073 : : * a slot holding the tuple of the partition rowtype.
4074 : : *
4075 : : * This also sets the transition table information in mtstate based on the
4076 : : * selected partition.
4077 : : */
4078 : : static TupleTableSlot *
2728 alvherre@alvh.no-ip. 4079 : 369498 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
4080 : : EState *estate,
4081 : : PartitionTupleRouting *proute,
4082 : : ResultRelInfo *targetRelInfo,
4083 : : TupleTableSlot *slot,
4084 : : ResultRelInfo **partRelInfo)
4085 : : {
4086 : : ResultRelInfo *partrel;
4087 : : TupleConversionMap *map;
4088 : :
4089 : : /*
4090 : : * Look up the target partition's ResultRelInfo. If ExecFindPartition does
4091 : : * not find a valid partition for the tuple in 'slot' then an error is
4092 : : * raised. An error may also be raised if the found partition is not a
4093 : : * valid target for INSERTs. This is required since a partitioned table
4094 : : * UPDATE to another partition becomes a DELETE+INSERT.
4095 : : */
2486 4096 : 369498 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4097 : :
4098 : : /*
4099 : : * If we're capturing transition tuples, we might need to convert from the
4100 : : * partition rowtype to the root partitioned table's rowtype. But if there
4101 : : * are no BEFORE triggers on the partition that could change the tuple, we
4102 : : * can just remember the original unconverted tuple to avoid a needless
4103 : : * round trip conversion.
4104 : : */
2728 4105 [ + + ]: 369387 : if (mtstate->mt_transition_capture != NULL)
4106 : : {
4107 : : bool has_before_insert_row_trig;
4108 : :
1783 heikki.linnakangas@i 4109 [ + + ]: 98 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
4110 [ + + ]: 21 : partrel->ri_TrigDesc->trig_insert_before_row);
4111 : :
4112 : 77 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
4113 [ + + ]: 77 : !has_before_insert_row_trig ? slot : NULL;
4114 : : }
4115 : :
4116 : : /*
4117 : : * Convert the tuple, if necessary.
4118 : : */
1009 alvherre@alvh.no-ip. 4119 : 369387 : map = ExecGetRootToChildMap(partrel, estate);
2531 andres@anarazel.de 4120 [ + + ]: 369387 : if (map != NULL)
4121 : : {
1783 heikki.linnakangas@i 4122 : 34230 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4123 : :
2531 andres@anarazel.de 4124 : 34230 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4125 : : }
4126 : :
1788 heikki.linnakangas@i 4127 : 369387 : *partRelInfo = partrel;
2728 alvherre@alvh.no-ip. 4128 : 369387 : return slot;
4129 : : }
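
/*
 * Editor's sketch: the caller side of the routine above for a routed
 * INSERT (cf. ExecInsert); the partition's ResultRelInfo replaces the
 * root one for the remainder of the insertion.
 */
#if 0
	if (proute)
	{
		ResultRelInfo *partRelInfo;

		slot = ExecPrepareTupleRouting(mtstate, estate, proute,
									   resultRelInfo, slot, &partRelInfo);
		resultRelInfo = partRelInfo;
	}
#endif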
4130 : :
4131 : : /* ----------------------------------------------------------------
4132 : : * ExecModifyTable
4133 : : *
4134 : : * Perform table modifications as required, and return RETURNING results
4135 : : * if needed.
4136 : : * ----------------------------------------------------------------
4137 : : */
4138 : : static TupleTableSlot *
2973 andres@anarazel.de 4139 : 59276 : ExecModifyTable(PlanState *pstate)
4140 : : {
4141 : 59276 : ModifyTableState *node = castNode(ModifyTableState, pstate);
4142 : : ModifyTableContext context;
5671 bruce@momjian.us 4143 : 59276 : EState *estate = node->ps.state;
4144 : 59276 : CmdType operation = node->operation;
4145 : : ResultRelInfo *resultRelInfo;
4146 : : PlanState *subplanstate;
4147 : : TupleTableSlot *slot;
4148 : : TupleTableSlot *oldSlot;
4149 : : ItemPointerData tuple_ctid;
4150 : : HeapTupleData oldtupdata;
4151 : : HeapTuple oldtuple;
4152 : : ItemPointer tupleid;
4153 : : bool tuplock;
4154 : :
2965 andres@anarazel.de 4155 [ - + ]: 59276 : CHECK_FOR_INTERRUPTS();
4156 : :
4157 : : /*
4158 : : * This should NOT get called during EvalPlanQual; we should have passed a
4159 : : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4160 : : * Assert because this condition is easy to miss in testing. (Note:
4161 : : * although ModifyTable should not get executed within an EvalPlanQual
4162 : : * operation, we do have to allow it to be initialized and shut down in
4163 : : * case it is within a CTE subplan. Hence this test must be here, not in
4164 : : * ExecInitModifyTable.)
4165 : : */
2193 4166 [ - + ]: 59276 : if (estate->es_epq_active != NULL)
4970 tgl@sss.pgh.pa.us 4167 [ # # ]:UBC 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4168 : :
4169 : : /*
4170 : : * If we've already completed processing, don't try to do more. We need
4171 : : * this test because ExecPostprocessPlan might call us an extra time, and
4172 : : * our subplan's nodes aren't necessarily robust against being called
4173 : : * extra times.
4174 : : */
5307 tgl@sss.pgh.pa.us 4175 [ + + ]:CBC 59276 : if (node->mt_done)
4176 : 399 : return NULL;
4177 : :
4178 : : /*
4179 : : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4180 : : */
5810 4181 [ + + ]: 58877 : if (node->fireBSTriggers)
4182 : : {
4183 : 54862 : fireBSTriggers(node);
4184 : 54856 : node->fireBSTriggers = false;
4185 : : }
4186 : :
4187 : : /* Preload local variables */
1620 4188 : 58871 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4189 : 58871 : subplanstate = outerPlanState(node);
4190 : :
4191 : : /* Set global context */
1269 alvherre@alvh.no-ip. 4192 : 58871 : context.mtstate = node;
4193 : 58871 : context.epqstate = &node->mt_epqstate;
4194 : 58871 : context.estate = estate;
4195 : :
4196 : : /*
4197 : : * Fetch rows from subplan, and execute the required table modification
4198 : : * for each row.
4199 : : */
4200 : : for (;;)
4201 : : {
4202 : : /*
4203 : : * Reset the per-output-tuple exprcontext. This is needed because
4204 : : * triggers expect to use that context as workspace. It's a bit ugly
4205 : : * to do this below the top level of the plan, however. We might need
4206 : : * to rethink this later.
4207 : : */
5498 tgl@sss.pgh.pa.us 4208 [ + + ]: 7135040 : ResetPerTupleExprContext(estate);
4209 : :
4210 : : /*
4211 : : * Reset per-tuple memory context used for processing on conflict and
4212 : : * returning clauses, to free any expression evaluation storage
4213 : : * allocated in the previous cycle.
4214 : : */
2489 andres@anarazel.de 4215 [ + + ]: 7135040 : if (pstate->ps_ExprContext)
4216 : 176117 : ResetExprContext(pstate->ps_ExprContext);
4217 : :
4218 : : /*
4219 : : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4220 : : * to execute, do so now --- see the comments in ExecMerge().
4221 : : */
525 dean.a.rasheed@gmail 4222 [ + + ]: 7135040 : if (node->mt_merge_pending_not_matched != NULL)
4223 : : {
4224 : 1 : context.planSlot = node->mt_merge_pending_not_matched;
233 4225 : 1 : context.cpDeletedSlot = NULL;
4226 : :
525 4227 : 1 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4228 : 1 : node->canSetTag);
4229 : :
4230 : : /* Clear the pending action */
4231 : 1 : node->mt_merge_pending_not_matched = NULL;
4232 : :
4233 : : /*
4234 : : * If we got a RETURNING result, return it to the caller. We'll
4235 : : * continue the work on next call.
4236 : : */
4237 [ + - ]: 1 : if (slot)
4238 : 1 : return slot;
4239 : :
525 dean.a.rasheed@gmail 4240 :UBC 0 : continue; /* continue with the next tuple */
4241 : : }
4242 : :
4243 : : /* Fetch the next row from subplan */
1235 alvherre@alvh.no-ip. 4244 :CBC 7135039 : context.planSlot = ExecProcNode(subplanstate);
233 dean.a.rasheed@gmail 4245 : 7134830 : context.cpDeletedSlot = NULL;
4246 : :
4247 : : /* No more tuples to process? */
1235 alvherre@alvh.no-ip. 4248 [ + + + + ]: 7134830 : if (TupIsNull(context.planSlot))
4249 : : break;
4250 : :
4251 : : /*
4252 : : * When there are multiple result relations, each tuple contains a
4253 : : * junk column that gives the OID of the rel from which it came.
4254 : : * Extract it and select the correct result relation.
4255 : : */
1620 tgl@sss.pgh.pa.us 4256 [ + + ]: 7081605 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4257 : : {
4258 : : Datum datum;
4259 : : bool isNull;
4260 : : Oid resultoid;
4261 : :
1235 alvherre@alvh.no-ip. 4262 : 2592 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4263 : : &isNull);
1620 tgl@sss.pgh.pa.us 4264 [ + + ]: 2592 : if (isNull)
4265 : : {
4266 : : /*
4267 : : * For commands other than MERGE, any tuples having InvalidOid
4268 : : * for tableoid are errors. For MERGE, we may need to handle
4269 : : * them with the WHEN NOT MATCHED clauses, if any, so do that.
4270 : : *
4271 : : * Note that we use the node's toplevel resultRelInfo, not any
4272 : : * specific partition's.
4273 : : */
1258 alvherre@alvh.no-ip. 4274 [ + - ]: 254 : if (operation == CMD_MERGE)
4275 : : {
1235 4276 : 254 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4277 : :
538 dean.a.rasheed@gmail 4278 : 254 : slot = ExecMerge(&context, node->resultRelInfo,
4279 : 254 : NULL, NULL, node->canSetTag);
4280 : :
4281 : : /*
4282 : : * If we got a RETURNING result, return it to the caller.
4283 : : * We'll continue the work on next call.
4284 : : */
4285 [ + + ]: 248 : if (slot)
4286 : 19 : return slot;
4287 : :
4288 : 229 : continue; /* continue with the next tuple */
4289 : : }
4290 : :
1620 tgl@sss.pgh.pa.us 4291 [ # # ]:UBC 0 : elog(ERROR, "tableoid is NULL");
4292 : : }
1620 tgl@sss.pgh.pa.us 4293 :CBC 2338 : resultoid = DatumGetObjectId(datum);
4294 : :
4295 : : /* If it's not the same as last time, we need to locate the rel */
4296 [ + + ]: 2338 : if (resultoid != node->mt_lastResultOid)
1614 4297 : 1601 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4298 : : false, true);
4299 : : }
4300 : :
4301 : : /*
4302 : : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4303 : : * here is compute the RETURNING expressions.
4304 : : */
3459 rhaas@postgresql.org 4305 [ + + ]: 7081351 : if (resultRelInfo->ri_usesFdwDirectModify)
4306 : : {
4307 [ - + ]: 347 : Assert(resultRelInfo->ri_projectReturning);
4308 : :
4309 : : /*
4310 : : * A scan slot containing the data that was actually inserted,
4311 : : * updated or deleted has already been made available to
4312 : : * ExecProcessReturning by IterateDirectModify, so no need to
4313 : : * provide it here. The individual old and new slots are not
4314 : : * needed, since direct-modify is disabled if the RETURNING list
4315 : : * refers to OLD/NEW values.
4316 : : */
233 dean.a.rasheed@gmail 4317 [ + - - + ]: 347 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4318 : : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4319 : :
4320 : 347 : slot = ExecProcessReturning(&context, resultRelInfo, operation,
4321 : : NULL, NULL, context.planSlot);
4322 : :
3459 rhaas@postgresql.org 4323 : 347 : return slot;
4324 : : }
4325 : :
1235 alvherre@alvh.no-ip. 4326 : 7081004 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4327 : 7081004 : slot = context.planSlot;
4328 : :
2840 tgl@sss.pgh.pa.us 4329 : 7081004 : tupleid = NULL;
4185 noah@leadboat.com 4330 : 7081004 : oldtuple = NULL;
4331 : :
4332 : : /*
4333 : : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4334 : : * to be updated/deleted/merged. For a heap relation, that's a TID;
4335 : : * otherwise we may have a wholerow junk attr that carries the old
4336 : : * tuple in toto. Keep this in step with the part of
4337 : : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4338 : : */
1258 alvherre@alvh.no-ip. 4339 [ + + + + + + ]: 7081004 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4340 : : operation == CMD_MERGE)
4341 : : {
4342 : : char relkind;
4343 : : Datum datum;
4344 : : bool isNull;
4345 : :
1620 tgl@sss.pgh.pa.us 4346 : 934580 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4347 [ + + + + ]: 934580 : if (relkind == RELKIND_RELATION ||
4348 [ + + ]: 285 : relkind == RELKIND_MATVIEW ||
4349 : : relkind == RELKIND_PARTITIONED_TABLE)
4350 : : {
4351 : : /* ri_RowIdAttNo refers to a ctid attribute */
4352 [ - + ]: 934298 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
4353 : 934298 : datum = ExecGetJunkAttribute(slot,
4354 : 934298 : resultRelInfo->ri_RowIdAttNo,
4355 : : &isNull);
4356 : :
4357 : : /*
4358 : : * For commands other than MERGE, any tuples having a null row
4359 : : * identifier are errors. For MERGE, we may need to handle
4360 : : * them with the WHEN NOT MATCHED clauses, if any, so do that.
4361 : : *
4362 : : * Note that we use the node's toplevel resultRelInfo, not any
4363 : : * specific partition's.
4364 : : */
4365 [ + + ]: 934298 : if (isNull)
4366 : : {
1258 alvherre@alvh.no-ip. 4367 [ + - ]: 1058 : if (operation == CMD_MERGE)
4368 : : {
1235 4369 : 1058 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4370 : :
538 dean.a.rasheed@gmail 4371 : 1058 : slot = ExecMerge(&context, node->resultRelInfo,
4372 : 1058 : NULL, NULL, node->canSetTag);
4373 : :
4374 : : /*
4375 : : * If we got a RETURNING result, return it to the
4376 : : * caller. We'll continue the work on next call.
4377 : : */
4378 [ + + ]: 1037 : if (slot)
4379 : 60 : return slot;
4380 : :
4381 : 998 : continue; /* continue with the next tuple */
4382 : : }
4383 : :
1620 tgl@sss.pgh.pa.us 4384 [ # # ]:UBC 0 : elog(ERROR, "ctid is NULL");
4385 : : }
4386 : :
1620 tgl@sss.pgh.pa.us 4387 :CBC 933240 : tupleid = (ItemPointer) DatumGetPointer(datum);
4388 : 933240 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4389 : 933240 : tupleid = &tuple_ctid;
4390 : : }
4391 : :
4392 : : /*
4393 : : * Use the wholerow attribute, when available, to reconstruct the
4394 : : * old relation tuple. The old tuple serves one or both of two
4395 : : * purposes: 1) it supplies the OLD tuple for row triggers, 2) it
4396 : : * provides values for any unchanged columns for the NEW tuple of
4397 : : * an UPDATE, because the subplan does not produce all the columns
4398 : : * of the target table.
4399 : : *
4400 : : * Note that the wholerow attribute does not carry system columns,
4401 : : * so foreign table triggers miss seeing those, except that we
4402 : : * know enough here to set t_tableOid. Quite separately from
4403 : : * this, the FDW may fetch its own junk attrs to identify the row.
4404 : : *
4405 : : * Other relevant relkinds, currently limited to views, always
4406 : : * have a wholerow attribute.
4407 : : */
4408 [ + + ]: 282 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4409 : : {
4410 : 267 : datum = ExecGetJunkAttribute(slot,
4411 : 267 : resultRelInfo->ri_RowIdAttNo,
4412 : : &isNull);
4413 : :
4414 : : /*
4415 : : * For commands other than MERGE, any tuples having a null row
4416 : : * identifier are errors. For MERGE, we may need to handle
4417 : : * them with the WHEN NOT MATCHED clauses, if any, so do that.
4418 : : *
4419 : : * Note that we use the node's toplevel resultRelInfo, not any
4420 : : * specific partition's.
4421 : : */
4422 [ + + ]: 267 : if (isNull)
4423 : : {
555 dean.a.rasheed@gmail 4424 [ + - ]: 24 : if (operation == CMD_MERGE)
4425 : : {
4426 : 24 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4427 : :
538 4428 : 24 : slot = ExecMerge(&context, node->resultRelInfo,
4429 : 24 : NULL, NULL, node->canSetTag);
4430 : :
4431 : : /*
4432 : : * If we got a RETURNING result, return it to the
4433 : : * caller. We'll continue the work on next call.
4434 : : */
4435 [ + + ]: 21 : if (slot)
4436 : 6 : return slot;
4437 : :
4438 : 15 : continue; /* continue with the next tuple */
4439 : : }
4440 : :
1620 tgl@sss.pgh.pa.us 4441 [ # # ]:UBC 0 : elog(ERROR, "wholerow is NULL");
4442 : : }
4443 : :
1620 tgl@sss.pgh.pa.us 4444 :CBC 243 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4445 : 243 : oldtupdata.t_len =
4446 : 243 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4447 : 243 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4448 : : /* Historically, view triggers see invalid t_tableOid. */
4449 : 243 : oldtupdata.t_tableOid =
4450 [ + + ]: 243 : (relkind == RELKIND_VIEW) ? InvalidOid :
4451 : 105 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4452 : :
4453 : 243 : oldtuple = &oldtupdata;
4454 : : }
4455 : : else
4456 : : {
4457 : : /* Only foreign tables are allowed to omit a row-ID attr */
4458 [ - + ]: 15 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4459 : : }
4460 : : }
4461 : :
5810 4462 [ + + + + - ]: 7079922 : switch (operation)
4463 : : {
4464 : 6146424 : case CMD_INSERT:
4465 : : /* Initialize projection info if first time for this table */
1614 4466 [ + + ]: 6146424 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4467 : 40714 : ExecInitInsertProjection(node, resultRelInfo);
1235 alvherre@alvh.no-ip. 4468 : 6146424 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
1269 4469 : 6146424 : slot = ExecInsert(&context, resultRelInfo, slot,
1266 4470 : 6146424 : node->canSetTag, NULL, NULL);
5810 tgl@sss.pgh.pa.us 4471 : 6145371 : break;
4472 : :
4473 : 158779 : case CMD_UPDATE:
347 noah@leadboat.com 4474 : 158779 : tuplock = false;
4475 : :
4476 : : /* Initialize projection info if first time for this table */
1614 tgl@sss.pgh.pa.us 4477 [ + + ]: 158779 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4478 : 6657 : ExecInitUpdateProjection(node, resultRelInfo);
4479 : :
4480 : : /*
4481 : : * Make the new tuple by combining plan's output tuple with
4482 : : * the old tuple being updated.
4483 : : */
1620 4484 : 158779 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4485 [ + + ]: 158779 : if (oldtuple != NULL)
4486 : : {
347 noah@leadboat.com 4487 [ - + ]: 159 : Assert(!resultRelInfo->ri_needLockTagTuple);
4488 : : /* Use the wholerow junk attr as the old tuple. */
1620 tgl@sss.pgh.pa.us 4489 : 159 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4490 : : }
4491 : : else
4492 : : {
4493 : : /* Fetch the most recent version of old tuple. */
4494 : 158620 : Relation relation = resultRelInfo->ri_RelationDesc;
4495 : :
347 noah@leadboat.com 4496 [ + + ]: 158620 : if (resultRelInfo->ri_needLockTagTuple)
4497 : : {
4498 : 12497 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4499 : 12497 : tuplock = true;
4500 : : }
1620 tgl@sss.pgh.pa.us 4501 [ - + ]: 158620 : if (!table_tuple_fetch_row_version(relation, tupleid,
4502 : : SnapshotAny,
4503 : : oldSlot))
1620 tgl@sss.pgh.pa.us 4504 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
4505 : : }
908 dean.a.rasheed@gmail 4506 :CBC 158779 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4507 : : oldSlot);
4508 : :
4509 : : /* Now apply the update. */
1269 alvherre@alvh.no-ip. 4510 : 158779 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
233 dean.a.rasheed@gmail 4511 : 158779 : oldSlot, slot, node->canSetTag);
347 noah@leadboat.com 4512 [ + + ]: 158527 : if (tuplock)
4513 : 12497 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4514 : : InplaceUpdateTupleLock);
5810 tgl@sss.pgh.pa.us 4515 : 158527 : break;
4516 : :
4517 : 768429 : case CMD_DELETE:
1269 alvherre@alvh.no-ip. 4518 : 768429 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
513 akorotkov@postgresql 4519 : 768429 : true, false, node->canSetTag, NULL, NULL, NULL);
5810 tgl@sss.pgh.pa.us 4520 : 768395 : break;
4521 : :
1258 alvherre@alvh.no-ip. 4522 : 6290 : case CMD_MERGE:
555 dean.a.rasheed@gmail 4523 : 6290 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4524 : 6290 : node->canSetTag);
1258 alvherre@alvh.no-ip. 4525 : 6243 : break;
4526 : :
5810 tgl@sss.pgh.pa.us 4527 :UBC 0 : default:
4528 [ # # ]: 0 : elog(ERROR, "unknown operation");
4529 : : break;
4530 : : }
4531 : :
4532 : : /*
4533 : : * If we got a RETURNING result, return it to the caller. We'll continue
4534 : : * the work on next call.
4535 : : */
5810 tgl@sss.pgh.pa.us 4536 [ + + ]:CBC 7078536 : if (slot)
4537 : 3594 : return slot;
4538 : : }
4539 : :
4540 : : /*
4541 : : * Insert remaining tuples for batch insert.
4542 : : */
1016 efujita@postgresql.o 4543 [ + + ]: 53225 : if (estate->es_insert_pending_result_relations != NIL)
4544 : 13 : ExecPendingInserts(estate);
4545 : :
4546 : : /*
4547 : : * We're done, but fire AFTER STATEMENT triggers before exiting.
4548 : : */
5810 tgl@sss.pgh.pa.us 4549 : 53224 : fireASTriggers(node);
4550 : :
5307 4551 : 53224 : node->mt_done = true;
4552 : :
5810 4553 : 53224 : return NULL;
4554 : : }
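
/*
 * Editor's sketch: with RETURNING, ExecModifyTable hands back one tuple
 * per call and must be called again until it returns NULL. A hypothetical
 * consumer loop (names assumed, not from this file):
 */
#if 0
	for (;;)
	{
		TupleTableSlot *slot = ExecProcNode(&mtstate->ps);

		if (TupIsNull(slot))
			break;				/* all requested modifications are done */
		/* ... hand the RETURNING tuple to the DestReceiver ... */
	}
#endif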
4555 : :
4556 : : /*
4557 : : * ExecLookupResultRelByOid
4558 : : * If the table with given OID is among the result relations to be
4559 : : * updated by the given ModifyTable node, return its ResultRelInfo.
4560 : : *
4561 : : * If not found, return NULL if missing_ok, else raise error.
4562 : : *
4563 : : * If update_cache is true, then upon successful lookup, update the node's
4564 : : * one-element cache. ONLY ExecModifyTable may pass true for this.
4565 : : */
4566 : : ResultRelInfo *
1614 4567 : 5044 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4568 : : bool missing_ok, bool update_cache)
4569 : : {
4570 [ + + ]: 5044 : if (node->mt_resultOidHash)
4571 : : {
4572 : : /* Use the pre-built hash table to locate the rel */
4573 : : MTTargetRelLookup *mtlookup;
4574 : :
4575 : : mtlookup = (MTTargetRelLookup *)
4576 : 562 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4577 [ + - ]: 562 : if (mtlookup)
4578 : : {
4579 [ + + ]: 562 : if (update_cache)
4580 : : {
4581 : 412 : node->mt_lastResultOid = resultoid;
4582 : 412 : node->mt_lastResultIndex = mtlookup->relationIndex;
4583 : : }
4584 : 562 : return node->resultRelInfo + mtlookup->relationIndex;
4585 : : }
4586 : : }
4587 : : else
4588 : : {
4589 : : /* With few target rels, just search the ResultRelInfo array */
4590 [ + + ]: 8040 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4591 : : {
4592 : 4849 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4593 : :
4594 [ + + ]: 4849 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4595 : : {
4596 [ + + ]: 1291 : if (update_cache)
4597 : : {
4598 : 1189 : node->mt_lastResultOid = resultoid;
4599 : 1189 : node->mt_lastResultIndex = ndx;
4600 : : }
4601 : 1291 : return rInfo;
4602 : : }
4603 : : }
4604 : : }
4605 : :
4606 [ - + ]: 3191 : if (!missing_ok)
1614 tgl@sss.pgh.pa.us 4607 [ # # ]:UBC 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
1614 tgl@sss.pgh.pa.us 4608 :CBC 3191 : return NULL;
4609 : : }
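
/*
 * Editor's sketch: a hedged usage example. Per the contract above, only
 * ExecModifyTable may pass update_cache = true, and with missing_ok = true
 * the caller must be prepared for a NULL result.
 */
#if 0
	ResultRelInfo *rInfo;

	rInfo = ExecLookupResultRelByOid(mtstate, reloid, true, false);
	if (rInfo == NULL)
		return;					/* not a result relation of this node */
#endif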
4610 : :
4611 : : /* ----------------------------------------------------------------
4612 : : * ExecInitModifyTable
4613 : : * ----------------------------------------------------------------
4614 : : */
4615 : : ModifyTableState *
5810 4616 : 55561 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4617 : : {
4618 : : ModifyTableState *mtstate;
1620 4619 : 55561 : Plan *subplan = outerPlan(node);
5810 4620 : 55561 : CmdType operation = node->operation;
171 amitlan@postgresql.o 4621 : 55561 : int total_nrels = list_length(node->resultRelations);
4622 : : int nrels;
211 4623 : 55561 : List *resultRelations = NIL;
4624 : 55561 : List *withCheckOptionLists = NIL;
4625 : 55561 : List *returningLists = NIL;
4626 : 55561 : List *updateColnosLists = NIL;
201 4627 : 55561 : List *mergeActionLists = NIL;
4628 : 55561 : List *mergeJoinConditions = NIL;
4629 : : ResultRelInfo *resultRelInfo;
4630 : : List *arowmarks;
4631 : : ListCell *l;
4632 : : int i;
4633 : : Relation rel;
4634 : :
4635 : : /* check for unsupported flags */
5810 tgl@sss.pgh.pa.us 4636 [ - + ]: 55561 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4637 : :
4638 : : /*
4639 : : * Only consider unpruned relations for initializing their ResultRelInfo
4640 : : * struct and other fields such as withCheckOptions, etc.
4641 : : *
4642 : : * Note: We must avoid pruning every result relation. This is important
4643 : : * for MERGE, since even if every result relation is pruned from the
4644 : : * subplan, there might still be NOT MATCHED rows, for which there may be
4645 : : * INSERT actions to perform. To allow these actions to be found, at
4646 : : * least one result relation must be kept. Also, when inserting into a
4647 : : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4648 : : * as a reference for building the ResultRelInfo of the target partition.
4649 : : * In either case, it doesn't matter which result relation is kept, so we
4650 : : * just keep the first one, if all others have been pruned. See also
4651 : : * ExecDoInitialPruning(), which ensures that this first result relation
4652 : : * has been locked.
4653 : : */
211 amitlan@postgresql.o 4654 : 55561 : i = 0;
4655 [ + - + + : 112384 : foreach(l, node->resultRelations)
+ + ]
4656 : : {
4657 : 56823 : Index rti = lfirst_int(l);
4658 : : bool keep_rel;
4659 : :
171 4660 : 56823 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4661 [ + + + + + + ]: 56823 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
4662 : : {
4663 : : /* all result relations pruned; keep the first one */
4664 : 24 : keep_rel = true;
4665 : 24 : rti = linitial_int(node->resultRelations);
4666 : 24 : i = 0;
4667 : : }
4668 : :
4669 [ + + ]: 56823 : if (keep_rel)
4670 : : {
211 4671 : 56781 : resultRelations = lappend_int(resultRelations, rti);
4672 [ + + ]: 56781 : if (node->withCheckOptionLists)
4673 : : {
4674 : 715 : List *withCheckOptions = list_nth_node(List,
4675 : : node->withCheckOptionLists,
4676 : : i);
4677 : :
4678 : 715 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4679 : : }
4680 [ + + ]: 56781 : if (node->returningLists)
4681 : : {
4682 : 2510 : List *returningList = list_nth_node(List,
4683 : : node->returningLists,
4684 : : i);
4685 : :
4686 : 2510 : returningLists = lappend(returningLists, returningList);
4687 : : }
4688 [ + + ]: 56781 : if (node->updateColnosLists)
4689 : : {
4690 : 8017 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4691 : :
4692 : 8017 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4693 : : }
201 4694 [ + + ]: 56781 : if (node->mergeActionLists)
4695 : : {
4696 : 906 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4697 : :
4698 : 906 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4699 : : }
4700 [ + + ]: 56781 : if (node->mergeJoinConditions)
4701 : : {
4702 : 906 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4703 : :
4704 : 906 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4705 : : }
4706 : : }
211 4707 : 56823 : i++;
4708 : : }
4709 : 55561 : nrels = list_length(resultRelations);
171 4710 [ - + ]: 55561 : Assert(nrels > 0);
4711 : :
4712 : : /*
4713 : : * create state structure
4714 : : */
5810 tgl@sss.pgh.pa.us 4715 : 55561 : mtstate = makeNode(ModifyTableState);
4716 : 55561 : mtstate->ps.plan = (Plan *) node;
4717 : 55561 : mtstate->ps.state = estate;
2973 andres@anarazel.de 4718 : 55561 : mtstate->ps.ExecProcNode = ExecModifyTable;
4719 : :
5307 tgl@sss.pgh.pa.us 4720 : 55561 : mtstate->operation = operation;
4721 : 55561 : mtstate->canSetTag = node->canSetTag;
4722 : 55561 : mtstate->mt_done = false;
4723 : :
1620 4724 : 55561 : mtstate->mt_nrels = nrels;
1789 heikki.linnakangas@i 4725 : 55561 : mtstate->resultRelInfo = (ResultRelInfo *)
1620 tgl@sss.pgh.pa.us 4726 : 55561 : palloc(nrels * sizeof(ResultRelInfo));
4727 : :
525 dean.a.rasheed@gmail 4728 : 55561 : mtstate->mt_merge_pending_not_matched = NULL;
1258 alvherre@alvh.no-ip. 4729 : 55561 : mtstate->mt_merge_inserted = 0;
4730 : 55561 : mtstate->mt_merge_updated = 0;
4731 : 55561 : mtstate->mt_merge_deleted = 0;
211 amitlan@postgresql.o 4732 : 55561 : mtstate->mt_updateColnosLists = updateColnosLists;
201 4733 : 55561 : mtstate->mt_mergeActionLists = mergeActionLists;
4734 : 55561 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4735 : :
4736 : : /*----------
4737 : : * Resolve the target relation. This is the same as:
4738 : : *
4739 : : * - the relation for which we will fire FOR STATEMENT triggers,
4740 : : * - the relation into whose tuple format all captured transition tuples
4741 : : * must be converted, and
4742 : : * - the root partitioned table used for tuple routing.
4743 : : *
4744 : : * If it's a partitioned or inherited table, the root partition or
4745 : : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4746 : : * given explicitly in node->rootRelation. Otherwise, the target relation
4747 : : * is the sole relation in the node->resultRelations list and, since it can
4748 : : * never be pruned, also in the resultRelations list constructed above.
4749 : : *----------
4750 : : */
1789 heikki.linnakangas@i 4751 [ + + ]: 55561 : if (node->rootRelation > 0)
4752 : : {
211 amitlan@postgresql.o 4753 [ - + ]: 1445 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
1789 heikki.linnakangas@i 4754 : 1445 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4755 : 1445 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4756 : : node->rootRelation);
4757 : : }
4758 : : else
4759 : : {
683 tgl@sss.pgh.pa.us 4760 [ - + ]: 54116 : Assert(list_length(node->resultRelations) == 1);
201 amitlan@postgresql.o 4761 [ - + ]: 54116 : Assert(list_length(resultRelations) == 1);
1783 heikki.linnakangas@i 4762 : 54116 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4763 : 54116 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
201 amitlan@postgresql.o 4764 : 54116 : linitial_int(resultRelations));
4765 : : }
4766 : :
4767 : : /* set up epqstate with dummy subplan data for the moment */
841 tgl@sss.pgh.pa.us 4768 : 55561 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4769 : : node->epqParam, resultRelations);
5810 4770 : 55561 : mtstate->fireBSTriggers = true;
4771 : :
4772 : : /*
4773 : : * Build state for collecting transition tuples. This requires having a
4774 : : * valid trigger query context, so skip it in explain-only mode.
4775 : : */
1783 heikki.linnakangas@i 4776 [ + + ]: 55561 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4777 : 55044 : ExecSetupTransitionCaptureState(mtstate, estate);
4778 : :
4779 : : /*
4780 : : * Open all the result relations and initialize the ResultRelInfo structs.
4781 : : * (But the root relation was initialized above, if it's part of the array.)
4782 : : * We must do this before initializing the subplan, because direct-modify
4783 : : * FDWs expect their ResultRelInfos to be available.
4784 : : */
5307 tgl@sss.pgh.pa.us 4785 : 55561 : resultRelInfo = mtstate->resultRelInfo;
5810 4786 : 55561 : i = 0;
211 amitlan@postgresql.o 4787 [ + - + + + + ]: 112171 : foreach(l, resultRelations)
4788 : : {
1789 heikki.linnakangas@i 4789 : 56778 : Index resultRelation = lfirst_int(l);
555 dean.a.rasheed@gmail 4790 : 56778 : List *mergeActions = NIL;
4791 : :
201 amitlan@postgresql.o 4792 [ + + ]: 56778 : if (mergeActionLists)
4793 : 906 : mergeActions = list_nth(mergeActionLists, i);
4794 : :
1783 heikki.linnakangas@i 4795 [ + + ]: 56778 : if (resultRelInfo != mtstate->rootResultRelInfo)
4796 : : {
4797 : 2662 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4798 : :
4799 : : /*
4800 : : * For child result relations, store the root result relation
4801 : : * pointer. We do so for the convenience of places that want to
4802 : : * look at the query's original target relation but don't have the
4803 : : * mtstate handy.
4804 : : */
1614 tgl@sss.pgh.pa.us 4805 : 2662 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4806 : : }
4807 : :
4808 : : /* Initialize the usesFdwDirectModify flag */
1269 alvherre@alvh.no-ip. 4809 : 56778 : resultRelInfo->ri_usesFdwDirectModify =
4810 : 56778 : bms_is_member(i, node->fdwDirectModifyPlans);
4811 : :
4812 : : /*
4813 : : * Verify result relation is a valid target for the current operation
4814 : : */
2 dean.a.rasheed@gmail 4815 : 56778 : CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
4816 : : mergeActions);
4817 : :
1620 tgl@sss.pgh.pa.us 4818 : 56610 : resultRelInfo++;
4819 : 56610 : i++;
4820 : : }
4821 : :
4822 : : /*
4823 : : * Now we may initialize the subplan.
4824 : : */
4825 : 55393 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4826 : :
4827 : : /*
4828 : : * Do additional per-result-relation initialization.
4829 : : */
4830 [ + + ]: 111986 : for (i = 0; i < nrels; i++)
4831 : : {
4832 : 56593 : resultRelInfo = &mtstate->resultRelInfo[i];
4833 : :
4834 : : /* Let FDWs init themselves for foreign-table result rels */
3459 rhaas@postgresql.org 4835 [ + + ]: 56593 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4836 [ + + ]: 56489 : resultRelInfo->ri_FdwRoutine != NULL &&
4563 tgl@sss.pgh.pa.us 4837 [ + - ]: 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4838 : : {
4839 : 170 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4840 : :
4841 : 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4842 : : resultRelInfo,
4843 : : fdw_private,
4844 : : i,
4845 : : eflags);
4846 : : }
4847 : :
4848 : : /*
4849 : : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4850 : : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4851 : : * tables, the FDW might have created additional junk attr(s), but
4852 : : * those are no concern of ours.
4853 : : */
1258 alvherre@alvh.no-ip. 4854 [ + + + + + + ]: 56593 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4855 : : operation == CMD_MERGE)
4856 : : {
4857 : : char relkind;
4858 : :
1614 tgl@sss.pgh.pa.us 4859 : 15151 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4860 [ + + + + ]: 15151 : if (relkind == RELKIND_RELATION ||
4861 [ + + ]: 348 : relkind == RELKIND_MATVIEW ||
4862 : : relkind == RELKIND_PARTITIONED_TABLE)
4863 : : {
4864 : 14821 : resultRelInfo->ri_RowIdAttNo =
4865 : 14821 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4866 [ - + ]: 14821 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1614 tgl@sss.pgh.pa.us 4867 [ # # ]:UBC 0 : elog(ERROR, "could not find junk ctid column");
4868 : : }
1614 tgl@sss.pgh.pa.us 4869 [ + + ]:CBC 330 : else if (relkind == RELKIND_FOREIGN_TABLE)
4870 : : {
4871 : : /*
4872 : : * We don't support MERGE with foreign tables for now. (It's
4873 : : * problematic because the implementation uses CTID.)
4874 : : */
1258 alvherre@alvh.no-ip. 4875 [ - + ]: 186 : Assert(operation != CMD_MERGE);
4876 : :
4877 : : /*
4878 : : * When there is a row-level trigger, there should be a
4879 : : * wholerow attribute. We also require it to be present in
4880 : : * UPDATE and MERGE, so we can get the values of unchanged
4881 : : * columns.
4882 : : */
1614 tgl@sss.pgh.pa.us 4883 : 186 : resultRelInfo->ri_RowIdAttNo =
4884 : 186 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4885 : : "wholerow");
1258 alvherre@alvh.no-ip. 4886 [ + + - + ]: 186 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
1614 tgl@sss.pgh.pa.us 4887 [ - + ]: 105 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1614 tgl@sss.pgh.pa.us 4888 [ # # ]:UBC 0 : elog(ERROR, "could not find junk wholerow column");
4889 : : }
4890 : : else
4891 : : {
4892 : : /* Other valid target relkinds must provide wholerow */
1614 tgl@sss.pgh.pa.us 4893 :CBC 144 : resultRelInfo->ri_RowIdAttNo =
4894 : 144 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4895 : : "wholerow");
4896 [ - + ]: 144 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1614 tgl@sss.pgh.pa.us 4897 [ # # ]:UBC 0 : elog(ERROR, "could not find junk wholerow column");
4898 : : }
4899 : : }
4900 : : }
4901 : :
4902 : : /*
4903 : : * If this is an inherited update/delete/merge, there will be a junk
4904 : : * attribute named "tableoid" present in the subplan's targetlist. It
4905 : : * will be used to identify the result relation for a given tuple to be
4906 : : * updated/deleted/merged.
4907 : : */
1614 tgl@sss.pgh.pa.us 4908 :CBC 55393 : mtstate->mt_resultOidAttno =
4909 : 55393 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
171 amitlan@postgresql.o 4910 [ + + - + ]: 55393 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
1614 tgl@sss.pgh.pa.us 4911 : 55393 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4912 : 55393 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4913 : :
4914 : : /* Get the root target relation */
1783 heikki.linnakangas@i 4915 : 55393 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4916 : :
4917 : : /*
4918 : : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4919 : : * or MERGE might need this too, but only if it actually moves tuples
4920 : : * between partitions; in that case setup is done by
4921 : : * ExecCrossPartitionUpdate.
4922 : : */
2787 rhaas@postgresql.org 4923 [ + + + + ]: 55393 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4924 : : operation == CMD_INSERT)
2753 4925 : 1985 : mtstate->mt_partition_tuple_routing =
1614 tgl@sss.pgh.pa.us 4926 : 1985 : ExecSetupPartitionTupleRouting(estate, rel);
4927 : :
4928 : : /*
4929 : : * Initialize any WITH CHECK OPTION constraints if needed.
4930 : : */
4433 sfrost@snowman.net 4931 : 55393 : resultRelInfo = mtstate->resultRelInfo;
211 amitlan@postgresql.o 4932 [ + + + + + + ]: 56108 : foreach(l, withCheckOptionLists)
4933 : : {
4433 sfrost@snowman.net 4934 : 715 : List *wcoList = (List *) lfirst(l);
4935 : 715 : List *wcoExprs = NIL;
4936 : : ListCell *ll;
4937 : :
4938 [ + - + + + + ]: 1957 : foreach(ll, wcoList)
4939 : : {
4940 : 1242 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
3098 andres@anarazel.de 4941 : 1242 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4942 : : &mtstate->ps);
4943 : :
4433 sfrost@snowman.net 4944 : 1242 : wcoExprs = lappend(wcoExprs, wcoExpr);
4945 : : }
4946 : :
4947 : 715 : resultRelInfo->ri_WithCheckOptions = wcoList;
4948 : 715 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4949 : 715 : resultRelInfo++;
4950 : : }
4951 : :
4952 : : /*
4953 : : * Initialize RETURNING projections if needed.
4954 : : */
211 amitlan@postgresql.o 4955 [ + + ]: 55393 : if (returningLists)
4956 : : {
4957 : : TupleTableSlot *slot;
4958 : : ExprContext *econtext;
4959 : :
4960 : : /*
4961 : : * Initialize result tuple slot and assign its rowtype using the plan
4962 : : * node's declared targetlist, which the planner set up to be the same
4963 : : * as the first (before runtime pruning) RETURNING list. We assume
4964 : : * all the result rels will produce compatible output.
4965 : : */
2487 andres@anarazel.de 4966 : 2337 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
5810 tgl@sss.pgh.pa.us 4967 : 2337 : slot = mtstate->ps.ps_ResultTupleSlot;
4968 : :
4969 : : /* Need an econtext too */
3098 andres@anarazel.de 4970 [ + - ]: 2337 : if (mtstate->ps.ps_ExprContext == NULL)
4971 : 2337 : ExecAssignExprContext(estate, &mtstate->ps);
4972 : 2337 : econtext = mtstate->ps.ps_ExprContext;
4973 : :
4974 : : /*
4975 : : * Build a projection for each result rel.
4976 : : */
5307 tgl@sss.pgh.pa.us 4977 : 2337 : resultRelInfo = mtstate->resultRelInfo;
211 amitlan@postgresql.o 4978 [ + - + + + + ]: 4847 : foreach(l, returningLists)
4979 : : {
5810 tgl@sss.pgh.pa.us 4980 : 2510 : List *rlist = (List *) lfirst(l);
4981 : :
2710 rhaas@postgresql.org 4982 : 2510 : resultRelInfo->ri_returningList = rlist;
5810 tgl@sss.pgh.pa.us 4983 : 2510 : resultRelInfo->ri_projectReturning =
3098 andres@anarazel.de 4984 : 2510 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
2999 tgl@sss.pgh.pa.us 4985 : 2510 : resultRelInfo->ri_RelationDesc->rd_att);
5810 4986 : 2510 : resultRelInfo++;
4987 : : }
4988 : : }
4989 : : else
4990 : : {
4991 : : /*
4992 : : * We still must construct a dummy result tuple type, because InitPlan
4993 : : * expects one (maybe should change that?).
4994 : : */
2493 andres@anarazel.de 4995 : 53056 : ExecInitResultTypeTL(&mtstate->ps);
4996 : :
5810 tgl@sss.pgh.pa.us 4997 : 53056 : mtstate->ps.ps_ExprContext = NULL;
4998 : : }
4999 : :
5000 : : /* Set the list of arbiter indexes if needed for ON CONFLICT */
2721 alvherre@alvh.no-ip. 5001 : 55393 : resultRelInfo = mtstate->resultRelInfo;
5002 [ + + ]: 55393 : if (node->onConflictAction != ONCONFLICT_NONE)
5003 : : {
5004 : : /* insert may only have one relation, inheritance is not expanded */
171 amitlan@postgresql.o 5005 [ - + ]: 686 : Assert(total_nrels == 1);
2721 alvherre@alvh.no-ip. 5006 : 686 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5007 : : }
5008 : :
5009 : : /*
5010 : : * If needed, initialize the target list, projection and qual for ON CONFLICT
5011 : : * DO UPDATE.
5012 : : */
3774 andres@anarazel.de 5013 [ + + ]: 55393 : if (node->onConflictAction == ONCONFLICT_UPDATE)
5014 : : {
1580 tgl@sss.pgh.pa.us 5015 : 458 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
5016 : : ExprContext *econtext;
5017 : : TupleDesc relationDesc;
5018 : :
5019 : : /* already exists if created by RETURNING processing above */
3774 andres@anarazel.de 5020 [ + + ]: 458 : if (mtstate->ps.ps_ExprContext == NULL)
5021 : 319 : ExecAssignExprContext(estate, &mtstate->ps);
5022 : :
5023 : 458 : econtext = mtstate->ps.ps_ExprContext;
2759 5024 : 458 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5025 : :
5026 : : /* create state for DO UPDATE SET operation */
1580 tgl@sss.pgh.pa.us 5027 : 458 : resultRelInfo->ri_onConflict = onconfl;
5028 : :
5029 : : /* initialize slot for the existing tuple */
5030 : 458 : onconfl->oc_Existing =
2371 andres@anarazel.de 5031 : 458 : table_slot_create(resultRelInfo->ri_RelationDesc,
5032 : 458 : &mtstate->ps.state->es_tupleTable);
5033 : :
5034 : : /*
5035 : : * Create the tuple slot for the UPDATE SET projection. We want a slot
5036 : : * of the table's type here, because the slot will be used to insert
5037 : : * into the table, and for RETURNING processing, which may access
5038 : : * system attributes.
5039 : : */
1580 tgl@sss.pgh.pa.us 5040 : 458 : onconfl->oc_ProjSlot =
5041 : 458 : table_slot_create(resultRelInfo->ri_RelationDesc,
5042 : 458 : &mtstate->ps.state->es_tupleTable);
5043 : :
5044 : : /* build UPDATE SET projection state */
5045 : 458 : onconfl->oc_ProjInfo =
5046 : 458 : ExecBuildUpdateProjection(node->onConflictSet,
5047 : : true,
5048 : : node->onConflictCols,
5049 : : relationDesc,
5050 : : econtext,
5051 : : onconfl->oc_ProjSlot,
5052 : : &mtstate->ps);
5053 : :
5054 : : /* initialize state to evaluate the WHERE clause, if any */
3774 andres@anarazel.de 5055 [ + + ]: 458 : if (node->onConflictWhere)
5056 : : {
5057 : : ExprState *qualexpr;
5058 : :
3098 5059 : 88 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
5060 : : &mtstate->ps);
1580 tgl@sss.pgh.pa.us 5061 : 88 : onconfl->oc_WhereClause = qualexpr;
5062 : : }
5063 : : }
5064 : :
5065 : : /*
5066 : : * If we have any secondary relations in an UPDATE or DELETE, they need to
5067 : : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5068 : : * EvalPlanQual mechanism needs to be told about them. This also goes for
5069 : : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5070 : : */
1620 5071 : 55393 : arowmarks = NIL;
5794 5072 [ + + + + + + ]: 56804 : foreach(l, node->rowMarks)
5073 : : {
3071 5074 : 1411 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
5075 : : ExecRowMark *erm;
5076 : : ExecAuxRowMark *aerm;
5077 : :
5078 : : /*
5079 : : * Ignore "parent" rowmarks, because they are irrelevant at runtime.
5080 : : * Also ignore the rowmarks belonging to child tables that have been
5081 : : * pruned in ExecDoInitialPruning().
5082 : : */
211 amitlan@postgresql.o 5083 [ + + ]: 1411 : if (rc->isParent ||
5084 [ + + ]: 1340 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
5794 tgl@sss.pgh.pa.us 5085 : 298 : continue;
5086 : :
5087 : : /* Find ExecRowMark and build ExecAuxRowMark */
3770 5088 : 1113 : erm = ExecFindRowMark(estate, rc->rti, false);
1620 5089 : 1113 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
5090 : 1113 : arowmarks = lappend(arowmarks, aerm);
5091 : : }
5092 : :
5093 : : /* For a MERGE command, initialize its state */
1258 alvherre@alvh.no-ip. 5094 [ + + ]: 55393 : if (mtstate->operation == CMD_MERGE)
5095 : 781 : ExecInitMerge(mtstate, estate);
5096 : :
1620 tgl@sss.pgh.pa.us 5097 : 55393 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5098 : :
5099 : : /*
5100 : : * If there are a lot of result relations, use a hash table to speed the
5101 : : * lookups. If there are not a lot, a simple linear search is faster.
5102 : : *
5103 : : * It's not clear where the threshold is, but try 64 for starters. In a
5104 : : * debugging build, use a small threshold so that we get some test
5105 : : * coverage of both code paths.
5106 : : */
5107 : : #ifdef USE_ASSERT_CHECKING
5108 : : #define MT_NRELS_HASH 4
5109 : : #else
5110 : : #define MT_NRELS_HASH 64
5111 : : #endif
5112 [ + + ]: 55393 : if (nrels >= MT_NRELS_HASH)
5113 : : {
5114 : : HASHCTL hash_ctl;
5115 : :
5116 : 167 : hash_ctl.keysize = sizeof(Oid);
5117 : 167 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5118 : 167 : hash_ctl.hcxt = CurrentMemoryContext;
5119 : 167 : mtstate->mt_resultOidHash =
5120 : 167 : hash_create("ModifyTable target hash",
5121 : : nrels, &hash_ctl,
5122 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5123 [ + + ]: 934 : for (i = 0; i < nrels; i++)
5124 : : {
5125 : : Oid hashkey;
5126 : : MTTargetRelLookup *mtlookup;
5127 : : bool found;
5128 : :
5129 : 767 : resultRelInfo = &mtstate->resultRelInfo[i];
5130 : 767 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5131 : : mtlookup = (MTTargetRelLookup *)
5132 : 767 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5133 : : HASH_ENTER, &found);
5134 [ - + ]: 767 : Assert(!found);
5135 : 767 : mtlookup->relationIndex = i;
5136 : : }
5137 : : }
5138 : : else
5139 : 55226 : mtstate->mt_resultOidHash = NULL;
5140 : :
5141 : : /*
5142 : : * Determine if the FDW supports batch insert and determine the batch size
5143 : : * (an FDW may support batching, but it may be disabled for the
5144 : : * server/table).
5145 : : *
5146 : : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5147 : : * remains set to 0.
5148 : : */
1689 tomas.vondra@postgre 5149 [ + + ]: 55393 : if (operation == CMD_INSERT)
5150 : : {
5151 : : /* insert may only have one relation, inheritance is not expanded */
171 amitlan@postgresql.o 5152 [ - + ]: 41442 : Assert(total_nrels == 1);
1689 tomas.vondra@postgre 5153 : 41442 : resultRelInfo = mtstate->resultRelInfo;
1614 tgl@sss.pgh.pa.us 5154 [ + - ]: 41442 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5155 [ + + ]: 41442 : resultRelInfo->ri_FdwRoutine != NULL &&
5156 [ + - ]: 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5157 [ + - ]: 88 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5158 : : {
5159 : 88 : resultRelInfo->ri_BatchSize =
5160 : 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
1689 tomas.vondra@postgre 5161 [ - + ]: 88 : Assert(resultRelInfo->ri_BatchSize >= 1);
5162 : : }
5163 : : else
1614 tgl@sss.pgh.pa.us 5164 : 41354 : resultRelInfo->ri_BatchSize = 1;
5165 : : }
5166 : :
5167 : : /*
5168 : : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5169 : : * to estate->es_auxmodifytables so that it will be run to completion by
5170 : : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5171 : : * ModifyTable node too, but there's no need.) Note the use of lcons not
5172 : : * lappend: we need later-initialized ModifyTable nodes to be shut down
5173 : : * before earlier ones. This ensures that we don't throw away RETURNING
5174 : : * rows that need to be seen by a later CTE subplan.
5175 : : */
5307 5176 [ + + ]: 55393 : if (!mtstate->canSetTag)
5177 : 475 : estate->es_auxmodifytables = lcons(mtstate,
5178 : : estate->es_auxmodifytables);
5179 : :
5810 5180 : 55393 : return mtstate;
5181 : : }
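
/*
 * Editor's note: lcons prepends, which is what produces the shutdown
 * ordering promised in the last comment of the function above. A small
 * hedged illustration of the resulting list order:
 */
#if 0
	List	   *aux = NIL;

	aux = lcons(nodeA, aux);	/* aux is now (A) */
	aux = lcons(nodeB, aux);	/* aux is now (B, A); B, initialized
								 * later, is run to completion first */
#endif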
5182 : :
5183 : : /* ----------------------------------------------------------------
5184 : : * ExecEndModifyTable
5185 : : *
5186 : : * Shuts down the plan.
5187 : : *
5188 : : * Returns nothing of interest.
5189 : : * ----------------------------------------------------------------
5190 : : */
5191 : : void
5192 : 53205 : ExecEndModifyTable(ModifyTableState *node)
5193 : : {
5194 : : int i;
5195 : :
5196 : : /*
5197 : : * Allow any FDWs to shut down
5198 : : */
1620 5199 [ + + ]: 107455 : for (i = 0; i < node->mt_nrels; i++)
5200 : : {
5201 : : int j;
4563 5202 : 54250 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5203 : :
3459 rhaas@postgresql.org 5204 [ + + ]: 54250 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5205 [ + + ]: 54154 : resultRelInfo->ri_FdwRoutine != NULL &&
4563 tgl@sss.pgh.pa.us 5206 [ + - ]: 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5207 : 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5208 : : resultRelInfo);
5209 : :
5210 : : /*
5211 : : * Cleanup the initialized batch slots. This only matters for FDWs
5212 : : * with batching, but the other cases will have ri_NumSlotsInitialized
5213 : : * == 0.
5214 : : */
1548 tomas.vondra@postgre 5215 [ + + ]: 54278 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5216 : : {
5217 : 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5218 : 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5219 : : }
5220 : : }
5221 : :
5222 : : /*
5223 : : * Close all the partitioned tables, leaf partitions, and their indices
5224 : : * and release the slot used for tuple routing, if set.
5225 : : */
2802 rhaas@postgresql.org 5226 [ + + ]: 53205 : if (node->mt_partition_tuple_routing)
5227 : : {
2710 5228 : 2010 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5229 : :
2486 alvherre@alvh.no-ip. 5230 [ + + ]: 2010 : if (node->mt_root_tuple_slot)
5231 : 325 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5232 : : }
5233 : :
5234 : : /*
5235 : : * Terminate EPQ execution if active
5236 : : */
5794 tgl@sss.pgh.pa.us 5237 : 53205 : EvalPlanQualEnd(&node->mt_epqstate);
5238 : :
5239 : : /*
5240 : : * shut down subplan
5241 : : */
1620 5242 : 53205 : ExecEndNode(outerPlanState(node));
5810 5243 : 53205 : }
5244 : :
5245 : : void
5535 tgl@sss.pgh.pa.us 5246 :UBC 0 : ExecReScanModifyTable(ModifyTableState *node)
5247 : : {
5248 : : /*
5249 : : * Currently, we don't need to support rescan on ModifyTable nodes. The
5250 : : * semantics of that would be a bit debatable anyway.
5251 : : */
5810 5252 [ # # ]: 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5253 : : }