Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nodeModifyTable.c
4 : : * routines to handle ModifyTable nodes.
5 : : *
6 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/executor/nodeModifyTable.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : : /* INTERFACE ROUTINES
16 : : * ExecInitModifyTable - initialize the ModifyTable node
17 : : * ExecModifyTable - retrieve the next tuple from the node
18 : : * ExecEndModifyTable - shut down the ModifyTable node
19 : : * ExecReScanModifyTable - rescan the ModifyTable node
20 : : *
21 : : * NOTES
22 : : * The ModifyTable node receives input from its outerPlan, which is
23 : : * the data to insert for INSERT cases, the changed columns' new
24 : : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : : * row-locating info for DELETE cases.
26 : : *
27 : : * The relation to modify can be an ordinary table, a foreign table, or a
28 : : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : : * targeted a view not in one of those two categories, earlier processing
31 : : * already pointed the ModifyTable result relation to an underlying
32 : : * relation of that other view. This node does process
33 : : * ri_WithCheckOptions, which may have expressions from those other,
34 : : * automatically updatable views.
35 : : *
36 : : * MERGE runs a join between the source relation and the target table.
37 : : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : : * is an outer join that might output tuples without a matching target
39 : : * tuple. In this case, any unmatched target tuples will have NULL
40 : : * row-locating info, and only INSERT can be run. But for matched target
41 : : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : : * SOURCE, all tuples produced by the join will include a matching target
44 : : * tuple, so all tuples contain row-locating info.
45 : : *
46 : : * If the query specifies RETURNING, then the ModifyTable returns a
47 : : * RETURNING tuple after completing each row insert, update, or delete.
48 : : * It must be called again to continue the operation. Without RETURNING,
49 : : * we just loop within the node until all the work is done, then
50 : : * return NULL. This avoids useless call/return overhead.
51 : : */
52 : :
53 : : #include "postgres.h"
54 : :
55 : : #include "access/htup_details.h"
56 : : #include "access/tableam.h"
57 : : #include "access/xact.h"
58 : : #include "commands/trigger.h"
59 : : #include "executor/execPartition.h"
60 : : #include "executor/executor.h"
61 : : #include "executor/nodeModifyTable.h"
62 : : #include "foreign/fdwapi.h"
63 : : #include "miscadmin.h"
64 : : #include "nodes/nodeFuncs.h"
65 : : #include "optimizer/optimizer.h"
66 : : #include "rewrite/rewriteHandler.h"
67 : : #include "rewrite/rewriteManip.h"
68 : : #include "storage/lmgr.h"
69 : : #include "utils/builtins.h"
70 : : #include "utils/datum.h"
71 : : #include "utils/injection_point.h"
72 : : #include "utils/rel.h"
73 : : #include "utils/snapmgr.h"
74 : :
75 : :
/*
 * Hash-table entry type mapping a result relation's OID to its index in the
 * ModifyTableState's resultRelInfo[] array, so the right ResultRelInfo can
 * be found by OID without a linear scan.  The Oid field is the hash key and
 * therefore must be the first member.
 */
typedef struct MTTargetRelLookup
{
	Oid			relationOid;	/* hash key, must be first */
	int			relationIndex;	/* rel's index in resultRelInfo[] array */
} MTTargetRelLookup;
81 : :
/*
 * Context struct for a ModifyTable operation, containing basic execution
 * state and some output variables populated by ExecUpdateAct() and
 * ExecDeleteAct() to report the result of their actions to callers.
 */
typedef struct ModifyTableContext
{
	/* Operation state */
	ModifyTableState *mtstate;
	EPQState   *epqstate;
	EState	   *estate;

	/*
	 * Slot containing tuple obtained from ModifyTable's subplan.  Used to
	 * access "junk" columns that are not going to be stored.
	 */
	TupleTableSlot *planSlot;

	/*
	 * Information about the changes that were made concurrently to a tuple
	 * being updated or deleted.  (Presumably filled in by the table AM when
	 * an update/delete hits a concurrently-modified row -- the producer is
	 * not visible in this part of the file.)
	 */
	TM_FailureData tmfd;

	/*
	 * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
	 * clause that refers to OLD columns (converted to the root's tuple
	 * descriptor).
	 */
	TupleTableSlot *cpDeletedSlot;

	/*
	 * The tuple projected by the INSERT's RETURNING clause, when doing a
	 * cross-partition UPDATE
	 */
	TupleTableSlot *cpUpdateReturningSlot;
} ModifyTableContext;
119 : :
/*
 * Context struct containing output data specific to UPDATE operations.
 * Per the ModifyTableContext comments above, these fields are output
 * variables reporting what an update action actually did.
 */
typedef struct UpdateContext
{
	bool		crossPartUpdate;	/* was it a cross-partition update? */
	TU_UpdateIndexes updateIndexes; /* Which index updates are required? */

	/*
	 * Lock mode to acquire on the latest tuple version before performing
	 * EvalPlanQual on it
	 */
	LockTupleMode lockmode;
} UpdateContext;
134 : :
135 : :
136 : : static void ExecBatchInsert(ModifyTableState *mtstate,
137 : : ResultRelInfo *resultRelInfo,
138 : : TupleTableSlot **slots,
139 : : TupleTableSlot **planSlots,
140 : : int numSlots,
141 : : EState *estate,
142 : : bool canSetTag);
143 : : static void ExecPendingInserts(EState *estate);
144 : : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
145 : : ResultRelInfo *sourcePartInfo,
146 : : ResultRelInfo *destPartInfo,
147 : : ItemPointer tupleid,
148 : : TupleTableSlot *oldslot,
149 : : TupleTableSlot *newslot);
150 : : static bool ExecOnConflictLockRow(ModifyTableContext *context,
151 : : TupleTableSlot *existing,
152 : : ItemPointer conflictTid,
153 : : Relation relation,
154 : : LockTupleMode lockmode,
155 : : bool isUpdate);
156 : : static bool ExecOnConflictUpdate(ModifyTableContext *context,
157 : : ResultRelInfo *resultRelInfo,
158 : : ItemPointer conflictTid,
159 : : TupleTableSlot *excludedSlot,
160 : : bool canSetTag,
161 : : TupleTableSlot **returning);
162 : : static bool ExecOnConflictSelect(ModifyTableContext *context,
163 : : ResultRelInfo *resultRelInfo,
164 : : ItemPointer conflictTid,
165 : : TupleTableSlot *excludedSlot,
166 : : bool canSetTag,
167 : : TupleTableSlot **returning);
168 : : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
169 : : EState *estate,
170 : : PartitionTupleRouting *proute,
171 : : ResultRelInfo *targetRelInfo,
172 : : TupleTableSlot *slot,
173 : : ResultRelInfo **partRelInfo);
174 : :
175 : : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
176 : : ResultRelInfo *resultRelInfo,
177 : : ItemPointer tupleid,
178 : : HeapTuple oldtuple,
179 : : bool canSetTag);
180 : : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
181 : : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
182 : : ResultRelInfo *resultRelInfo,
183 : : ItemPointer tupleid,
184 : : HeapTuple oldtuple,
185 : : bool canSetTag,
186 : : bool *matched);
187 : : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
188 : : ResultRelInfo *resultRelInfo,
189 : : bool canSetTag);
190 : :
191 : :
/*
 * Verify that the tuples to be produced by INSERT match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 *
 * We used to use this for UPDATE as well, but now the equivalent checks
 * are done in ExecBuildUpdateProjection.
 *
 * Raises a DATATYPE_MISMATCH error if the targetlist has too many or too
 * few entries, or an entry whose type disagrees with the table column.
 */
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
	TupleDesc	resultDesc = RelationGetDescr(resultRel);
	int			attno = 0;
	ListCell   *lc;

	foreach(lc, targetList)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(lc);
		Form_pg_attribute attr;

		Assert(!tle->resjunk);	/* caller removed junk items already */

		if (attno >= resultDesc->natts)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("table row type and query-specified row type do not match"),
					 errdetail("Query has too many columns.")));
		attr = TupleDescAttr(resultDesc, attno);
		attno++;				/* now 1-based, matching "ordinal position" in
								 * the error messages below */

		/*
		 * Special cases here should match planner's expand_insert_targetlist.
		 */
		if (attr->attisdropped)
		{
			/*
			 * For a dropped column, we can't check atttypid (it's likely 0).
			 * In any case the planner has most likely inserted an INT4 null.
			 * What we insist on is just *some* NULL constant.
			 */
			if (!IsA(tle->expr, Const) ||
				!((Const *) tle->expr)->constisnull)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Query provides a value for a dropped column at ordinal position %d.",
								   attno)));
		}
		else if (attr->attgenerated)
		{
			/*
			 * For a generated column, the planner will have inserted a null
			 * of the column's base type (to avoid possibly failing on domain
			 * not-null constraints).  It doesn't seem worth insisting on that
			 * exact type though, since a null value is type-independent.  As
			 * above, just insist on *some* NULL constant.
			 */
			if (!IsA(tle->expr, Const) ||
				!((Const *) tle->expr)->constisnull)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Query provides a value for a generated column at ordinal position %d.",
								   attno)));
		}
		else
		{
			/* Normal case: demand type match */
			if (exprType((Node *) tle->expr) != attr->atttypid)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
								   format_type_be(attr->atttypid),
								   attno,
								   format_type_be(exprType((Node *) tle->expr)))));
		}
	}
	if (attno != resultDesc->natts)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("table row type and query-specified row type do not match"),
				 errdetail("Query has too few columns.")));
}
283 : :
/*
 * ExecProcessReturning --- evaluate a RETURNING list
 *
 * context: context for the ModifyTable operation
 * resultRelInfo: current result rel
 * isDelete: true if the operation/merge action is a DELETE
 * oldSlot: slot holding old tuple deleted or updated
 * newSlot: slot holding new tuple inserted or updated
 * planSlot: slot holding tuple returned by top subplan node
 *
 * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
 * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
 * modify is disabled if the RETURNING list refers to any OLD/NEW values).
 *
 * Note: For the SELECT path of INSERT ... ON CONFLICT DO SELECT, oldSlot and
 * newSlot are both the existing tuple, since it's not changed.
 *
 * Returns a slot holding the result tuple
 */
static TupleTableSlot *
ExecProcessReturning(ModifyTableContext *context,
					 ResultRelInfo *resultRelInfo,
					 bool isDelete,
					 TupleTableSlot *oldSlot,
					 TupleTableSlot *newSlot,
					 TupleTableSlot *planSlot)
{
	EState	   *estate = context->estate;
	ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
	ExprContext *econtext = projectReturning->pi_exprContext;

	/* Make tuple and any needed join variables available to ExecProject */
	if (isDelete)
	{
		/* return old tuple by default */
		if (oldSlot)
			econtext->ecxt_scantuple = oldSlot;
	}
	else
	{
		/* return new tuple by default */
		if (newSlot)
			econtext->ecxt_scantuple = newSlot;
	}
	econtext->ecxt_outertuple = planSlot;

	/*
	 * Make old/new tuples available to ExecProject, if required.  When a
	 * slot is absent but the RETURNING list refers to OLD/NEW columns, an
	 * all-null substitute slot is supplied instead.
	 */
	if (oldSlot)
		econtext->ecxt_oldtuple = oldSlot;
	else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
		econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
	else
		econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */

	if (newSlot)
		econtext->ecxt_newtuple = newSlot;
	else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
		econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
	else
		econtext->ecxt_newtuple = NULL; /* No references to NEW columns */

	/*
	 * Tell ExecProject whether or not the OLD/NEW rows actually exist.  This
	 * information is required to evaluate ReturningExpr nodes and also in
	 * ExecEvalSysVar() and ExecEvalWholeRowVar().
	 */
	if (oldSlot == NULL)
		projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
	else
		projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;

	if (newSlot == NULL)
		projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
	else
		projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;

	/* Compute the RETURNING expressions */
	return ExecProject(projectReturning);
}
363 : :
364 : : /*
365 : : * ExecCheckTupleVisible -- verify tuple is visible
366 : : *
367 : : * It would not be consistent with guarantees of the higher isolation levels to
368 : : * proceed with avoiding insertion (taking speculative insertion's alternative
369 : : * path) on the basis of another tuple that is not visible to MVCC snapshot.
370 : : * Check for the need to raise a serialization failure, and do so as necessary.
371 : : */
372 : : static void
2549 373 : 2781 : ExecCheckTupleVisible(EState *estate,
374 : : Relation rel,
375 : : TupleTableSlot *slot)
376 : : {
3964 377 [ + + ]: 2781 : if (!IsolationUsesXactSnapshot())
378 : 2743 : return;
379 : :
2549 380 [ + + ]: 38 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
381 : : {
382 : : Datum xminDatum;
383 : : TransactionId xmin;
384 : : bool isnull;
385 : :
386 : 26 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
387 [ - + ]: 26 : Assert(!isnull);
388 : 26 : xmin = DatumGetTransactionId(xminDatum);
389 : :
390 : : /*
391 : : * We should not raise a serialization failure if the conflict is
392 : : * against a tuple inserted by our own transaction, even if it's not
393 : : * visible to our snapshot. (This would happen, for example, if
394 : : * conflicting keys are proposed for insertion in a single command.)
395 : : */
396 [ + + ]: 26 : if (!TransactionIdIsCurrentTransactionId(xmin))
3430 tgl@sss.pgh.pa.us 397 [ + - ]: 10 : ereport(ERROR,
398 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
399 : : errmsg("could not serialize access due to concurrent update")));
400 : : }
401 : : }
402 : :
403 : : /*
404 : : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
405 : : */
406 : : static void
3964 andres@anarazel.de 407 : 112 : ExecCheckTIDVisible(EState *estate,
408 : : ResultRelInfo *relinfo,
409 : : ItemPointer tid,
410 : : TupleTableSlot *tempSlot)
411 : : {
412 : 112 : Relation rel = relinfo->ri_RelationDesc;
413 : :
414 : : /* Redundantly check isolation level */
415 [ + + ]: 112 : if (!IsolationUsesXactSnapshot())
416 : 80 : return;
417 : :
2488 418 [ - + ]: 32 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
3964 andres@anarazel.de 419 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
2549 andres@anarazel.de 420 :CBC 32 : ExecCheckTupleVisible(estate, rel, tempSlot);
421 : 22 : ExecClearTuple(tempSlot);
422 : : }
423 : :
/*
 * Initialize generated columns handling for a tuple
 *
 * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
 * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
 * This is used only for stored generated columns.
 *
 * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
 * This is used by both stored and virtual generated columns.
 *
 * Note: usually, a given query would need only one of ri_GeneratedExprsI and
 * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
 * cross-partition UPDATEs, since a partition might be the target of both
 * UPDATE and INSERT actions.
 */
void
ExecInitGenerated(ResultRelInfo *resultRelInfo,
				  EState *estate,
				  CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	ExprState **ri_GeneratedExprs;
	int			ri_NumGeneratedNeeded;
	Bitmapset  *updatedCols;
	MemoryContext oldContext;

	/* Nothing to do if no generated columns */
	if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
		return;

	/*
	 * In an UPDATE, we can skip computing any generated columns that do not
	 * depend on any UPDATE target column.  But if there is a BEFORE ROW
	 * UPDATE trigger, we cannot skip because the trigger might change more
	 * columns.
	 */
	if (cmdtype == CMD_UPDATE &&
		!(rel->trigdesc && rel->trigdesc->trig_update_before_row))
		updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
	else
		updatedCols = NULL;

	/*
	 * Make sure these data structures are built in the per-query memory
	 * context so they'll survive throughout the query.
	 */
	oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* One slot per attribute; non-generated columns simply stay NULL */
	ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
	ri_NumGeneratedNeeded = 0;

	for (int i = 0; i < natts; i++)
	{
		char		attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;

		if (attgenerated)
		{
			Expr	   *expr;

			/* Fetch the GENERATED AS expression tree */
			expr = (Expr *) build_column_default(rel, i + 1);
			if (expr == NULL)
				elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
					 i + 1, RelationGetRelationName(rel));

			/*
			 * If it's an update with a known set of update target columns,
			 * see if we can skip the computation.
			 */
			if (updatedCols)
			{
				Bitmapset  *attrs_used = NULL;

				pull_varattnos((Node *) expr, 1, &attrs_used);

				if (!bms_overlap(updatedCols, attrs_used))
					continue;	/* need not update this column */
			}

			/* No luck, so prepare the expression for execution */
			if (attgenerated == ATTRIBUTE_GENERATED_STORED)
			{
				ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
				ri_NumGeneratedNeeded++;
			}

			/* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
			if (cmdtype == CMD_UPDATE)
				resultRelInfo->ri_extraUpdatedCols =
					bms_add_member(resultRelInfo->ri_extraUpdatedCols,
								   i + 1 - FirstLowInvalidHeapAttributeNumber);
		}
	}

	if (ri_NumGeneratedNeeded == 0)
	{
		/* didn't need it after all */
		pfree(ri_GeneratedExprs);
		ri_GeneratedExprs = NULL;
	}

	/* Save in appropriate set of fields */
	if (cmdtype == CMD_UPDATE)
	{
		/* Don't call twice */
		Assert(resultRelInfo->ri_GeneratedExprsU == NULL);

		resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
		resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;

		resultRelInfo->ri_extraUpdatedCols_valid = true;
	}
	else
	{
		/* Don't call twice */
		Assert(resultRelInfo->ri_GeneratedExprsI == NULL);

		resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
		resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
	}

	MemoryContextSwitchTo(oldContext);
}
549 : :
/*
 * Compute stored generated columns for a tuple
 *
 * Evaluates each needed GENERATED ... STORED expression against "slot" and
 * rewrites the slot in place so it contains the computed values alongside
 * the unchanged ones.  ExecInitGenerated() is invoked lazily on first use
 * for the given cmdtype.
 */
void
ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
						   EState *estate, TupleTableSlot *slot,
						   CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	ExprContext *econtext = GetPerTupleExprContext(estate);
	ExprState **ri_GeneratedExprs;
	MemoryContext oldContext;
	Datum	   *values;
	bool	   *nulls;

	/* We should not be called unless this is true */
	Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);

	/*
	 * Initialize the expressions if we didn't already, and check whether we
	 * can exit early because nothing needs to be computed.
	 */
	if (cmdtype == CMD_UPDATE)
	{
		if (resultRelInfo->ri_GeneratedExprsU == NULL)
			ExecInitGenerated(resultRelInfo, estate, cmdtype);
		if (resultRelInfo->ri_NumGeneratedNeededU == 0)
			return;
		ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
	}
	else
	{
		if (resultRelInfo->ri_GeneratedExprsI == NULL)
			ExecInitGenerated(resultRelInfo, estate, cmdtype);
		/* Early exit is impossible given the prior Assert */
		Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
		ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
	}

	/* Work in per-tuple memory so everything is reclaimed automatically */
	oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

	values = palloc_array(Datum, natts);
	nulls = palloc_array(bool, natts);

	slot_getallattrs(slot);
	memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);

	for (int i = 0; i < natts; i++)
	{
		CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);

		if (ri_GeneratedExprs[i])
		{
			Datum		val;
			bool		isnull;

			Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);

			econtext->ecxt_scantuple = slot;

			val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);

			/*
			 * We must make a copy of val as we have no guarantees about where
			 * memory for a pass-by-reference Datum is located.
			 */
			if (!isnull)
				val = datumCopy(val, attr->attbyval, attr->attlen);

			values[i] = val;
			nulls[i] = isnull;
		}
		else
		{
			/* Non-generated column: copy the existing value, if not null */
			if (!nulls[i])
				values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
		}
	}

	/* Replace the slot's contents with the freshly computed row */
	ExecClearTuple(slot);
	memcpy(slot->tts_values, values, sizeof(*values) * natts);
	memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
	ExecStoreVirtualTuple(slot);
	ExecMaterializeSlot(slot);

	MemoryContextSwitchTo(oldContext);
}
639 : :
640 : : /*
641 : : * ExecInitInsertProjection
642 : : * Do one-time initialization of projection data for INSERT tuples.
643 : : *
644 : : * INSERT queries may need a projection to filter out junk attrs in the tlist.
645 : : *
646 : : * This is also a convenient place to verify that the
647 : : * output of an INSERT matches the target table.
648 : : */
649 : : static void
1804 tgl@sss.pgh.pa.us 650 : 44856 : ExecInitInsertProjection(ModifyTableState *mtstate,
651 : : ResultRelInfo *resultRelInfo)
652 : : {
653 : 44856 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
654 : 44856 : Plan *subplan = outerPlan(node);
655 : 44856 : EState *estate = mtstate->ps.state;
656 : 44856 : List *insertTargetList = NIL;
657 : 44856 : bool need_projection = false;
658 : : ListCell *l;
659 : :
660 : : /* Extract non-junk columns of the subplan's result tlist. */
661 [ + + + + : 137663 : foreach(l, subplan->targetlist)
+ + ]
662 : : {
663 : 92807 : TargetEntry *tle = (TargetEntry *) lfirst(l);
664 : :
665 [ + - ]: 92807 : if (!tle->resjunk)
666 : 92807 : insertTargetList = lappend(insertTargetList, tle);
667 : : else
1804 tgl@sss.pgh.pa.us 668 :UBC 0 : need_projection = true;
669 : : }
670 : :
671 : : /*
672 : : * The junk-free list must produce a tuple suitable for the result
673 : : * relation.
674 : : */
1804 tgl@sss.pgh.pa.us 675 :CBC 44856 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
676 : :
677 : : /* We'll need a slot matching the table's format. */
678 : 44856 : resultRelInfo->ri_newTupleSlot =
679 : 44856 : table_slot_create(resultRelInfo->ri_RelationDesc,
680 : : &estate->es_tupleTable);
681 : :
682 : : /* Build ProjectionInfo if needed (it probably isn't). */
683 [ - + ]: 44856 : if (need_projection)
684 : : {
1804 tgl@sss.pgh.pa.us 685 :UBC 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
686 : :
687 : : /* need an expression context to do the projection */
688 [ # # ]: 0 : if (mtstate->ps.ps_ExprContext == NULL)
689 : 0 : ExecAssignExprContext(estate, &mtstate->ps);
690 : :
691 : 0 : resultRelInfo->ri_projectNew =
692 : 0 : ExecBuildProjectionInfo(insertTargetList,
693 : : mtstate->ps.ps_ExprContext,
694 : : resultRelInfo->ri_newTupleSlot,
695 : : &mtstate->ps,
696 : : relDesc);
697 : : }
698 : :
1804 tgl@sss.pgh.pa.us 699 :CBC 44856 : resultRelInfo->ri_projectNewInfoValid = true;
700 : 44856 : }
701 : :
/*
 * ExecInitUpdateProjection
 *		Do one-time initialization of projection data for UPDATE tuples.
 *
 * UPDATE always needs a projection, because (1) there's always some junk
 * attrs, and (2) we may need to merge values of not-updated columns from
 * the old tuple into the final tuple.  In UPDATE, the tuple arriving from
 * the subplan contains only new values for the changed columns, plus row
 * identity info in the junk attrs.
 *
 * This is "one-time" for any given result rel, but we might touch more than
 * one result rel in the course of an inherited UPDATE, and each one needs
 * its own projection due to possible column order variation.
 *
 * This is also a convenient place to verify that the output of an UPDATE
 * matches the target table (ExecBuildUpdateProjection does that).
 */
static void
ExecInitUpdateProjection(ModifyTableState *mtstate,
						 ResultRelInfo *resultRelInfo)
{
	ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
	Plan	   *subplan = outerPlan(node);
	EState	   *estate = mtstate->ps.state;
	TupleDesc	relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
	int			whichrel;
	List	   *updateColnos;

	/*
	 * Usually, mt_lastResultIndex matches the target rel.  If it happens not
	 * to, we can get the index the hard way with an integer division.
	 */
	whichrel = mtstate->mt_lastResultIndex;
	if (resultRelInfo != mtstate->resultRelInfo + whichrel)
	{
		whichrel = resultRelInfo - mtstate->resultRelInfo;
		Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
	}

	updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);

	/*
	 * For UPDATE, we use the old tuple to fill up missing values in the tuple
	 * produced by the subplan to get the new tuple.  We need two slots, both
	 * matching the table's desired format.
	 */
	resultRelInfo->ri_oldTupleSlot =
		table_slot_create(resultRelInfo->ri_RelationDesc,
						  &estate->es_tupleTable);
	resultRelInfo->ri_newTupleSlot =
		table_slot_create(resultRelInfo->ri_RelationDesc,
						  &estate->es_tupleTable);

	/* need an expression context to do the projection */
	if (mtstate->ps.ps_ExprContext == NULL)
		ExecAssignExprContext(estate, &mtstate->ps);

	resultRelInfo->ri_projectNew =
		ExecBuildUpdateProjection(subplan->targetlist,
								  false,	/* subplan did the evaluation */
								  updateColnos,
								  relDesc,
								  mtstate->ps.ps_ExprContext,
								  resultRelInfo->ri_newTupleSlot,
								  &mtstate->ps);

	resultRelInfo->ri_projectNewInfoValid = true;
}
770 : :
771 : : /*
772 : : * ExecGetInsertNewTuple
773 : : * This prepares a "new" tuple ready to be inserted into given result
774 : : * relation, by removing any junk columns of the plan's output tuple
775 : : * and (if necessary) coercing the tuple to the right tuple format.
776 : : */
777 : : static TupleTableSlot *
1810 778 : 6902758 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
779 : : TupleTableSlot *planSlot)
780 : : {
781 : 6902758 : ProjectionInfo *newProj = relinfo->ri_projectNew;
782 : : ExprContext *econtext;
783 : :
784 : : /*
785 : : * If there's no projection to be done, just make sure the slot is of the
786 : : * right type for the target rel. If the planSlot is the right type we
787 : : * can use it as-is, else copy the data into ri_newTupleSlot.
788 : : */
789 [ + - ]: 6902758 : if (newProj == NULL)
790 : : {
791 [ + + ]: 6902758 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
792 : : {
793 : 6490448 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
794 : 6490448 : return relinfo->ri_newTupleSlot;
795 : : }
796 : : else
797 : 412310 : return planSlot;
798 : : }
799 : :
800 : : /*
801 : : * Else project; since the projection output slot is ri_newTupleSlot, this
802 : : * will also fix any slot-type problem.
803 : : *
804 : : * Note: currently, this is dead code, because INSERT cases don't receive
805 : : * any junk columns so there's never a projection to be done.
806 : : */
1810 tgl@sss.pgh.pa.us 807 :UBC 0 : econtext = newProj->pi_exprContext;
808 : 0 : econtext->ecxt_outertuple = planSlot;
809 : 0 : return ExecProject(newProj);
810 : : }
811 : :
812 : : /*
813 : : * ExecGetUpdateNewTuple
814 : : * This prepares a "new" tuple by combining an UPDATE subplan's output
815 : : * tuple (which contains values of changed columns) with unchanged
816 : : * columns taken from the old tuple.
817 : : *
818 : : * The subplan tuple might also contain junk columns, which are ignored.
819 : : * Note that the projection also ensures we have a slot of the right type.
820 : : */
821 : : TupleTableSlot *
1810 tgl@sss.pgh.pa.us 822 :CBC 160168 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
823 : : TupleTableSlot *planSlot,
824 : : TupleTableSlot *oldSlot)
825 : : {
1098 dean.a.rasheed@gmail 826 : 160168 : ProjectionInfo *newProj = relinfo->ri_projectNew;
827 : : ExprContext *econtext;
828 : :
829 : : /* Use a few extra Asserts to protect against outside callers */
1804 tgl@sss.pgh.pa.us 830 [ - + ]: 160168 : Assert(relinfo->ri_projectNewInfoValid);
1810 831 [ + - - + ]: 160168 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
832 [ + - - + ]: 160168 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
833 : :
834 : 160168 : econtext = newProj->pi_exprContext;
835 : 160168 : econtext->ecxt_outertuple = planSlot;
836 : 160168 : econtext->ecxt_scantuple = oldSlot;
837 : 160168 : return ExecProject(newProj);
838 : : }
839 : :
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		For INSERT, we have to insert the tuple into the target relation
 *		(or partition thereof) and insert appropriate tuples into the index
 *		relations.
 *
 *		slot contains the new tuple value to be stored.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 *		*inserted_tuple is the tuple that's effectively inserted;
 *		*insert_destrel is the relation where it was inserted.
 *		These are only set on success.
 *
 *		This may change the currently active tuple conversion map in
 *		mtstate->mt_transition_capture, so the callers must take care to
 *		save the previous value to avoid losing track of it.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecInsert(ModifyTableContext *context,
		   ResultRelInfo *resultRelInfo,
		   TupleTableSlot *slot,
		   bool canSetTag,
		   TupleTableSlot **inserted_tuple,
		   ResultRelInfo **insert_destrel)
{
	ModifyTableState *mtstate = context->mtstate;
	EState	   *estate = context->estate;
	Relation	resultRelationDesc;
	List	   *recheckIndexes = NIL;
	TupleTableSlot *planSlot = context->planSlot;
	TupleTableSlot *result = NULL;	/* RETURNING projection result, if any */
	TransitionCaptureState *ar_insert_trig_tcs;
	ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
	OnConflictAction onconflict = node->onConflictAction;
	PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
	MemoryContext oldContext;

	/*
	 * If the input result relation is a partitioned table, find the leaf
	 * partition to insert the tuple into.
	 */
	if (proute)
	{
		ResultRelInfo *partRelInfo;

		slot = ExecPrepareTupleRouting(mtstate, estate, proute,
									   resultRelInfo, slot,
									   &partRelInfo);
		/* from here on we operate on the chosen leaf partition */
		resultRelInfo = partRelInfo;
	}

	ExecMaterializeSlot(slot);

	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/*
	 * Open the table's indexes, if we have not done so already, so that we
	 * can add new index entries for the inserted tuple.
	 */
	if (resultRelationDesc->rd_rel->relhasindex &&
		resultRelInfo->ri_IndexRelationDescs == NULL)
		ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);

	/*
	 * BEFORE ROW INSERT Triggers.
	 *
	 * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
	 * INSERT ... ON CONFLICT statement.  We cannot check for constraint
	 * violations before firing these triggers, because they can change the
	 * values to insert.  Also, they can run arbitrary user-defined code with
	 * side-effects that we can't cancel by just not inserting the tuple.
	 */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_before_row)
	{
		/* Flush any pending inserts, so rows are visible to the triggers */
		if (estate->es_insert_pending_result_relations != NIL)
			ExecPendingInserts(estate);

		if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
			return NULL;		/* "do nothing" */
	}

	/* INSTEAD OF ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
	{
		if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
			return NULL;		/* "do nothing" */
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		/*
		 * GENERATED expressions might reference the tableoid column, so
		 * (re-)initialize tts_tableOid before evaluating them.
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);

		/*
		 * Compute stored generated columns
		 */
		if (resultRelationDesc->rd_att->constr &&
			resultRelationDesc->rd_att->constr->has_generated_stored)
			ExecComputeStoredGenerated(resultRelInfo, estate, slot,
									   CMD_INSERT);

		/*
		 * If the FDW supports batching, and batching is requested, accumulate
		 * rows and insert them in batches. Otherwise use the per-row inserts.
		 */
		if (resultRelInfo->ri_BatchSize > 1)
		{
			bool		flushed = false;

			/*
			 * When we've reached the desired batch size, perform the
			 * insertion.
			 */
			if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
			{
				ExecBatchInsert(mtstate, resultRelInfo,
								resultRelInfo->ri_Slots,
								resultRelInfo->ri_PlanSlots,
								resultRelInfo->ri_NumSlots,
								estate, canSetTag);
				flushed = true;
			}

			/* batch slots must live across calls, so use es_query_cxt */
			oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

			if (resultRelInfo->ri_Slots == NULL)
			{
				resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
				resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
			}

			/*
			 * Initialize the batch slots. We don't know how many slots will
			 * be needed, so we initialize them as the batch grows, and we
			 * keep them across batches. To mitigate an inefficiency in how
			 * resource owner handles objects with many references (as with
			 * many slots all referencing the same tuple descriptor) we copy
			 * the appropriate tuple descriptor for each slot.
			 */
			if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
			{
				TupleDesc	tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
				TupleDesc	plan_tdesc =
					CreateTupleDescCopy(planSlot->tts_tupleDescriptor);

				resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
					MakeSingleTupleTableSlot(tdesc, slot->tts_ops);

				resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
					MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);

				/* remember how many batch slots we initialized */
				resultRelInfo->ri_NumSlotsInitialized++;
			}

			ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
						 slot);

			ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
						 planSlot);

			/*
			 * If these are the first tuples stored in the buffers, add the
			 * target rel and the mtstate to the
			 * es_insert_pending_result_relations and
			 * es_insert_pending_modifytables lists respectively, except in
			 * the case where flushing was done above, in which case they
			 * would already have been added to the lists, so no need to do
			 * this.
			 */
			if (resultRelInfo->ri_NumSlots == 0 && !flushed)
			{
				Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
										resultRelInfo));
				estate->es_insert_pending_result_relations =
					lappend(estate->es_insert_pending_result_relations,
							resultRelInfo);
				estate->es_insert_pending_modifytables =
					lappend(estate->es_insert_pending_modifytables, mtstate);
			}
			Assert(list_member_ptr(estate->es_insert_pending_result_relations,
								   resultRelInfo));

			resultRelInfo->ri_NumSlots++;

			MemoryContextSwitchTo(oldContext);

			/* row is buffered, not yet inserted; no RETURNING result here */
			return NULL;
		}

		/*
		 * insert into foreign table: let the FDW do it
		 */
		slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/*
		 * AFTER ROW Triggers or RETURNING expressions might reference the
		 * tableoid column, so (re-)initialize tts_tableOid before evaluating
		 * them.  (This covers the case where the FDW replaced the slot.)
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
	}
	else
	{
		WCOKind		wco_kind;

		/*
		 * Constraints and GENERATED expressions might reference the tableoid
		 * column, so (re-)initialize tts_tableOid before evaluating them.
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelationDesc);

		/*
		 * Compute stored generated columns
		 */
		if (resultRelationDesc->rd_att->constr &&
			resultRelationDesc->rd_att->constr->has_generated_stored)
			ExecComputeStoredGenerated(resultRelInfo, estate, slot,
									   CMD_INSERT);

		/*
		 * Check any RLS WITH CHECK policies.
		 *
		 * Normally we should check INSERT policies. But if the insert is the
		 * result of a partition key update that moved the tuple to a new
		 * partition, we should instead check UPDATE policies, because we are
		 * executing policies defined on the target table, and not those
		 * defined on the child partitions.
		 *
		 * If we're running MERGE, we refer to the action that we're executing
		 * to know if we're doing an INSERT or UPDATE to a partition table.
		 */
		if (mtstate->operation == CMD_UPDATE)
			wco_kind = WCO_RLS_UPDATE_CHECK;
		else if (mtstate->operation == CMD_MERGE)
			wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
				WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
		else
			wco_kind = WCO_RLS_INSERT_CHECK;

		/*
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
		if (resultRelInfo->ri_WithCheckOptions != NIL)
			ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);

		/*
		 * Check the constraints of the tuple.
		 */
		if (resultRelationDesc->rd_att->constr)
			ExecConstraints(resultRelInfo, slot, estate);

		/*
		 * Also check the tuple against the partition constraint, if there is
		 * one; except that if we got here via tuple-routing, we don't need to
		 * if there's no BR trigger defined on the partition.
		 */
		if (resultRelationDesc->rd_rel->relispartition &&
			(resultRelInfo->ri_RootResultRelInfo == NULL ||
			 (resultRelInfo->ri_TrigDesc &&
			  resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
			ExecPartitionCheck(resultRelInfo, slot, estate, true);

		if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
		{
			/* Perform a speculative insertion. */
			uint32		specToken;
			ItemPointerData conflictTid;
			ItemPointerData invalidItemPtr;
			bool		specConflict;
			List	   *arbiterIndexes;

			ItemPointerSetInvalid(&invalidItemPtr);
			arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;

			/*
			 * Do a non-conclusive check for conflicts first.
			 *
			 * We're not holding any locks yet, so this doesn't guarantee that
			 * the later insert won't conflict.  But it avoids leaving behind
			 * a lot of canceled speculative insertions, if you run a lot of
			 * INSERT ON CONFLICT statements that do conflict.
			 *
			 * We loop back here if we find a conflict below, either during
			 * the pre-check, or when we re-check after inserting the tuple
			 * speculatively.  Better allow interrupts in case some bug makes
			 * this an infinite loop.
			 */
	vlock:
			CHECK_FOR_INTERRUPTS();
			specConflict = false;
			if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
										   &conflictTid, &invalidItemPtr,
										   arbiterIndexes))
			{
				/* committed conflict tuple found */
				if (onconflict == ONCONFLICT_UPDATE)
				{
					/*
					 * In case of ON CONFLICT DO UPDATE, execute the UPDATE
					 * part.  Be prepared to retry if the UPDATE fails because
					 * of another concurrent UPDATE/DELETE to the conflict
					 * tuple.
					 */
					TupleTableSlot *returning = NULL;

					if (ExecOnConflictUpdate(context, resultRelInfo,
											 &conflictTid, slot, canSetTag,
											 &returning))
					{
						InstrCountTuples2(&mtstate->ps, 1);
						return returning;
					}
					else
						goto vlock;
				}
				else if (onconflict == ONCONFLICT_SELECT)
				{
					/*
					 * In case of ON CONFLICT DO SELECT, optionally lock the
					 * conflicting tuple, fetch it and project RETURNING on
					 * it.  Be prepared to retry if locking fails because of a
					 * concurrent UPDATE/DELETE to the conflict tuple.
					 */
					TupleTableSlot *returning = NULL;

					if (ExecOnConflictSelect(context, resultRelInfo,
											 &conflictTid, slot, canSetTag,
											 &returning))
					{
						InstrCountTuples2(&mtstate->ps, 1);
						return returning;
					}
					else
						goto vlock;
				}
				else
				{
					/*
					 * In case of ON CONFLICT DO NOTHING, do nothing. However,
					 * verify that the tuple is visible to the executor's MVCC
					 * snapshot at higher isolation levels.
					 *
					 * Using ExecGetReturningSlot() to store the tuple for the
					 * recheck isn't that pretty, but we can't trivially use
					 * the input slot, because it might not be of a compatible
					 * type. As there's no conflicting usage of
					 * ExecGetReturningSlot() in the DO NOTHING case...
					 */
					Assert(onconflict == ONCONFLICT_NOTHING);
					ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
										ExecGetReturningSlot(estate, resultRelInfo));
					InstrCountTuples2(&mtstate->ps, 1);
					return NULL;
				}
			}

			/*
			 * Before we start insertion proper, acquire our "speculative
			 * insertion lock".  Others can use that to wait for us to decide
			 * if we're going to go ahead with the insertion, instead of
			 * waiting for the whole transaction to complete.
			 */
			INJECTION_POINT("exec-insert-before-insert-speculative", NULL);
			specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

			/* insert the tuple, with the speculative token */
			table_tuple_insert_speculative(resultRelationDesc, slot,
										   estate->es_output_cid,
										   0,
										   NULL,
										   specToken);

			/* insert index entries for tuple */
			recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
												   estate, EIIT_NO_DUPE_ERROR,
												   slot, arbiterIndexes,
												   &specConflict);

			/* adjust the tuple's state accordingly */
			table_tuple_complete_speculative(resultRelationDesc, slot,
											 specToken, !specConflict);

			/*
			 * Wake up anyone waiting for our decision.  They will re-check
			 * the tuple, see that it's no longer speculative, and wait on our
			 * XID as if this was a regularly inserted tuple all along.  Or if
			 * we killed the tuple, they will see it's dead, and proceed as if
			 * the tuple never existed.
			 */
			SpeculativeInsertionLockRelease(GetCurrentTransactionId());

			/*
			 * If there was a conflict, start from the beginning.  We'll do
			 * the pre-check again, which will now find the conflicting tuple
			 * (unless it aborts before we get there).
			 */
			if (specConflict)
			{
				list_free(recheckIndexes);
				goto vlock;
			}

			/* Since there was no insertion conflict, we're done */
		}
		else
		{
			/* insert the tuple normally */
			table_tuple_insert(resultRelationDesc, slot,
							   estate->es_output_cid,
							   0, NULL);

			/* insert index entries for tuple */
			if (resultRelInfo->ri_NumIndices > 0)
				recheckIndexes = ExecInsertIndexTuples(resultRelInfo, estate,
													   0, slot, NIL,
													   NULL);
		}
	}

	/* count the successfully inserted row toward the command's row count */
	if (canSetTag)
		(estate->es_processed)++;

	/*
	 * If this insert is the result of a partition key update that moved the
	 * tuple to a new partition, put this row into the transition NEW TABLE,
	 * if there is one.  We need to do this separately for DELETE and INSERT
	 * because they happen on different tables.
	 */
	ar_insert_trig_tcs = mtstate->mt_transition_capture;
	if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
		&& mtstate->mt_transition_capture->tcs_update_new_table)
	{
		ExecARUpdateTriggers(estate, resultRelInfo,
							 NULL, NULL,
							 NULL,
							 NULL,
							 slot,
							 NULL,
							 mtstate->mt_transition_capture,
							 false);

		/*
		 * We've already captured the NEW TABLE row, so make sure any AR
		 * INSERT trigger fired below doesn't capture it again.
		 */
		ar_insert_trig_tcs = NULL;
	}

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
						 ar_insert_trig_tcs);

	list_free(recheckIndexes);

	/*
	 * Check any WITH CHECK OPTION constraints from parent views.  We are
	 * required to do this after testing all constraints and uniqueness
	 * violations per the SQL spec, so we do it after actually inserting the
	 * record into the heap and all indexes.
	 *
	 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
	 * tuple will never be seen, if it violates the WITH CHECK OPTION.
	 *
	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
	 * are looking for at this point.
	 */
	if (resultRelInfo->ri_WithCheckOptions != NIL)
		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		TupleTableSlot *oldSlot = NULL;

		/*
		 * If this is part of a cross-partition UPDATE, and the RETURNING list
		 * refers to any OLD columns, ExecDelete() will have saved the tuple
		 * deleted from the original partition, which we must use here to
		 * compute the OLD column values.  Otherwise, all OLD column values
		 * will be NULL.
		 */
		if (context->cpDeletedSlot)
		{
			TupleConversionMap *tupconv_map;

			/*
			 * Convert the OLD tuple to the new partition's format/slot, if
			 * needed.  Note that ExecDelete() already converted it to the
			 * root's partition's format/slot.
			 */
			oldSlot = context->cpDeletedSlot;
			tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
			if (tupconv_map != NULL)
			{
				oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
												oldSlot,
												ExecGetReturningSlot(estate,
																	 resultRelInfo));

				oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
				ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
			}
		}

		result = ExecProcessReturning(context, resultRelInfo, false,
									  oldSlot, slot, planSlot);

		/*
		 * For a cross-partition UPDATE, release the old tuple, first making
		 * sure that the result slot has a local copy of any pass-by-reference
		 * values.
		 */
		if (context->cpDeletedSlot)
		{
			ExecMaterializeSlot(result);
			ExecClearTuple(oldSlot);
			if (context->cpDeletedSlot != oldSlot)
				ExecClearTuple(context->cpDeletedSlot);
			context->cpDeletedSlot = NULL;
		}
	}

	if (inserted_tuple)
		*inserted_tuple = slot;
	if (insert_destrel)
		*insert_destrel = resultRelInfo;

	return result;
}
1384 : :
1385 : : /* ----------------------------------------------------------------
1386 : : * ExecBatchInsert
1387 : : *
1388 : : * Insert multiple tuples in an efficient way.
1389 : : * Currently, this handles inserting into a foreign table without
1390 : : * RETURNING clause.
1391 : : * ----------------------------------------------------------------
1392 : : */
1393 : : static void
1880 tomas.vondra@postgre 1394 : 29 : ExecBatchInsert(ModifyTableState *mtstate,
1395 : : ResultRelInfo *resultRelInfo,
1396 : : TupleTableSlot **slots,
1397 : : TupleTableSlot **planSlots,
1398 : : int numSlots,
1399 : : EState *estate,
1400 : : bool canSetTag)
1401 : : {
1402 : : int i;
1403 : 29 : int numInserted = numSlots;
1404 : 29 : TupleTableSlot *slot = NULL;
1405 : : TupleTableSlot **rslots;
1406 : :
1407 : : /*
1408 : : * insert into foreign table: let the FDW do it
1409 : : */
1410 : 29 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1411 : : resultRelInfo,
1412 : : slots,
1413 : : planSlots,
1414 : : &numInserted);
1415 : :
1416 [ + + ]: 173 : for (i = 0; i < numInserted; i++)
1417 : : {
1418 : 145 : slot = rslots[i];
1419 : :
1420 : : /*
1421 : : * AFTER ROW Triggers might reference the tableoid column, so
1422 : : * (re-)initialize tts_tableOid before evaluating them.
1423 : : */
1424 : 145 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1425 : :
1426 : : /* AFTER ROW INSERT Triggers */
1427 : 145 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1428 : 145 : mtstate->mt_transition_capture);
1429 : :
1430 : : /*
1431 : : * Check any WITH CHECK OPTION constraints from parent views. See the
1432 : : * comment in ExecInsert.
1433 : : */
1434 [ - + ]: 144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1880 tomas.vondra@postgre 1435 :UBC 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1436 : : }
1437 : :
1880 tomas.vondra@postgre 1438 [ + - + - ]:CBC 28 : if (canSetTag && numInserted > 0)
1439 : 28 : estate->es_processed += numInserted;
1440 : :
1441 : : /* Clean up all the slots, ready for the next batch */
1055 michael@paquier.xyz 1442 [ + + ]: 172 : for (i = 0; i < numSlots; i++)
1443 : : {
1444 : 144 : ExecClearTuple(slots[i]);
1445 : 144 : ExecClearTuple(planSlots[i]);
1446 : : }
1447 : 28 : resultRelInfo->ri_NumSlots = 0;
1880 tomas.vondra@postgre 1448 : 28 : }
1449 : :
1450 : : /*
1451 : : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1452 : : */
1453 : : static void
1206 efujita@postgresql.o 1454 : 18 : ExecPendingInserts(EState *estate)
1455 : : {
1456 : : ListCell *l1,
1457 : : *l2;
1458 : :
1193 1459 [ + - + + : 36 : forboth(l1, estate->es_insert_pending_result_relations,
+ - + + +
+ + - +
+ ]
1460 : : l2, estate->es_insert_pending_modifytables)
1461 : : {
1462 : 19 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1463 : 19 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1464 : :
1206 1465 [ - + ]: 19 : Assert(mtstate);
1466 : 19 : ExecBatchInsert(mtstate, resultRelInfo,
1467 : : resultRelInfo->ri_Slots,
1468 : : resultRelInfo->ri_PlanSlots,
1469 : : resultRelInfo->ri_NumSlots,
1470 : 19 : estate, mtstate->canSetTag);
1471 : : }
1472 : :
1473 : 17 : list_free(estate->es_insert_pending_result_relations);
1193 1474 : 17 : list_free(estate->es_insert_pending_modifytables);
1206 1475 : 17 : estate->es_insert_pending_result_relations = NIL;
1193 1476 : 17 : estate->es_insert_pending_modifytables = NIL;
1206 1477 : 17 : }
1478 : :
1479 : : /*
1480 : : * ExecDeletePrologue -- subroutine for ExecDelete
1481 : : *
1482 : : * Prepare executor state for DELETE. Actually, the only thing we have to do
1483 : : * here is execute BEFORE ROW triggers. We return false if one of them makes
1484 : : * the delete a no-op; otherwise, return true.
1485 : : */
1486 : : static bool
1459 alvherre@alvh.no-ip. 1487 : 789271 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1488 : : ItemPointer tupleid, HeapTuple oldtuple,
1489 : : TupleTableSlot **epqreturnslot, TM_Result *result)
1490 : : {
1098 dean.a.rasheed@gmail 1491 [ + + ]: 789271 : if (result)
1492 : 799 : *result = TM_Ok;
1493 : :
1494 : : /* BEFORE ROW DELETE triggers */
1459 alvherre@alvh.no-ip. 1495 [ + + ]: 789271 : if (resultRelInfo->ri_TrigDesc &&
1496 [ + + ]: 3531 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1497 : : {
1498 : : /* Flush any pending inserts, so rows are visible to the triggers */
1206 efujita@postgresql.o 1499 [ + + ]: 173 : if (context->estate->es_insert_pending_result_relations != NIL)
1500 : 1 : ExecPendingInserts(context->estate);
1501 : :
1459 alvherre@alvh.no-ip. 1502 : 165 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1503 : : resultRelInfo, tupleid, oldtuple,
1504 : : epqreturnslot, result, &context->tmfd,
240 dean.a.rasheed@gmail 1505 : 173 : context->mtstate->operation == CMD_MERGE);
1506 : : }
1507 : :
1459 alvherre@alvh.no-ip. 1508 : 789098 : return true;
1509 : : }
1510 : :
1511 : : /*
1512 : : * ExecDeleteAct -- subroutine for ExecDelete
1513 : : *
1514 : : * Actually delete the tuple from a plain table.
1515 : : *
1516 : : * Caller is in charge of doing EvalPlanQual as necessary
1517 : : */
1518 : : static TM_Result
1519 : 789182 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1520 : : ItemPointer tupleid, bool changingPart)
1521 : : {
1522 : 789182 : EState *estate = context->estate;
1523 : :
1524 : 789182 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1525 : : estate->es_output_cid,
1526 : : estate->es_snapshot,
1527 : : estate->es_crosscheck_snapshot,
1528 : : true /* wait for commit */ ,
1529 : : &context->tmfd,
1530 : : changingPart);
1531 : : }
1532 : :
1533 : : /*
1534 : : * ExecDeleteEpilogue -- subroutine for ExecDelete
1535 : : *
1536 : : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1537 : : * including the UPDATE triggers if the deletion is being done as part of a
1538 : : * cross-partition tuple move.
1539 : : */
1540 : : static void
1541 : 789152 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1542 : : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1543 : : {
1544 : 789152 : ModifyTableState *mtstate = context->mtstate;
1545 : 789152 : EState *estate = context->estate;
1546 : : TransitionCaptureState *ar_delete_trig_tcs;
1547 : :
1548 : : /*
1549 : : * If this delete is the result of a partition key update that moved the
1550 : : * tuple to a new partition, put this row into the transition OLD TABLE,
1551 : : * if there is one. We need to do this separately for DELETE and INSERT
1552 : : * because they happen on different tables.
1553 : : */
1554 : 789152 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1555 [ + + + + ]: 789152 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1556 [ + + ]: 27 : mtstate->mt_transition_capture->tcs_update_old_table)
1557 : : {
1456 1558 : 24 : ExecARUpdateTriggers(estate, resultRelInfo,
1559 : : NULL, NULL,
1560 : : tupleid, oldtuple,
703 akorotkov@postgresql 1561 : 24 : NULL, NULL, mtstate->mt_transition_capture,
1562 : : false);
1563 : :
1564 : : /*
1565 : : * We've already captured the OLD TABLE row, so make sure any AR
1566 : : * DELETE trigger fired below doesn't capture it again.
1567 : : */
1459 alvherre@alvh.no-ip. 1568 : 24 : ar_delete_trig_tcs = NULL;
1569 : : }
1570 : :
1571 : : /* AFTER ROW DELETE Triggers */
703 akorotkov@postgresql 1572 : 789152 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1573 : : ar_delete_trig_tcs, changingPart);
1459 alvherre@alvh.no-ip. 1574 : 789150 : }
1575 : :
1576 : : /* ----------------------------------------------------------------
1577 : : * ExecDelete
1578 : : *
1579 : : * DELETE is like UPDATE, except that we delete the tuple and no
1580 : : * index modifications are needed.
1581 : : *
1582 : : * When deleting from a table, tupleid identifies the tuple to delete and
1583 : : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1584 : : * oldtuple is passed to the triggers and identifies what to delete, and
1585 : : * tupleid is invalid. When deleting from a foreign table, tupleid is
1586 : : * invalid; the FDW has to figure out which row to delete using data from
1587 : : * the planSlot. oldtuple is passed to foreign table triggers; it is
1588 : : * NULL when the foreign table has no relevant triggers. We use
1589 : : * tupleDeleted to indicate whether the tuple is actually deleted,
1590 : : * callers can use it to decide whether to continue the operation. When
1591 : : * this DELETE is a part of an UPDATE of partition-key, then the slot
1592 : : * returned by EvalPlanQual() is passed back using output parameter
1593 : : * epqreturnslot.
1594 : : *
1595 : : * Returns RETURNING result if any, otherwise NULL.
1596 : : * ----------------------------------------------------------------
1597 : : */
1598 : : static TupleTableSlot *
1599 : 789008 : ExecDelete(ModifyTableContext *context,
1600 : : ResultRelInfo *resultRelInfo,
1601 : : ItemPointer tupleid,
1602 : : HeapTuple oldtuple,
1603 : : bool processReturning,
1604 : : bool changingPart,
1605 : : bool canSetTag,
1606 : : TM_Result *tmresult,
1607 : : bool *tupleDeleted,
1608 : : TupleTableSlot **epqreturnslot)
1609 : : {
1610 : 789008 : EState *estate = context->estate;
1978 heikki.linnakangas@i 1611 : 789008 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
4753 tgl@sss.pgh.pa.us 1612 : 789008 : TupleTableSlot *slot = NULL;
1613 : : TM_Result result;
1614 : : bool saveOld;
1615 : :
2977 rhaas@postgresql.org 1616 [ + + ]: 789008 : if (tupleDeleted)
1617 : 536 : *tupleDeleted = false;
1618 : :
1619 : : /*
1620 : : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1621 : : * done if it says we are.
1622 : : */
1459 alvherre@alvh.no-ip. 1623 [ + + ]: 789008 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1624 : : epqreturnslot, tmresult))
1625 : 26 : return NULL;
1626 : :
1627 : : /* INSTEAD OF ROW DELETE Triggers */
5635 tgl@sss.pgh.pa.us 1628 [ + + ]: 788974 : if (resultRelInfo->ri_TrigDesc &&
1629 [ + + ]: 3463 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
6000 1630 : 24 : {
1631 : : bool dodelete;
1632 : :
5635 1633 [ - + ]: 27 : Assert(oldtuple != NULL);
4375 noah@leadboat.com 1634 : 27 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1635 : :
5635 tgl@sss.pgh.pa.us 1636 [ + + ]: 27 : if (!dodelete) /* "do nothing" */
6000 1637 : 3 : return NULL;
1638 : : }
4753 1639 [ + + ]: 788947 : else if (resultRelInfo->ri_FdwRoutine)
1640 : : {
1641 : : /*
1642 : : * delete from foreign table: let the FDW do it
1643 : : *
1644 : : * We offer the returning slot as a place to store RETURNING data,
1645 : : * although the FDW can return some other slot if it wants.
1646 : : */
2574 andres@anarazel.de 1647 : 23 : slot = ExecGetReturningSlot(estate, resultRelInfo);
4753 tgl@sss.pgh.pa.us 1648 : 23 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1649 : : resultRelInfo,
1650 : : slot,
1651 : : context->planSlot);
1652 : :
1653 [ - + ]: 23 : if (slot == NULL) /* "do nothing" */
4753 tgl@sss.pgh.pa.us 1654 :UBC 0 : return NULL;
1655 : :
1656 : : /*
1657 : : * RETURNING expressions might reference the tableoid column, so
1658 : : * (re)initialize tts_tableOid before evaluating them.
1659 : : */
2708 andres@anarazel.de 1660 [ + + ]:CBC 23 : if (TTS_EMPTY(slot))
3692 rhaas@postgresql.org 1661 : 5 : ExecStoreAllNullTuple(slot);
1662 : :
2574 andres@anarazel.de 1663 : 23 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1664 : : }
1665 : : else
1666 : : {
1667 : : /*
1668 : : * delete the tuple
1669 : : *
1670 : : * Note: if context->estate->es_crosscheck_snapshot isn't
1671 : : * InvalidSnapshot, we check that the row to be deleted is visible to
1672 : : * that snapshot, and throw a can't-serialize error if not. This is a
1673 : : * special-case behavior needed for referential integrity updates in
1674 : : * transaction-snapshot mode transactions.
1675 : : */
1252 john.naylor@postgres 1676 : 788924 : ldelete:
703 akorotkov@postgresql 1677 : 788928 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1678 : :
815 dean.a.rasheed@gmail 1679 [ + + ]: 788910 : if (tmresult)
1680 : 519 : *tmresult = result;
1681 : :
5635 tgl@sss.pgh.pa.us 1682 [ + + + + : 788910 : switch (result)
- ]
1683 : : {
2549 andres@anarazel.de 1684 : 15 : case TM_SelfModified:
1685 : :
1686 : : /*
1687 : : * The target tuple was already updated or deleted by the
1688 : : * current command, or by a later command in the current
1689 : : * transaction. The former case is possible in a join DELETE
1690 : : * where multiple tuples join to the same target tuple. This
1691 : : * is somewhat questionable, but Postgres has always allowed
1692 : : * it: we just ignore additional deletion attempts.
1693 : : *
1694 : : * The latter case arises if the tuple is modified by a
1695 : : * command in a BEFORE trigger, or perhaps by a command in a
1696 : : * volatile function used in the query. In such situations we
1697 : : * should not ignore the deletion, but it is equally unsafe to
1698 : : * proceed. We don't want to discard the original DELETE
1699 : : * while keeping the triggered actions based on its deletion;
1700 : : * and it would be no better to allow the original DELETE
1701 : : * while discarding updates that it triggered. The row update
1702 : : * carries some information that might be important according
1703 : : * to business rules; so throwing an error is the only safe
1704 : : * course.
1705 : : *
1706 : : * If a trigger actually intends this type of interaction, it
1707 : : * can re-execute the DELETE and then return NULL to cancel
1708 : : * the outer delete.
1709 : : */
1459 alvherre@alvh.no-ip. 1710 [ + + ]: 15 : if (context->tmfd.cmax != estate->es_output_cid)
4888 kgrittn@postgresql.o 1711 [ + - ]: 3 : ereport(ERROR,
1712 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1713 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1714 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1715 : :
1716 : : /* Else, already deleted by self; nothing to do */
5635 tgl@sss.pgh.pa.us 1717 : 12 : return NULL;
1718 : :
2549 andres@anarazel.de 1719 : 788857 : case TM_Ok:
5635 tgl@sss.pgh.pa.us 1720 : 788857 : break;
1721 : :
2549 andres@anarazel.de 1722 : 35 : case TM_Updated:
1723 : : {
1724 : : TupleTableSlot *inputslot;
1725 : : TupleTableSlot *epqslot;
1726 : :
1727 [ + + ]: 35 : if (IsolationUsesXactSnapshot())
1728 [ + - ]: 1 : ereport(ERROR,
1729 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1730 : : errmsg("could not serialize access due to concurrent update")));
1731 : :
1732 : : /*
1733 : : * Already know that we're going to need to do EPQ, so
1734 : : * fetch tuple directly into the right slot.
1735 : : */
703 akorotkov@postgresql 1736 : 34 : EvalPlanQualBegin(context->epqstate);
1737 : 34 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1738 : : resultRelInfo->ri_RangeTableIndex);
1739 : :
1740 : 34 : result = table_tuple_lock(resultRelationDesc, tupleid,
1741 : : estate->es_snapshot,
1742 : : inputslot, estate->es_output_cid,
1743 : : LockTupleExclusive, LockWaitBlock,
1744 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1745 : : &context->tmfd);
1746 : :
1747 [ + + + - ]: 30 : switch (result)
1748 : : {
1749 : 27 : case TM_Ok:
1750 [ - + ]: 27 : Assert(context->tmfd.traversed);
1751 : 27 : epqslot = EvalPlanQual(context->epqstate,
1752 : : resultRelationDesc,
1753 : : resultRelInfo->ri_RangeTableIndex,
1754 : : inputslot);
1755 [ + - + + ]: 27 : if (TupIsNull(epqslot))
1756 : : /* Tuple not passing quals anymore, exiting... */
1757 : 15 : return NULL;
1758 : :
1759 : : /*
1760 : : * If requested, skip delete and pass back the
1761 : : * updated row.
1762 : : */
1763 [ + + ]: 12 : if (epqreturnslot)
1764 : : {
1765 : 8 : *epqreturnslot = epqslot;
1766 : 8 : return NULL;
1767 : : }
1768 : : else
1769 : 4 : goto ldelete;
1770 : :
1771 : 2 : case TM_SelfModified:
1772 : :
1773 : : /*
1774 : : * This can be reached when following an update
1775 : : * chain from a tuple updated by another session,
1776 : : * reaching a tuple that was already updated in
1777 : : * this transaction. If previously updated by this
1778 : : * command, ignore the delete, otherwise error
1779 : : * out.
1780 : : *
1781 : : * See also TM_SelfModified response to
1782 : : * table_tuple_delete() above.
1783 : : */
1784 [ + + ]: 2 : if (context->tmfd.cmax != estate->es_output_cid)
1785 [ + - ]: 1 : ereport(ERROR,
1786 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1787 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1788 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1789 : 1 : return NULL;
1790 : :
1791 : 1 : case TM_Deleted:
1792 : : /* tuple already deleted; nothing to do */
1793 : 1 : return NULL;
1794 : :
703 akorotkov@postgresql 1795 :UBC 0 : default:
1796 : :
1797 : : /*
1798 : : * TM_Invisible should be impossible because we're
1799 : : * waiting for updated row versions, and would
1800 : : * already have errored out if the first version
1801 : : * is invisible.
1802 : : *
1803 : : * TM_Updated should be impossible, because we're
1804 : : * locking the latest version via
1805 : : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1806 : : */
1807 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1808 : : result);
1809 : : return NULL;
1810 : : }
1811 : :
1812 : : Assert(false);
1813 : : break;
1814 : : }
1815 : :
2549 andres@anarazel.de 1816 :CBC 3 : case TM_Deleted:
1817 [ - + ]: 3 : if (IsolationUsesXactSnapshot())
2549 andres@anarazel.de 1818 [ # # ]:UBC 0 : ereport(ERROR,
1819 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1820 : : errmsg("could not serialize access due to concurrent delete")));
1821 : : /* tuple already deleted; nothing to do */
5635 tgl@sss.pgh.pa.us 1822 :CBC 3 : return NULL;
1823 : :
5635 tgl@sss.pgh.pa.us 1824 :UBC 0 : default:
2488 andres@anarazel.de 1825 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1826 : : result);
1827 : : return NULL;
1828 : : }
1829 : :
1830 : : /*
1831 : : * Note: Normally one would think that we have to delete index tuples
1832 : : * associated with the heap tuple now...
1833 : : *
1834 : : * ... but in POSTGRES, we have no need to do this because VACUUM will
1835 : : * take care of it later. We can't delete index tuples immediately
1836 : : * anyway, since the tuple is still visible to other transactions.
1837 : : */
1838 : : }
1839 : :
5497 tgl@sss.pgh.pa.us 1840 [ + + ]:CBC 788904 : if (canSetTag)
1841 : 788297 : (estate->es_processed)++;
1842 : :
1843 : : /* Tell caller that the delete actually happened. */
2977 rhaas@postgresql.org 1844 [ + + ]: 788904 : if (tupleDeleted)
1845 : 493 : *tupleDeleted = true;
1846 : :
703 akorotkov@postgresql 1847 : 788904 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1848 : :
1849 : : /*
1850 : : * Process RETURNING if present and if requested.
1851 : : *
1852 : : * If this is part of a cross-partition UPDATE, and the RETURNING list
1853 : : * refers to any OLD column values, save the old tuple here for later
1854 : : * processing of the RETURNING list by ExecInsert().
1855 : : */
423 dean.a.rasheed@gmail 1856 [ + + + + ]: 788977 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1857 [ + + ]: 75 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1858 : :
1859 [ + + + + : 788902 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
+ + ]
1860 : : {
1861 : : /*
1862 : : * We have to put the target tuple into a slot, which means first we
1863 : : * gotta fetch it. We can use the trigger tuple slot.
1864 : : */
1865 : : TupleTableSlot *rslot;
1866 : :
4753 tgl@sss.pgh.pa.us 1867 [ + + ]: 506 : if (resultRelInfo->ri_FdwRoutine)
1868 : : {
1869 : : /* FDW must have provided a slot containing the deleted row */
1870 [ + - - + ]: 7 : Assert(!TupIsNull(slot));
1871 : : }
1872 : : else
1873 : : {
2574 andres@anarazel.de 1874 : 499 : slot = ExecGetReturningSlot(estate, resultRelInfo);
4753 tgl@sss.pgh.pa.us 1875 [ + + ]: 499 : if (oldtuple != NULL)
1876 : : {
2522 andres@anarazel.de 1877 : 12 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1878 : : }
1879 : : else
1880 : : {
703 akorotkov@postgresql 1881 [ - + ]: 487 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1882 : : SnapshotAny, slot))
703 akorotkov@postgresql 1883 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1884 : : }
1885 : : }
1886 : :
1887 : : /*
1888 : : * If required, save the old tuple for later processing of the
1889 : : * RETURNING list by ExecInsert().
1890 : : */
423 dean.a.rasheed@gmail 1891 [ + + ]:CBC 506 : if (saveOld)
1892 : : {
1893 : : TupleConversionMap *tupconv_map;
1894 : :
1895 : : /*
1896 : : * Convert the tuple into the root partition's format/slot, if
1897 : : * needed. ExecInsert() will then convert it to the new
1898 : : * partition's format/slot, if necessary.
1899 : : */
1900 : 24 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1901 [ + + ]: 24 : if (tupconv_map != NULL)
1902 : : {
1903 : 10 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1904 : 10 : TupleTableSlot *oldSlot = slot;
1905 : :
1906 : 10 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1907 : : slot,
1908 : : ExecGetReturningSlot(estate,
1909 : : rootRelInfo));
1910 : :
1911 : 10 : slot->tts_tableOid = oldSlot->tts_tableOid;
1912 : 10 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1913 : : }
1914 : :
1915 : 24 : context->cpDeletedSlot = slot;
1916 : :
1917 : 24 : return NULL;
1918 : : }
1919 : :
31 dean.a.rasheed@gmail 1920 :GNC 482 : rslot = ExecProcessReturning(context, resultRelInfo, true,
1921 : : slot, NULL, context->planSlot);
1922 : :
1923 : : /*
1924 : : * Before releasing the target tuple again, make sure rslot has a
1925 : : * local copy of any pass-by-reference values.
1926 : : */
4753 tgl@sss.pgh.pa.us 1927 :CBC 482 : ExecMaterializeSlot(rslot);
1928 : :
6000 1929 : 482 : ExecClearTuple(slot);
1930 : :
1931 : 482 : return rslot;
1932 : : }
1933 : :
1934 : 788396 : return NULL;
1935 : : }
1936 : :
1937 : : /*
1938 : : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1939 : : *
1940 : : * This works by first deleting the old tuple from the current partition,
1941 : : * followed by inserting the new tuple into the root parent table, that is,
1942 : : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1943 : : * correct partition.
1944 : : *
1945 : : * Returns true if the tuple has been successfully moved, or if it's found
1946 : : * that the tuple was concurrently deleted so there's nothing more to do
1947 : : * for the caller.
1948 : : *
1949 : : * False is returned if the tuple we're trying to move is found to have been
1950 : : * concurrently updated. In that case, the caller must check if the updated
1951 : : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1952 : : * this function again or perform a regular update accordingly. For MERGE,
1953 : : * the updated tuple is not returned in *retry_slot; it has its own retry
1954 : : * logic.
1955 : : */
1956 : : static bool
1459 alvherre@alvh.no-ip. 1957 : 560 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1958 : : ResultRelInfo *resultRelInfo,
1959 : : ItemPointer tupleid, HeapTuple oldtuple,
1960 : : TupleTableSlot *slot,
1961 : : bool canSetTag,
1962 : : UpdateContext *updateCxt,
1963 : : TM_Result *tmresult,
1964 : : TupleTableSlot **retry_slot,
1965 : : TupleTableSlot **inserted_tuple,
1966 : : ResultRelInfo **insert_destrel)
1967 : : {
1968 : 560 : ModifyTableState *mtstate = context->mtstate;
1977 heikki.linnakangas@i 1969 : 560 : EState *estate = mtstate->ps.state;
1970 : : TupleConversionMap *tupconv_map;
1971 : : bool tuple_deleted;
1972 : 560 : TupleTableSlot *epqslot = NULL;
1973 : :
423 dean.a.rasheed@gmail 1974 : 560 : context->cpDeletedSlot = NULL;
1459 alvherre@alvh.no-ip. 1975 : 560 : context->cpUpdateReturningSlot = NULL;
1098 dean.a.rasheed@gmail 1976 : 560 : *retry_slot = NULL;
1977 : :
1978 : : /*
1979 : : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1980 : : * to migrate to a different partition. Maybe this can be implemented
1981 : : * some day, but it seems a fringe feature with little redeeming value.
1982 : : */
1977 heikki.linnakangas@i 1983 [ - + ]: 560 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1977 heikki.linnakangas@i 1984 [ # # ]:UBC 0 : ereport(ERROR,
1985 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1986 : : errmsg("invalid ON UPDATE specification"),
1987 : : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1988 : :
1989 : : /*
1990 : : * When an UPDATE is run directly on a leaf partition, simply fail with a
1991 : : * partition constraint violation error.
1992 : : */
1804 tgl@sss.pgh.pa.us 1993 [ + + ]:CBC 560 : if (resultRelInfo == mtstate->rootResultRelInfo)
1977 heikki.linnakangas@i 1994 : 24 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1995 : :
1996 : : /* Initialize tuple routing info if not already done. */
1804 tgl@sss.pgh.pa.us 1997 [ + + ]: 536 : if (mtstate->mt_partition_tuple_routing == NULL)
1998 : : {
1999 : 343 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
2000 : : MemoryContext oldcxt;
2001 : :
2002 : : /* Things built here have to last for the query duration. */
2003 : 343 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
2004 : :
2005 : 343 : mtstate->mt_partition_tuple_routing =
2006 : 343 : ExecSetupPartitionTupleRouting(estate, rootRel);
2007 : :
2008 : : /*
2009 : : * Before a partition's tuple can be re-routed, it must first be
2010 : : * converted to the root's format, so we'll need a slot for storing
2011 : : * such tuples.
2012 : : */
2013 [ - + ]: 343 : Assert(mtstate->mt_root_tuple_slot == NULL);
2014 : 343 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
2015 : :
2016 : 343 : MemoryContextSwitchTo(oldcxt);
2017 : : }
2018 : :
2019 : : /*
2020 : : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
2021 : : * We want to return rows from INSERT.
2022 : : */
1459 alvherre@alvh.no-ip. 2023 : 536 : ExecDelete(context, resultRelInfo,
2024 : : tupleid, oldtuple,
2025 : : false, /* processReturning */
2026 : : true, /* changingPart */
2027 : : false, /* canSetTag */
2028 : : tmresult, &tuple_deleted, &epqslot);
2029 : :
2030 : : /*
2031 : : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
2032 : : * it was already deleted by self, or it was concurrently deleted by
2033 : : * another transaction), then we should skip the insert as well;
2034 : : * otherwise, an UPDATE could cause an increase in the total number of
2035 : : * rows across all partitions, which is clearly wrong.
2036 : : *
2037 : : * For a normal UPDATE, the case where the tuple has been the subject of a
2038 : : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
2039 : : * machinery, but for an UPDATE that we've translated into a DELETE from
2040 : : * this partition and an INSERT into some other partition, that's not
2041 : : * available, because CTID chains can't span relation boundaries. We
2042 : : * mimic the semantics to a limited extent by skipping the INSERT if the
2043 : : * DELETE fails to find a tuple. This ensures that two concurrent
2044 : : * attempts to UPDATE the same tuple at the same time can't turn one tuple
2045 : : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
2046 : : * it.
2047 : : */
1977 heikki.linnakangas@i 2048 [ + + ]: 533 : if (!tuple_deleted)
2049 : : {
2050 : : /*
2051 : : * epqslot will be typically NULL. But when ExecDelete() finds that
2052 : : * another transaction has concurrently updated the same row, it
2053 : : * re-fetches the row, skips the delete, and epqslot is set to the
2054 : : * re-fetched tuple slot. In that case, we need to do all the checks
2055 : : * again. For MERGE, we leave everything to the caller (it must do
2056 : : * additional rechecking, and might end up executing a different
2057 : : * action entirely).
2058 : : */
728 dean.a.rasheed@gmail 2059 [ + + ]: 40 : if (mtstate->operation == CMD_MERGE)
815 2060 : 19 : return *tmresult == TM_Ok;
1098 2061 [ + + - + ]: 21 : else if (TupIsNull(epqslot))
1977 heikki.linnakangas@i 2062 : 18 : return true;
2063 : : else
2064 : : {
2065 : : /* Fetch the most recent version of old tuple. */
2066 : : TupleTableSlot *oldSlot;
2067 : :
2068 : : /* ... but first, make sure ri_oldTupleSlot is initialized. */
703 akorotkov@postgresql 2069 [ - + ]: 3 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
703 akorotkov@postgresql 2070 :UBC 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
703 akorotkov@postgresql 2071 :CBC 3 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2072 [ - + ]: 3 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2073 : : tupleid,
2074 : : SnapshotAny,
2075 : : oldSlot))
703 akorotkov@postgresql 2076 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
2077 : : /* and project the new tuple to retry the UPDATE with */
1098 dean.a.rasheed@gmail 2078 :CBC 3 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2079 : : oldSlot);
1977 heikki.linnakangas@i 2080 : 3 : return false;
2081 : : }
2082 : : }
2083 : :
2084 : : /*
2085 : : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2086 : : * convert the tuple into root's tuple descriptor if needed, since
2087 : : * ExecInsert() starts the search from root.
2088 : : */
1804 tgl@sss.pgh.pa.us 2089 : 493 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1977 heikki.linnakangas@i 2090 [ + + ]: 493 : if (tupconv_map != NULL)
2091 : 158 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2092 : : slot,
2093 : : mtstate->mt_root_tuple_slot);
2094 : :
2095 : : /* Tuple routing starts from the root table. */
1459 alvherre@alvh.no-ip. 2096 : 429 : context->cpUpdateReturningSlot =
1456 2097 : 493 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2098 : : inserted_tuple, insert_destrel);
2099 : :
2100 : : /*
2101 : : * Reset the transition state that may possibly have been written by
2102 : : * INSERT.
2103 : : */
1977 heikki.linnakangas@i 2104 [ + + ]: 429 : if (mtstate->mt_transition_capture)
2105 : 27 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2106 : :
2107 : : /* We're done moving. */
2108 : 429 : return true;
2109 : : }
2110 : :
2111 : : /*
2112 : : * ExecUpdatePrologue -- subroutine for ExecUpdate
2113 : : *
2114 : : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2115 : : * triggers. We return false if one of them makes the update a no-op;
2116 : : * otherwise, return true.
2117 : : */
2118 : : static bool
1459 alvherre@alvh.no-ip. 2119 : 163787 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2120 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2121 : : TM_Result *result)
2122 : : {
2123 : 163787 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2124 : :
1098 dean.a.rasheed@gmail 2125 [ + + ]: 163787 : if (result)
2126 : 1098 : *result = TM_Ok;
2127 : :
1459 alvherre@alvh.no-ip. 2128 : 163787 : ExecMaterializeSlot(slot);
2129 : :
2130 : : /*
2131 : : * Open the table's indexes, if we have not done so already, so that we
2132 : : * can add new index entries for the updated tuple.
2133 : : */
2134 [ + + ]: 163787 : if (resultRelationDesc->rd_rel->relhasindex &&
2135 [ + + ]: 118238 : resultRelInfo->ri_IndexRelationDescs == NULL)
2136 : 4788 : ExecOpenIndices(resultRelInfo, false);
2137 : :
2138 : : /* BEFORE ROW UPDATE triggers */
2139 [ + + ]: 163787 : if (resultRelInfo->ri_TrigDesc &&
2140 [ + + ]: 3173 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2141 : : {
2142 : : /* Flush any pending inserts, so rows are visible to the triggers */
1206 efujita@postgresql.o 2143 [ + + ]: 1305 : if (context->estate->es_insert_pending_result_relations != NIL)
2144 : 1 : ExecPendingInserts(context->estate);
2145 : :
1459 alvherre@alvh.no-ip. 2146 : 1293 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2147 : : resultRelInfo, tupleid, oldtuple, slot,
2148 : : result, &context->tmfd,
240 dean.a.rasheed@gmail 2149 : 1305 : context->mtstate->operation == CMD_MERGE);
2150 : : }
2151 : :
1459 alvherre@alvh.no-ip. 2152 : 162482 : return true;
2153 : : }
2154 : :
2155 : : /*
2156 : : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2157 : : *
2158 : : * Apply the final modifications to the tuple slot before the update.
2159 : : * (This is split out because we also need it in the foreign-table code path.)
2160 : : */
2161 : : static void
2162 : 163656 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2163 : : TupleTableSlot *slot,
2164 : : EState *estate)
2165 : : {
2166 : 163656 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2167 : :
2168 : : /*
2169 : : * Constraints and GENERATED expressions might reference the tableoid
2170 : : * column, so (re-)initialize tts_tableOid before evaluating them.
2171 : : */
2172 : 163656 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2173 : :
2174 : : /*
2175 : : * Compute stored generated columns
2176 : : */
2177 [ + + ]: 163656 : if (resultRelationDesc->rd_att->constr &&
2178 [ + + ]: 99806 : resultRelationDesc->rd_att->constr->has_generated_stored)
2179 : 144 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2180 : : CMD_UPDATE);
2181 : 163656 : }
2182 : :
2183 : : /*
2184 : : * ExecUpdateAct -- subroutine for ExecUpdate
2185 : : *
2186 : : * Actually update the tuple, when operating on a plain table. If the
2187 : : * table is a partition, and the command was called referencing an ancestor
2188 : : * partitioned table, this routine migrates the resulting tuple to another
2189 : : * partition.
2190 : : *
2191 : : * The caller is in charge of keeping indexes current as necessary. The
2192 : : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2193 : : * be concurrently updated. However, in case of a cross-partition update,
2194 : : * this routine does it.
2195 : : */
2196 : : static TM_Result
2197 : 163558 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2198 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2199 : : bool canSetTag, UpdateContext *updateCxt)
2200 : : {
2201 : 163558 : EState *estate = context->estate;
2202 : 163558 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2203 : : bool partition_constraint_failed;
2204 : : TM_Result result;
2205 : :
2206 : 163558 : updateCxt->crossPartUpdate = false;
2207 : :
2208 : : /*
2209 : : * If we move the tuple to a new partition, we loop back here to recompute
2210 : : * GENERATED values (which are allowed to be different across partitions)
2211 : : * and recheck any RLS policies and constraints. We do not fire any
2212 : : * BEFORE triggers of the new partition, however.
2213 : : */
1252 john.naylor@postgres 2214 : 163561 : lreplace:
2215 : : /* Fill in GENERATEd columns */
1105 tgl@sss.pgh.pa.us 2216 : 163561 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2217 : :
2218 : : /* ensure slot is independent, consider e.g. EPQ */
1459 alvherre@alvh.no-ip. 2219 : 163561 : ExecMaterializeSlot(slot);
2220 : :
2221 : : /*
2222 : : * If partition constraint fails, this row might get moved to another
2223 : : * partition, in which case we should check the RLS CHECK policy just
2224 : : * before inserting into the new partition, rather than doing it here.
2225 : : * This is because a trigger on that partition might again change the row.
2226 : : * So skip the WCO checks if the partition constraint fails.
2227 : : */
2228 : 163561 : partition_constraint_failed =
2229 [ + + ]: 164957 : resultRelationDesc->rd_rel->relispartition &&
2230 [ + + ]: 1396 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2231 : :
2232 : : /* Check any RLS UPDATE WITH CHECK policies */
2233 [ + + ]: 163561 : if (!partition_constraint_failed &&
2234 [ + + ]: 163001 : resultRelInfo->ri_WithCheckOptions != NIL)
2235 : : {
2236 : : /*
2237 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2238 : : * we are looking for at this point.
2239 : : */
2240 : 267 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2241 : : resultRelInfo, slot, estate);
2242 : : }
2243 : :
2244 : : /*
2245 : : * If a partition check failed, try to move the row into the right
2246 : : * partition.
2247 : : */
2248 [ + + ]: 163534 : if (partition_constraint_failed)
2249 : : {
2250 : : TupleTableSlot *inserted_tuple,
2251 : : *retry_slot;
1456 2252 : 560 : ResultRelInfo *insert_destrel = NULL;
2253 : :
2254 : : /*
2255 : : * ExecCrossPartitionUpdate will first DELETE the row from the
2256 : : * partition it's currently in and then insert it back into the root
2257 : : * table, which will re-route it to the correct partition. However,
2258 : : * if the tuple has been concurrently updated, a retry is needed.
2259 : : */
1459 2260 [ + + ]: 560 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2261 : : tupleid, oldtuple, slot,
2262 : : canSetTag, updateCxt,
2263 : : &result,
2264 : : &retry_slot,
2265 : : &inserted_tuple,
2266 : : &insert_destrel))
2267 : : {
2268 : : /* success! */
2269 : 459 : updateCxt->crossPartUpdate = true;
2270 : :
2271 : : /*
2272 : : * If the partitioned table being updated is referenced in foreign
2273 : : * keys, queue up trigger events to check that none of them were
2274 : : * violated. No special treatment is needed in
2275 : : * non-cross-partition update situations, because the leaf
2276 : : * partition's AR update triggers will take care of that. During
2277 : : * cross-partition updates implemented as delete on the source
2278 : : * partition followed by insert on the destination partition,
2279 : : * AR-UPDATE triggers of the root table (that is, the table
2280 : : * mentioned in the query) must be fired.
2281 : : *
2282 : : * NULL insert_destrel means that the move failed to occur, that
2283 : : * is, the update failed, so no need to anything in that case.
2284 : : */
1456 2285 [ + + ]: 459 : if (insert_destrel &&
2286 [ + + ]: 415 : resultRelInfo->ri_TrigDesc &&
2287 [ + + ]: 184 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2288 : 153 : ExecCrossPartitionUpdateForeignKey(context,
2289 : : resultRelInfo,
2290 : : insert_destrel,
2291 : : tupleid, slot,
2292 : : inserted_tuple);
2293 : :
1459 2294 : 463 : return TM_Ok;
2295 : : }
2296 : :
2297 : : /*
2298 : : * No luck, a retry is needed. If running MERGE, we do not do so
2299 : : * here; instead let it handle that on its own rules.
2300 : : */
728 dean.a.rasheed@gmail 2301 [ + + ]: 10 : if (context->mtstate->operation == CMD_MERGE)
815 2302 : 7 : return result;
2303 : :
2304 : : /*
2305 : : * ExecCrossPartitionUpdate installed an updated version of the new
2306 : : * tuple in the retry slot; start over.
2307 : : */
1098 2308 : 3 : slot = retry_slot;
1459 alvherre@alvh.no-ip. 2309 : 3 : goto lreplace;
2310 : : }
2311 : :
2312 : : /*
2313 : : * Check the constraints of the tuple. We've already checked the
2314 : : * partition constraint above; however, we must still ensure the tuple
2315 : : * passes all other constraints, so we will call ExecConstraints() and
2316 : : * have it validate all remaining checks.
2317 : : */
2318 [ + + ]: 162974 : if (resultRelationDesc->rd_att->constr)
2319 : 99492 : ExecConstraints(resultRelInfo, slot, estate);
2320 : :
2321 : : /*
2322 : : * replace the heap tuple
2323 : : *
2324 : : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2325 : : * the row to be updated is visible to that snapshot, and throw a
2326 : : * can't-serialize error if not. This is a special-case behavior needed
2327 : : * for referential integrity updates in transaction-snapshot mode
2328 : : * transactions.
2329 : : */
2330 : 162937 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2331 : : estate->es_output_cid,
2332 : : estate->es_snapshot,
2333 : : estate->es_crosscheck_snapshot,
2334 : : true /* wait for commit */ ,
2335 : : &context->tmfd, &updateCxt->lockmode,
2336 : : &updateCxt->updateIndexes);
2337 : :
2338 : 162925 : return result;
2339 : : }
2340 : :
2341 : : /*
2342 : : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2343 : : *
2344 : : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2345 : : * returns indicating that the tuple was updated.
2346 : : */
2347 : : static void
2348 : 162939 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2349 : : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2350 : : HeapTuple oldtuple, TupleTableSlot *slot)
2351 : : {
2352 : 162939 : ModifyTableState *mtstate = context->mtstate;
1098 dean.a.rasheed@gmail 2353 : 162939 : List *recheckIndexes = NIL;
2354 : :
2355 : : /* insert index entries for tuple if necessary */
1091 tomas.vondra@postgre 2356 [ + + + + ]: 162939 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2357 : : {
26 alvherre@kurilemu.de 2358 :GNC 89444 : bits32 flags = EIIT_IS_UPDATE;
2359 : :
2360 [ + + ]: 89444 : if (updateCxt->updateIndexes == TU_Summarizing)
2361 : 1641 : flags |= EIIT_ONLY_SUMMARIZING;
2362 : 89444 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo, context->estate,
2363 : : flags, slot, NIL,
2364 : : NULL);
2365 : : }
2366 : :
2367 : : /* AFTER ROW UPDATE Triggers */
1459 alvherre@alvh.no-ip. 2368 :CBC 162893 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2369 : : NULL, NULL,
2370 : : tupleid, oldtuple, slot,
2371 : : recheckIndexes,
2372 [ + + ]: 162893 : mtstate->operation == CMD_INSERT ?
2373 : : mtstate->mt_oc_transition_capture :
2374 : : mtstate->mt_transition_capture,
2375 : : false);
2376 : :
1098 dean.a.rasheed@gmail 2377 : 162891 : list_free(recheckIndexes);
2378 : :
2379 : : /*
2380 : : * Check any WITH CHECK OPTION constraints from parent views. We are
2381 : : * required to do this after testing all constraints and uniqueness
2382 : : * violations per the SQL spec, so we do it after actually updating the
2383 : : * record in the heap and all indexes.
2384 : : *
2385 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2386 : : * are looking for at this point.
2387 : : */
1459 alvherre@alvh.no-ip. 2388 [ + + ]: 162891 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2389 : 254 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2390 : : slot, context->estate);
2391 : 162850 : }
2392 : :
2393 : : /*
2394 : : * Queues up an update event using the target root partitioned table's
2395 : : * trigger to check that a cross-partition update hasn't broken any foreign
2396 : : * keys pointing into it.
2397 : : */
2398 : : static void
1456 2399 : 153 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2400 : : ResultRelInfo *sourcePartInfo,
2401 : : ResultRelInfo *destPartInfo,
2402 : : ItemPointer tupleid,
2403 : : TupleTableSlot *oldslot,
2404 : : TupleTableSlot *newslot)
2405 : : {
2406 : : ListCell *lc;
2407 : : ResultRelInfo *rootRelInfo;
2408 : : List *ancestorRels;
2409 : :
2410 : 153 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2411 : 153 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2412 : :
2413 : : /*
2414 : : * For any foreign keys that point directly into a non-root ancestors of
2415 : : * the source partition, we can in theory fire an update event to enforce
2416 : : * those constraints using their triggers, if we could tell that both the
2417 : : * source and the destination partitions are under the same ancestor. But
2418 : : * for now, we simply report an error that those cannot be enforced.
2419 : : */
2420 [ + - + + : 333 : foreach(lc, ancestorRels)
+ + ]
2421 : : {
2422 : 183 : ResultRelInfo *rInfo = lfirst(lc);
2423 : 183 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2424 : 183 : bool has_noncloned_fkey = false;
2425 : :
2426 : : /* Root ancestor's triggers will be processed. */
2427 [ + + ]: 183 : if (rInfo == rootRelInfo)
2428 : 150 : continue;
2429 : :
2430 [ + - + - ]: 33 : if (trigdesc && trigdesc->trig_update_after_row)
2431 : : {
2432 [ + + ]: 114 : for (int i = 0; i < trigdesc->numtriggers; i++)
2433 : : {
2434 : 84 : Trigger *trig = &trigdesc->triggers[i];
2435 : :
2436 [ + + + - ]: 87 : if (!trig->tgisclone &&
2437 : 3 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2438 : : {
2439 : 3 : has_noncloned_fkey = true;
2440 : 3 : break;
2441 : : }
2442 : : }
2443 : : }
2444 : :
2445 [ + + ]: 33 : if (has_noncloned_fkey)
2446 [ + - ]: 3 : ereport(ERROR,
2447 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2448 : : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2449 : : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2450 : : RelationGetRelationName(rInfo->ri_RelationDesc),
2451 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2452 : : errhint("Consider defining the foreign key on table \"%s\".",
2453 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2454 : : }
2455 : :
2456 : : /* Perform the root table's triggers. */
2457 : 150 : ExecARUpdateTriggers(context->estate,
2458 : : rootRelInfo, sourcePartInfo, destPartInfo,
2459 : : tupleid, NULL, newslot, NIL, NULL, true);
2460 : 150 : }
2461 : :
2462 : : /* ----------------------------------------------------------------
2463 : : * ExecUpdate
2464 : : *
2465 : : * note: we can't run UPDATE queries with transactions
2466 : : * off because UPDATEs are actually INSERTs and our
2467 : : * scan will mistakenly loop forever, updating the tuple
2468 : : * it just inserted.. This should be fixed but until it
2469 : : * is, we don't want to get stuck in an infinite loop
2470 : : * which corrupts your database..
2471 : : *
2472 : : * When updating a table, tupleid identifies the tuple to update and
2473 : : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2474 : : * oldtuple is passed to the triggers and identifies what to update, and
2475 : : * tupleid is invalid. When updating a foreign table, tupleid is
2476 : : * invalid; the FDW has to figure out which row to update using data from
2477 : : * the planSlot. oldtuple is passed to foreign table triggers; it is
2478 : : * NULL when the foreign table has no relevant triggers.
2479 : : *
2480 : : * oldSlot contains the old tuple value.
2481 : : * slot contains the new tuple value to be stored.
2482 : : * planSlot is the output of the ModifyTable's subplan; we use it
2483 : : * to access values from other input tables (for RETURNING),
2484 : : * row-ID junk columns, etc.
2485 : : *
2486 : : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2487 : : * had identified the tuple to update, it will identify the tuple
2488 : : * actually updated after EvalPlanQual.
2489 : : * ----------------------------------------------------------------
2490 : : */
2491 : : static TupleTableSlot *
1459 2492 : 162689 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2493 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2494 : : TupleTableSlot *slot, bool canSetTag)
2495 : : {
2496 : 162689 : EState *estate = context->estate;
1978 heikki.linnakangas@i 2497 : 162689 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1459 alvherre@alvh.no-ip. 2498 : 162689 : UpdateContext updateCxt = {0};
2499 : : TM_Result result;
2500 : :
2501 : : /*
2502 : : * abort the operation if not running transactions
2503 : : */
6000 tgl@sss.pgh.pa.us 2504 [ - + ]: 162689 : if (IsBootstrapProcessingMode())
6000 tgl@sss.pgh.pa.us 2505 [ # # ]:UBC 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2506 : :
2507 : : /*
2508 : : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2509 : : * done if it says we are.
2510 : : */
1098 dean.a.rasheed@gmail 2511 [ + + ]:CBC 162689 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
1459 alvherre@alvh.no-ip. 2512 : 66 : return NULL;
2513 : :
2514 : : /* INSTEAD OF ROW UPDATE Triggers */
5635 tgl@sss.pgh.pa.us 2515 [ + + ]: 162611 : if (resultRelInfo->ri_TrigDesc &&
2516 [ + + ]: 2911 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2517 : : {
2574 andres@anarazel.de 2518 [ + + ]: 63 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2519 : : oldtuple, slot))
2489 tgl@sss.pgh.pa.us 2520 : 9 : return NULL; /* "do nothing" */
2521 : : }
4753 2522 [ + + ]: 162548 : else if (resultRelInfo->ri_FdwRoutine)
2523 : : {
2524 : : /* Fill in GENERATEd columns */
1459 alvherre@alvh.no-ip. 2525 : 95 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2526 : :
2527 : : /*
2528 : : * update in foreign table: let the FDW do it
2529 : : */
4753 tgl@sss.pgh.pa.us 2530 : 95 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2531 : : resultRelInfo,
2532 : : slot,
2533 : : context->planSlot);
2534 : :
2535 [ + + ]: 95 : if (slot == NULL) /* "do nothing" */
2536 : 1 : return NULL;
2537 : :
2538 : : /*
2539 : : * AFTER ROW Triggers or RETURNING expressions might reference the
2540 : : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2541 : : * them. (This covers the case where the FDW replaced the slot.)
2542 : : */
2574 andres@anarazel.de 2543 : 94 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2544 : : }
2545 : : else
2546 : : {
2547 : : ItemPointerData lockedtid;
2548 : :
2549 : : /*
2550 : : * If we generate a new candidate tuple after EvalPlanQual testing, we
2551 : : * must loop back here to try again. (We don't need to redo triggers,
2552 : : * however. If there are any BEFORE triggers then trigger.c will have
2553 : : * done table_tuple_lock to lock the correct tuple, so there's no need
2554 : : * to do them again.)
2555 : : */
1459 alvherre@alvh.no-ip. 2556 : 162453 : redo_act:
537 noah@leadboat.com 2557 : 162509 : lockedtid = *tupleid;
1459 alvherre@alvh.no-ip. 2558 : 162509 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2559 : : canSetTag, &updateCxt);
2560 : :
2561 : : /*
2562 : : * If ExecUpdateAct reports that a cross-partition update was done,
2563 : : * then the RETURNING tuple (if any) has been projected and there's
2564 : : * nothing else for us to do.
2565 : : */
2566 [ + + ]: 162351 : if (updateCxt.crossPartUpdate)
2567 : 453 : return context->cpUpdateReturningSlot;
2568 : :
5635 tgl@sss.pgh.pa.us 2569 [ + + + + : 161964 : switch (result)
- ]
2570 : : {
2549 andres@anarazel.de 2571 : 42 : case TM_SelfModified:
2572 : :
2573 : : /*
2574 : : * The target tuple was already updated or deleted by the
2575 : : * current command, or by a later command in the current
2576 : : * transaction. The former case is possible in a join UPDATE
2577 : : * where multiple tuples join to the same target tuple. This
2578 : : * is pretty questionable, but Postgres has always allowed it:
2579 : : * we just execute the first update action and ignore
2580 : : * additional update attempts.
2581 : : *
2582 : : * The latter case arises if the tuple is modified by a
2583 : : * command in a BEFORE trigger, or perhaps by a command in a
2584 : : * volatile function used in the query. In such situations we
2585 : : * should not ignore the update, but it is equally unsafe to
2586 : : * proceed. We don't want to discard the original UPDATE
2587 : : * while keeping the triggered actions based on it; and we
2588 : : * have no principled way to merge this update with the
2589 : : * previous ones. So throwing an error is the only safe
2590 : : * course.
2591 : : *
2592 : : * If a trigger actually intends this type of interaction, it
2593 : : * can re-execute the UPDATE (assuming it can figure out how)
2594 : : * and then return NULL to cancel the outer update.
2595 : : */
1459 alvherre@alvh.no-ip. 2596 [ + + ]: 42 : if (context->tmfd.cmax != estate->es_output_cid)
4888 kgrittn@postgresql.o 2597 [ + - ]: 3 : ereport(ERROR,
2598 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2599 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2600 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2601 : :
2602 : : /* Else, already updated by self; nothing to do */
5635 tgl@sss.pgh.pa.us 2603 : 39 : return NULL;
2604 : :
2549 andres@anarazel.de 2605 : 161834 : case TM_Ok:
5635 tgl@sss.pgh.pa.us 2606 : 161834 : break;
2607 : :
2549 andres@anarazel.de 2608 : 84 : case TM_Updated:
2609 : : {
2610 : : TupleTableSlot *inputslot;
2611 : : TupleTableSlot *epqslot;
2612 : :
2613 [ + + ]: 84 : if (IsolationUsesXactSnapshot())
2614 [ + - ]: 2 : ereport(ERROR,
2615 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2616 : : errmsg("could not serialize access due to concurrent update")));
2617 : :
2618 : : /*
2619 : : * Already know that we're going to need to do EPQ, so
2620 : : * fetch tuple directly into the right slot.
2621 : : */
703 akorotkov@postgresql 2622 : 82 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2623 : : resultRelInfo->ri_RangeTableIndex);
2624 : :
2625 : 82 : result = table_tuple_lock(resultRelationDesc, tupleid,
2626 : : estate->es_snapshot,
2627 : : inputslot, estate->es_output_cid,
2628 : : updateCxt.lockmode, LockWaitBlock,
2629 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2630 : : &context->tmfd);
2631 : :
2632 [ + + + - ]: 80 : switch (result)
2633 : : {
2634 : 75 : case TM_Ok:
2635 [ - + ]: 75 : Assert(context->tmfd.traversed);
2636 : :
2637 : 75 : epqslot = EvalPlanQual(context->epqstate,
2638 : : resultRelationDesc,
2639 : : resultRelInfo->ri_RangeTableIndex,
2640 : : inputslot);
2641 [ + + + + ]: 75 : if (TupIsNull(epqslot))
2642 : : /* Tuple not passing quals anymore, exiting... */
2643 : 19 : return NULL;
2644 : :
2645 : : /* Make sure ri_oldTupleSlot is initialized. */
2646 [ - + ]: 56 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
703 akorotkov@postgresql 2647 :UBC 0 : ExecInitUpdateProjection(context->mtstate,
2648 : : resultRelInfo);
2649 : :
537 noah@leadboat.com 2650 [ + + ]:CBC 56 : if (resultRelInfo->ri_needLockTagTuple)
2651 : : {
2652 : 1 : UnlockTuple(resultRelationDesc,
2653 : : &lockedtid, InplaceUpdateTupleLock);
2654 : 1 : LockTuple(resultRelationDesc,
2655 : : tupleid, InplaceUpdateTupleLock);
2656 : : }
2657 : :
2658 : : /* Fetch the most recent version of old tuple. */
703 akorotkov@postgresql 2659 : 56 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2660 [ - + ]: 56 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2661 : : tupleid,
2662 : : SnapshotAny,
2663 : : oldSlot))
703 akorotkov@postgresql 2664 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
703 akorotkov@postgresql 2665 :CBC 56 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2666 : : epqslot, oldSlot);
2667 : 56 : goto redo_act;
2668 : :
2669 : 1 : case TM_Deleted:
2670 : : /* tuple already deleted; nothing to do */
2671 : 1 : return NULL;
2672 : :
2673 : 4 : case TM_SelfModified:
2674 : :
2675 : : /*
2676 : : * This can be reached when following an update
2677 : : * chain from a tuple updated by another session,
2678 : : * reaching a tuple that was already updated in
2679 : : * this transaction. If previously modified by
2680 : : * this command, ignore the redundant update,
2681 : : * otherwise error out.
2682 : : *
2683 : : * See also TM_SelfModified response to
2684 : : * table_tuple_update() above.
2685 : : */
2686 [ + + ]: 4 : if (context->tmfd.cmax != estate->es_output_cid)
2687 [ + - ]: 1 : ereport(ERROR,
2688 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2689 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2690 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2691 : 3 : return NULL;
2692 : :
703 akorotkov@postgresql 2693 :UBC 0 : default:
2694 : : /* see table_tuple_lock call in ExecDelete() */
2695 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2696 : : result);
2697 : : return NULL;
2698 : : }
2699 : : }
2700 : :
2701 : : break;
2702 : :
2549 andres@anarazel.de 2703 :CBC 4 : case TM_Deleted:
2704 [ - + ]: 4 : if (IsolationUsesXactSnapshot())
2549 andres@anarazel.de 2705 [ # # ]:UBC 0 : ereport(ERROR,
2706 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2707 : : errmsg("could not serialize access due to concurrent delete")));
2708 : : /* tuple already deleted; nothing to do */
5635 tgl@sss.pgh.pa.us 2709 :CBC 4 : return NULL;
2710 : :
5635 tgl@sss.pgh.pa.us 2711 :UBC 0 : default:
2488 andres@anarazel.de 2712 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2713 : : result);
2714 : : return NULL;
2715 : : }
2716 : : }
2717 : :
5497 tgl@sss.pgh.pa.us 2718 [ + + ]:CBC 161976 : if (canSetTag)
2719 : 161667 : (estate->es_processed)++;
2720 : :
1459 alvherre@alvh.no-ip. 2721 : 161976 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2722 : : slot);
2723 : :
2724 : : /* Process RETURNING if present */
6000 tgl@sss.pgh.pa.us 2725 [ + + ]: 161893 : if (resultRelInfo->ri_projectReturning)
31 dean.a.rasheed@gmail 2726 :GNC 1214 : return ExecProcessReturning(context, resultRelInfo, false,
2727 : : oldSlot, slot, context->planSlot);
2728 : :
6000 tgl@sss.pgh.pa.us 2729 :CBC 160679 : return NULL;
2730 : : }
2731 : :
2732 : : /*
2733 : : * ExecOnConflictLockRow --- lock the row for ON CONFLICT DO SELECT/UPDATE
2734 : : *
2735 : : * Try to lock tuple for update as part of speculative insertion for ON
2736 : : * CONFLICT DO UPDATE or ON CONFLICT DO SELECT FOR UPDATE/SHARE.
2737 : : *
2738 : : * Returns true if the row is successfully locked, or false if the caller must
2739 : : * retry the INSERT from scratch.
2740 : : */
2741 : : static bool
31 dean.a.rasheed@gmail 2742 :GNC 2681 : ExecOnConflictLockRow(ModifyTableContext *context,
2743 : : TupleTableSlot *existing,
2744 : : ItemPointer conflictTid,
2745 : : Relation relation,
2746 : : LockTupleMode lockmode,
2747 : : bool isUpdate)
2748 : : {
2749 : : TM_FailureData tmfd;
2750 : : TM_Result test;
2751 : : Datum xminDatum;
2752 : : TransactionId xmin;
2753 : : bool isnull;
2754 : :
2755 : : /*
2756 : : * Lock tuple with lockmode. Don't follow updates when tuple cannot be
2757 : : * locked without doing so. A row locking conflict here means our
2758 : : * previous conclusion that the tuple is conclusively committed is not
2759 : : * true anymore.
2760 : : */
2488 andres@anarazel.de 2761 :CBC 2681 : test = table_tuple_lock(relation, conflictTid,
1459 alvherre@alvh.no-ip. 2762 : 2681 : context->estate->es_snapshot,
2763 : 2681 : existing, context->estate->es_output_cid,
2764 : : lockmode, LockWaitBlock, 0,
2765 : : &tmfd);
3964 andres@anarazel.de 2766 [ + + - + : 2681 : switch (test)
+ - ]
2767 : : {
2549 2768 : 2657 : case TM_Ok:
2769 : : /* success! */
3964 2770 : 2657 : break;
2771 : :
2549 2772 : 21 : case TM_Invisible:
2773 : :
2774 : : /*
2775 : : * This can occur when a just inserted tuple is updated again in
2776 : : * the same command. E.g. because multiple rows with the same
2777 : : * conflicting key values are inserted.
2778 : : *
2779 : : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2780 : : * case. We do not want to proceed because it would lead to the
2781 : : * same row being updated a second time in some unspecified order,
2782 : : * and in contrast to plain UPDATEs there's no historical behavior
2783 : : * to break.
2784 : : *
2785 : : * It is the user's responsibility to prevent this situation from
2786 : : * occurring. These problems are why the SQL standard similarly
2787 : : * specifies that for SQL MERGE, an exception must be raised in
2788 : : * the event of an attempt to update the same row twice.
2789 : : */
2790 : 21 : xminDatum = slot_getsysattr(existing,
2791 : : MinTransactionIdAttributeNumber,
2792 : : &isnull);
2793 [ - + ]: 21 : Assert(!isnull);
2794 : 21 : xmin = DatumGetTransactionId(xminDatum);
2795 : :
2796 [ + - ]: 21 : if (TransactionIdIsCurrentTransactionId(xmin))
3964 2797 [ + - + + ]: 21 : ereport(ERROR,
2798 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2799 : : /* translator: %s is a SQL command name */
2800 : : errmsg("%s command cannot affect row a second time",
2801 : : isUpdate ? "ON CONFLICT DO UPDATE" : "ON CONFLICT DO SELECT"),
2802 : : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2803 : :
2804 : : /* This shouldn't happen */
3964 andres@anarazel.de 2805 [ # # ]:UBC 0 : elog(ERROR, "attempted to lock invisible tuple");
2806 : : break;
2807 : :
2549 2808 : 0 : case TM_SelfModified:
2809 : :
2810 : : /*
2811 : : * This state should never be reached. As a dirty snapshot is used
2812 : : * to find conflicting tuples, speculative insertion wouldn't have
2813 : : * seen this row to conflict with.
2814 : : */
3964 2815 [ # # ]: 0 : elog(ERROR, "unexpected self-updated tuple");
2816 : : break;
2817 : :
2549 andres@anarazel.de 2818 :CBC 2 : case TM_Updated:
3964 2819 [ - + ]: 2 : if (IsolationUsesXactSnapshot())
3964 andres@anarazel.de 2820 [ # # ]:UBC 0 : ereport(ERROR,
2821 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2822 : : errmsg("could not serialize access due to concurrent update")));
2823 : :
2824 : : /*
2825 : : * Tell caller to try again from the very start.
2826 : : *
2827 : : * It does not make sense to use the usual EvalPlanQual() style
2828 : : * loop here, as the new version of the row might not conflict
2829 : : * anymore, or the conflicting tuple has actually been deleted.
2830 : : */
2549 andres@anarazel.de 2831 :CBC 2 : ExecClearTuple(existing);
2832 : 2 : return false;
2833 : :
2834 : 1 : case TM_Deleted:
2835 [ - + ]: 1 : if (IsolationUsesXactSnapshot())
2549 andres@anarazel.de 2836 [ # # ]:UBC 0 : ereport(ERROR,
2837 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2838 : : errmsg("could not serialize access due to concurrent delete")));
2839 : :
2840 : : /* see TM_Updated case */
2549 andres@anarazel.de 2841 :CBC 1 : ExecClearTuple(existing);
3964 2842 : 1 : return false;
2843 : :
3964 andres@anarazel.de 2844 :UBC 0 : default:
2488 2845 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2846 : : }
2847 : :
2848 : : /* Success, the tuple is locked. */
31 dean.a.rasheed@gmail 2849 :GNC 2657 : return true;
2850 : : }
2851 : :
2852 : : /*
2853 : : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2854 : : *
2855 : : * Try to lock tuple for update as part of speculative insertion. If
2856 : : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2857 : : * (but still lock row, even though it may not satisfy estate's
2858 : : * snapshot).
2859 : : *
2860 : : * Returns true if we're done (with or without an update), or false if
2861 : : * the caller must retry the INSERT from scratch.
2862 : : */
2863 : : static bool
2864 : 2626 : ExecOnConflictUpdate(ModifyTableContext *context,
2865 : : ResultRelInfo *resultRelInfo,
2866 : : ItemPointer conflictTid,
2867 : : TupleTableSlot *excludedSlot,
2868 : : bool canSetTag,
2869 : : TupleTableSlot **returning)
2870 : : {
2871 : 2626 : ModifyTableState *mtstate = context->mtstate;
2872 : 2626 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2873 : 2626 : Relation relation = resultRelInfo->ri_RelationDesc;
2874 : 2626 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2875 : 2626 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2876 : : LockTupleMode lockmode;
2877 : :
2878 : : /*
2879 : : * Parse analysis should have blocked ON CONFLICT for all system
2880 : : * relations, which includes these. There's no fundamental obstacle to
2881 : : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2882 : : * ExecUpdate() caller.
2883 : : */
2884 [ - + ]: 2626 : Assert(!resultRelInfo->ri_needLockTagTuple);
2885 : :
2886 : : /* Determine lock mode to use */
2887 : 2626 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2888 : :
2889 : : /* Lock tuple for update */
2890 [ + + ]: 2626 : if (!ExecOnConflictLockRow(context, existing, conflictTid,
2891 : : resultRelInfo->ri_RelationDesc, lockmode, true))
2892 : 3 : return false;
2893 : :
2894 : : /*
2895 : : * Verify that the tuple is visible to our MVCC snapshot if the current
2896 : : * isolation level mandates that.
2897 : : *
2898 : : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2899 : : * CONFLICT ... WHERE clause may prevent us from reaching that.
2900 : : *
2901 : : * This means we only ever continue when a new command in the current
2902 : : * transaction could see the row, even though in READ COMMITTED mode the
2903 : : * tuple will not be visible according to the current statement's
2904 : : * snapshot. This is in line with the way UPDATE deals with newer tuple
2905 : : * versions.
2906 : : */
1459 alvherre@alvh.no-ip. 2907 :CBC 2611 : ExecCheckTupleVisible(context->estate, relation, existing);
2908 : :
2909 : : /*
2910 : : * Make tuple and any needed join variables available to ExecQual and
2911 : : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2912 : : * the target's existing tuple is installed in the scantuple. EXCLUDED
2913 : : * has been made to reference INNER_VAR in setrefs.c, but there is no
2914 : : * other redirection.
2915 : : */
2566 andres@anarazel.de 2916 : 2611 : econtext->ecxt_scantuple = existing;
3964 2917 : 2611 : econtext->ecxt_innertuple = excludedSlot;
2918 : 2611 : econtext->ecxt_outertuple = NULL;
2919 : :
3288 2920 [ + + ]: 2611 : if (!ExecQual(onConflictSetWhere, econtext))
2921 : : {
2566 2922 : 16 : ExecClearTuple(existing); /* see return below */
3964 2923 [ - + ]: 16 : InstrCountFiltered1(&mtstate->ps, 1);
2924 : 16 : return true; /* done with the tuple */
2925 : : }
2926 : :
2927 [ + + ]: 2595 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2928 : : {
2929 : : /*
2930 : : * Check target's existing tuple against UPDATE-applicable USING
2931 : : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2932 : : *
2933 : : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2934 : : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK.
2935 : : * Since SELECT permission on the target table is always required for
2936 : : * INSERT ... ON CONFLICT DO UPDATE, the rewriter also adds SELECT RLS
2937 : : * checks/WCOs for SELECT security quals, using WCOs of the same kind,
2938 : : * and this check enforces them too.
2939 : : *
2940 : : * The rewriter will also have associated UPDATE-applicable straight
2941 : : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2942 : : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2943 : : * kinds, so there is no danger of spurious over-enforcement in the
2944 : : * INSERT or UPDATE path.
2945 : : */
2946 : 36 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2947 : : existing,
2948 : : mtstate->ps.state);
2949 : : }
2950 : :
2951 : : /* Project the new tuple version */
2911 alvherre@alvh.no-ip. 2952 : 2583 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2953 : :
2954 : : /*
2955 : : * Note that it is possible that the target tuple has been modified in
2956 : : * this session, after the above table_tuple_lock. We choose to not error
2957 : : * out in that case, in line with ExecUpdate's treatment of similar cases.
2958 : : * This can happen if an UPDATE is triggered from within ExecQual(),
2959 : : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2960 : : * wCTE in the ON CONFLICT's SET.
2961 : : */
2962 : :
2963 : : /* Execute UPDATE with projection */
1459 2964 : 5151 : *returning = ExecUpdate(context, resultRelInfo,
2965 : : conflictTid, NULL, existing,
2566 andres@anarazel.de 2966 : 2583 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2967 : : canSetTag);
2968 : :
2969 : : /*
2970 : : * Clear out existing tuple, as there might not be another conflict among
2971 : : * the next input rows. Don't want to hold resources till the end of the
2972 : : * query. First though, make sure that the returning slot, if any, has a
2973 : : * local copy of any OLD pass-by-reference values, if it refers to any OLD
2974 : : * columns.
2975 : : */
423 dean.a.rasheed@gmail 2976 [ + + ]: 2568 : if (*returning != NULL &&
2977 [ + + ]: 117 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2978 : 6 : ExecMaterializeSlot(*returning);
2979 : :
2566 andres@anarazel.de 2980 : 2568 : ExecClearTuple(existing);
2981 : :
3964 2982 : 2568 : return true;
2983 : : }
2984 : :
2985 : : /*
2986 : : * ExecOnConflictSelect --- execute SELECT of INSERT ON CONFLICT DO SELECT
2987 : : *
2988 : : * If SELECT FOR UPDATE/SHARE is specified, try to lock tuple as part of
2989 : : * speculative insertion. If a qual originating from ON CONFLICT DO SELECT is
2990 : : * satisfied, select (but still lock row, even though it may not satisfy
2991 : : * estate's snapshot).
2992 : : *
2993 : : * Returns true if we're done (with or without a select), or false if the
2994 : : * caller must retry the INSERT from scratch.
2995 : : */
2996 : : static bool
31 dean.a.rasheed@gmail 2997 :GNC 147 : ExecOnConflictSelect(ModifyTableContext *context,
2998 : : ResultRelInfo *resultRelInfo,
2999 : : ItemPointer conflictTid,
3000 : : TupleTableSlot *excludedSlot,
3001 : : bool canSetTag,
3002 : : TupleTableSlot **returning)
3003 : : {
3004 : 147 : ModifyTableState *mtstate = context->mtstate;
3005 : 147 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3006 : 147 : Relation relation = resultRelInfo->ri_RelationDesc;
3007 : 147 : ExprState *onConflictSelectWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
3008 : 147 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
3009 : 147 : LockClauseStrength lockStrength = resultRelInfo->ri_onConflict->oc_LockStrength;
3010 : :
3011 : : /*
3012 : : * Parse analysis should have blocked ON CONFLICT for all system
3013 : : * relations, which includes these. There's no fundamental obstacle to
3014 : : * supporting this; we'd just need to handle LOCKTAG_TUPLE appropriately.
3015 : : */
3016 [ - + ]: 147 : Assert(!resultRelInfo->ri_needLockTagTuple);
3017 : :
3018 : : /* Fetch/lock existing tuple, according to the requested lock strength */
3019 [ + + ]: 147 : if (lockStrength == LCS_NONE)
3020 : : {
3021 [ - + ]: 92 : if (!table_tuple_fetch_row_version(relation,
3022 : : conflictTid,
3023 : : SnapshotAny,
3024 : : existing))
31 dean.a.rasheed@gmail 3025 [ # # ]:UNC 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
3026 : : }
3027 : : else
3028 : : {
3029 : : LockTupleMode lockmode;
3030 : :
31 dean.a.rasheed@gmail 3031 [ + + + + :GNC 55 : switch (lockStrength)
- ]
3032 : : {
3033 : 1 : case LCS_FORKEYSHARE:
3034 : 1 : lockmode = LockTupleKeyShare;
3035 : 1 : break;
3036 : 1 : case LCS_FORSHARE:
3037 : 1 : lockmode = LockTupleShare;
3038 : 1 : break;
3039 : 1 : case LCS_FORNOKEYUPDATE:
3040 : 1 : lockmode = LockTupleNoKeyExclusive;
3041 : 1 : break;
3042 : 52 : case LCS_FORUPDATE:
3043 : 52 : lockmode = LockTupleExclusive;
3044 : 52 : break;
31 dean.a.rasheed@gmail 3045 :UNC 0 : default:
3046 [ # # ]: 0 : elog(ERROR, "Unexpected lock strength %d", (int) lockStrength);
3047 : : }
3048 : :
31 dean.a.rasheed@gmail 3049 [ - + ]:GNC 55 : if (!ExecOnConflictLockRow(context, existing, conflictTid,
3050 : : resultRelInfo->ri_RelationDesc, lockmode, false))
31 dean.a.rasheed@gmail 3051 :UNC 0 : return false;
3052 : : }
3053 : :
3054 : : /*
3055 : : * Verify that the tuple is visible to our MVCC snapshot if the current
3056 : : * isolation level mandates that. See comments in ExecOnConflictUpdate().
3057 : : */
31 dean.a.rasheed@gmail 3058 :GNC 138 : ExecCheckTupleVisible(context->estate, relation, existing);
3059 : :
3060 : : /*
3061 : : * Make tuple and any needed join variables available to ExecQual. The
3062 : : * EXCLUDED tuple is installed in ecxt_innertuple, while the target's
3063 : : * existing tuple is installed in the scantuple. EXCLUDED has been made
3064 : : * to reference INNER_VAR in setrefs.c, but there is no other redirection.
3065 : : */
3066 : 138 : econtext->ecxt_scantuple = existing;
3067 : 138 : econtext->ecxt_innertuple = excludedSlot;
3068 : 138 : econtext->ecxt_outertuple = NULL;
3069 : :
3070 [ + + ]: 138 : if (!ExecQual(onConflictSelectWhere, econtext))
3071 : : {
3072 : 18 : ExecClearTuple(existing); /* see return below */
3073 [ - + ]: 18 : InstrCountFiltered1(&mtstate->ps, 1);
3074 : 18 : return true; /* done with the tuple */
3075 : : }
3076 : :
3077 [ + + ]: 120 : if (resultRelInfo->ri_WithCheckOptions != NIL)
3078 : : {
3079 : : /*
3080 : : * Check target's existing tuple against SELECT-applicable USING
3081 : : * security barrier quals (if any), enforced here as RLS checks/WCOs.
3082 : : *
3083 : : * The rewriter creates WCOs from the USING quals of SELECT policies,
3084 : : * and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK. If FOR
3085 : : * UPDATE/SHARE was specified, UPDATE permissions are required on the
3086 : : * target table, and the rewriter also adds WCOs built from the USING
3087 : : * quals of UPDATE policies, using WCOs of the same kind, and this
3088 : : * check enforces them too.
3089 : : */
3090 : 18 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
3091 : : existing,
3092 : : mtstate->ps.state);
3093 : : }
3094 : :
3095 : : /* RETURNING is required for DO SELECT */
3096 [ - + ]: 117 : Assert(resultRelInfo->ri_projectReturning);
3097 : :
3098 : 117 : *returning = ExecProcessReturning(context, resultRelInfo, false,
3099 : : existing, existing, context->planSlot);
3100 : :
3101 [ + - ]: 117 : if (canSetTag)
3102 : 117 : context->estate->es_processed++;
3103 : :
3104 : : /*
3105 : : * Before releasing the existing tuple, make sure that the returning slot
3106 : : * has a local copy of any pass-by-reference values.
3107 : : */
3108 : 117 : ExecMaterializeSlot(*returning);
3109 : :
3110 : : /*
3111 : : * Clear out existing tuple, as there might not be another conflict among
3112 : : * the next input rows. Don't want to hold resources till the end of the
3113 : : * query.
3114 : : */
3115 : 117 : ExecClearTuple(existing);
3116 : :
3117 : 117 : return true;
3118 : : }
3119 : :
 3120                 :            : /*
 3121                 :            :  * Perform MERGE.
                 :            :  *
                 :            :  * tupleid or oldtuple identifies the current target tuple for the MATCHED
                 :            :  * and NOT MATCHED BY SOURCE cases (tupleid if the target relation is a
                 :            :  * table, oldtuple if it is a view); both are invalid in the NOT MATCHED
                 :            :  * [BY TARGET] case.  Returns the RETURNING result slot, if any, else NULL.
 3122                 :            :  */
 3123                 :            : static TupleTableSlot *
 1448 alvherre@alvh.no-ip.     3124 :CBC        7770 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
 3125                 :            :           ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
 3126                 :            : {
  728 dean.a.rasheed@gmail     3127 :           7770 :     TupleTableSlot *rslot = NULL;
 3128                 :            :     bool        matched;
 3129                 :            :
 3130                 :            :     /*-----
 3131                 :            :      * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
 3132                 :            :      * valid, depending on whether the result relation is a table or a view.
 3133                 :            :      * We execute the first action for which the additional WHEN MATCHED AND
 3134                 :            :      * quals pass.  If an action without quals is found, that action is
 3135                 :            :      * executed.
 3136                 :            :      *
 3137                 :            :      * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
 3138                 :            :      * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
 3139                 :            :      * in sequence until one passes.  This is almost identical to the WHEN
 3140                 :            :      * MATCHED case, and both cases are handled by ExecMergeMatched().
 3141                 :            :      *
 3142                 :            :      * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
 3143                 :            :      * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
 3144                 :            :      * TARGET] actions in sequence until one passes.
 3145                 :            :      *
 3146                 :            :      * Things get interesting in case of concurrent update/delete of the
 3147                 :            :      * target tuple.  Such concurrent update/delete is detected while we are
 3148                 :            :      * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
 3149                 :            :      *
 3150                 :            :      * A concurrent update can:
 3151                 :            :      *
 3152                 :            :      * 1. modify the target tuple so that the results from checking any
 3153                 :            :      *    additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
 3154                 :            :      *    SOURCE actions potentially change, but the result from the join
 3155                 :            :      *    quals does not change.
 3156                 :            :      *
 3157                 :            :      *    In this case, we are still dealing with the same kind of match
 3158                 :            :      *    (MATCHED or NOT MATCHED BY SOURCE).  We recheck the same list of
 3159                 :            :      *    actions from the start and choose the first one that satisfies the
 3160                 :            :      *    new target tuple.
 3161                 :            :      *
 3162                 :            :      * 2. modify the target tuple in the WHEN MATCHED case so that the join
 3163                 :            :      *    quals no longer pass and hence the source and target tuples no
 3164                 :            :      *    longer match.
 3165                 :            :      *
 3166                 :            :      *    In this case, we are now dealing with a NOT MATCHED case, and we
 3167                 :            :      *    process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
 3168                 :            :      *    TARGET] actions.  First ExecMergeMatched() processes the list of
 3169                 :            :      *    WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
 3170                 :            :      *    then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
 3171                 :            :      *    TARGET] actions in sequence until one passes.  Thus we may execute
 3172                 :            :      *    two actions; one of each kind.
 3173                 :            :      *
 3174                 :            :      * Thus we support concurrent updates that turn MATCHED candidate rows
 3175                 :            :      * into NOT MATCHED rows.  However, we do not attempt to support cases
 3176                 :            :      * that would turn NOT MATCHED rows into MATCHED rows, or which would
 3177                 :            :      * cause a target row to match a different source row.
 3178                 :            :      *
 3179                 :            :      * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
 3180                 :            :      * [BY TARGET].
 3181                 :            :      *
 3182                 :            :      * ExecMergeMatched() takes care of following the update chain and
 3183                 :            :      * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
 3184                 :            :      * action, as long as the target tuple still exists.  If the target tuple
 3185                 :            :      * gets deleted or a concurrent update causes the join quals to fail, it
 3186                 :            :      * returns a matched status of false and we call ExecMergeNotMatched().
 3187                 :            :      * Given that ExecMergeMatched() always makes progress by following the
 3188                 :            :      * update chain and we never switch from ExecMergeNotMatched() to
 3189                 :            :      * ExecMergeMatched(), there is no risk of a livelock.
 3190                 :            :      */
  745                          3191 [ +  +  +  + ]:       7770 :     matched = tupleid != NULL || oldtuple != NULL;
 1448 alvherre@alvh.no-ip.     3192 [ +  + ]:       7770 :     if (matched)
  728 dean.a.rasheed@gmail     3193 :           6424 :         rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
 3194                 :            :                                  canSetTag, &matched);
 3195                 :            :
 3196                 :            :     /*
 3197                 :            :      * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
 3198                 :            :      * join, or a previously MATCHED tuple for which ExecMergeMatched() set
 3199                 :            :      * "matched" to false, indicating that it no longer matches).
 3200                 :            :      */
 1448 alvherre@alvh.no-ip.     3201 [ +  + ]:       7722 :     if (!matched)
 3202                 :            :     {
 3203                 :            :         /*
 3204                 :            :          * If a concurrent update turned a MATCHED case into a NOT MATCHED
 3205                 :            :          * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
 3206                 :            :          * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
 3207                 :            :          * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
 3208                 :            :          * SOURCE action, and computed the row to return.  If so, we cannot
 3209                 :            :          * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
 3210                 :            :          * pending (to be processed on the next call to ExecModifyTable()).
 3211                 :            :          * Otherwise, just process the action now.
 3212                 :            :          */
  715 dean.a.rasheed@gmail     3213 [ +  + ]:       1355 :         if (rslot == NULL)
 3214                 :           1353 :             rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
 3215                 :            :         else
 3216                 :              2 :             context->mtstate->mt_merge_pending_not_matched = context->planSlot;
 3217                 :            :     }
 3218                 :            :
  728                          3219 :           7692 :     return rslot;
 3220                 :            : }
3221 : :
3222 : : /*
3223 : : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3224 : : * action, depending on whether the join quals are satisfied. If the target
3225 : : * relation is a table, the current target tuple is identified by tupleid.
3226 : : * Otherwise, if the target relation is a view, oldtuple is the current target
3227 : : * tuple from the view.
3228 : : *
3229 : : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3230 : : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3231 : : * action do not pass, we check the second, then the third and so on. If we
3232 : : * reach the end without finding a qualifying action, we return NULL.
3233 : : * Otherwise, we execute the qualifying action and return its RETURNING
3234 : : * result, if any, or NULL.
3235 : : *
3236 : : * On entry, "*matched" is assumed to be true. If a concurrent update or
3237 : : * delete is detected that causes the join quals to no longer pass, we set it
3238 : : * to false, indicating that the caller should process any NOT MATCHED [BY
3239 : : * TARGET] actions.
3240 : : *
3241 : : * After a concurrent update, we restart from the first action to look for a
3242 : : * new qualifying action to execute. If the join quals originally passed, and
3243 : : * the concurrent update caused them to no longer pass, then we switch from
3244 : : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3245 : : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3246 : : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3247 : : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3248 : : */
3249 : : static TupleTableSlot *
1448 alvherre@alvh.no-ip. 3250 : 6424 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3251 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3252 : : bool *matched)
3253 : : {
3254 : 6424 : ModifyTableState *mtstate = context->mtstate;
715 dean.a.rasheed@gmail 3255 : 6424 : List **mergeActions = resultRelInfo->ri_MergeActions;
3256 : : ItemPointerData lockedtid;
3257 : : List *actionStates;
728 3258 : 6424 : TupleTableSlot *newslot = NULL;
3259 : 6424 : TupleTableSlot *rslot = NULL;
1448 alvherre@alvh.no-ip. 3260 : 6424 : EState *estate = context->estate;
3261 : 6424 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3262 : : bool isNull;
3263 : 6424 : EPQState *epqstate = &mtstate->mt_epqstate;
3264 : : ListCell *l;
3265 : :
3266 : : /* Expect matched to be true on entry */
715 dean.a.rasheed@gmail 3267 [ - + ]: 6424 : Assert(*matched);
3268 : :
3269 : : /*
3270 : : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3271 : : * are done.
3272 : : */
3273 [ + + ]: 6424 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3274 [ + + ]: 603 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
728 3275 : 267 : return NULL;
3276 : :
3277 : : /*
3278 : : * Make tuple and any needed join variables available to ExecQual and
3279 : : * ExecProject. The target's existing tuple is installed in the scantuple.
3280 : : * This target relation's slot is required only in the case of a MATCHED
3281 : : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3282 : : */
1448 alvherre@alvh.no-ip. 3283 : 6157 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3284 : 6157 : econtext->ecxt_innertuple = context->planSlot;
3285 : 6157 : econtext->ecxt_outertuple = NULL;
3286 : :
3287 : : /*
3288 : : * This routine is only invoked for matched target rows, so we should
3289 : : * either have the tupleid of the target row, or an old tuple from the
3290 : : * target wholerow junk attr.
3291 : : */
745 dean.a.rasheed@gmail 3292 [ + + - + ]: 6157 : Assert(tupleid != NULL || oldtuple != NULL);
537 noah@leadboat.com 3293 : 6157 : ItemPointerSetInvalid(&lockedtid);
745 dean.a.rasheed@gmail 3294 [ + + ]: 6157 : if (oldtuple != NULL)
3295 : : {
537 noah@leadboat.com 3296 [ - + ]: 48 : Assert(!resultRelInfo->ri_needLockTagTuple);
745 dean.a.rasheed@gmail 3297 : 48 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3298 : : false);
3299 : : }
3300 : : else
3301 : : {
537 noah@leadboat.com 3302 [ + + ]: 6109 : if (resultRelInfo->ri_needLockTagTuple)
3303 : : {
3304 : : /*
3305 : : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3306 : : * that don't match mas_whenqual. MERGE on system catalogs is a
3307 : : * minor use case, so don't bother optimizing those.
3308 : : */
3309 : 4115 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3310 : : InplaceUpdateTupleLock);
3311 : 4115 : lockedtid = *tupleid;
3312 : : }
3313 [ - + ]: 6109 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3314 : : tupleid,
3315 : : SnapshotAny,
3316 : : resultRelInfo->ri_oldTupleSlot))
537 noah@leadboat.com 3317 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch the target tuple");
3318 : : }
3319 : :
3320 : : /*
3321 : : * Test the join condition. If it's satisfied, perform a MATCHED action.
3322 : : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3323 : : *
3324 : : * Note that this join condition will be NULL if there are no NOT MATCHED
3325 : : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3326 : : * need only consider MATCHED actions here.
3327 : : */
715 dean.a.rasheed@gmail 3328 [ + + ]:CBC 6157 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3329 : 6064 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3330 : : else
3331 : 93 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3332 : :
3333 : 6157 : lmerge_matched:
3334 : :
3335 [ + + + + : 11046 : foreach(l, actionStates)
+ + ]
3336 : : {
1448 alvherre@alvh.no-ip. 3337 : 6237 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3338 : 6237 : CmdType commandType = relaction->mas_action->commandType;
3339 : : TM_Result result;
3340 : 6237 : UpdateContext updateCxt = {0};
3341 : :
3342 : : /*
3343 : : * Test condition, if any.
3344 : : *
3345 : : * In the absence of any condition, we perform the action
3346 : : * unconditionally (no need to check separately since ExecQual() will
3347 : : * return true if there are no conditions to evaluate).
3348 : : */
3349 [ + + ]: 6237 : if (!ExecQual(relaction->mas_whenqual, econtext))
3350 : 4849 : continue;
3351 : :
3352 : : /*
3353 : : * Check if the existing target tuple meets the USING checks of
3354 : : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3355 : : * error.
3356 : : *
3357 : : * The WITH CHECK quals for UPDATE RLS policies are applied in
3358 : : * ExecUpdateAct() and hence we need not do anything special to handle
3359 : : * them.
3360 : : *
3361 : : * NOTE: We must do this after WHEN quals are evaluated, so that we
3362 : : * check policies only when they matter.
3363 : : */
951 dean.a.rasheed@gmail 3364 [ + + + + ]: 1388 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3365 : : {
1448 alvherre@alvh.no-ip. 3366 : 57 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3367 : : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3368 : : resultRelInfo,
3369 : : resultRelInfo->ri_oldTupleSlot,
3370 [ + + ]: 57 : context->mtstate->ps.state);
3371 : : }
3372 : :
3373 : : /* Perform stated action */
3374 [ + + + - ]: 1376 : switch (commandType)
3375 : : {
3376 : 1098 : case CMD_UPDATE:
3377 : :
3378 : : /*
3379 : : * Project the output tuple, and use that to update the table.
3380 : : * We don't need to filter out junk attributes, because the
3381 : : * UPDATE action's targetlist doesn't have any.
3382 : : */
3383 : 1098 : newslot = ExecProject(relaction->mas_proj);
3384 : :
728 dean.a.rasheed@gmail 3385 : 1098 : mtstate->mt_merge_action = relaction;
1448 alvherre@alvh.no-ip. 3386 [ + + ]: 1098 : if (!ExecUpdatePrologue(context, resultRelInfo,
3387 : : tupleid, NULL, newslot, &result))
3388 : : {
1098 dean.a.rasheed@gmail 3389 [ + + ]: 10 : if (result == TM_Ok)
537 noah@leadboat.com 3390 : 80 : goto out; /* "do nothing" */
3391 : :
1098 dean.a.rasheed@gmail 3392 : 7 : break; /* concurrent update/delete */
3393 : : }
3394 : :
3395 : : /* INSTEAD OF ROW UPDATE Triggers */
745 3396 [ + + ]: 1088 : if (resultRelInfo->ri_TrigDesc &&
3397 [ + + ]: 174 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3398 : : {
3399 [ - + ]: 39 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3400 : : oldtuple, newslot))
537 noah@leadboat.com 3401 :UBC 0 : goto out; /* "do nothing" */
3402 : : }
3403 : : else
3404 : : {
3405 : : /* checked ri_needLockTagTuple above */
610 noah@leadboat.com 3406 [ - + ]:CBC 1049 : Assert(oldtuple == NULL);
3407 : :
745 dean.a.rasheed@gmail 3408 : 1049 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3409 : : NULL, newslot, canSetTag,
3410 : : &updateCxt);
3411 : :
3412 : : /*
3413 : : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3414 : : * cross-partition update was done, then there's nothing
3415 : : * else for us to do --- the UPDATE has been turned into a
3416 : : * DELETE and an INSERT, and we must not perform any of
3417 : : * the usual post-update tasks. Also, the RETURNING tuple
3418 : : * (if any) has been projected, so we can just return
3419 : : * that.
3420 : : */
3421 [ + + ]: 1037 : if (updateCxt.crossPartUpdate)
3422 : : {
3423 : 69 : mtstate->mt_merge_updated += 1;
537 noah@leadboat.com 3424 : 69 : rslot = context->cpUpdateReturningSlot;
3425 : 69 : goto out;
3426 : : }
3427 : : }
3428 : :
745 dean.a.rasheed@gmail 3429 [ + + ]: 1007 : if (result == TM_Ok)
3430 : : {
1448 alvherre@alvh.no-ip. 3431 : 963 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3432 : : tupleid, NULL, newslot);
3433 : 957 : mtstate->mt_merge_updated += 1;
3434 : : }
3435 : 1001 : break;
3436 : :
3437 : 263 : case CMD_DELETE:
728 dean.a.rasheed@gmail 3438 : 263 : mtstate->mt_merge_action = relaction;
1448 alvherre@alvh.no-ip. 3439 [ + + ]: 263 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3440 : : NULL, NULL, &result))
3441 : : {
1098 dean.a.rasheed@gmail 3442 [ + + ]: 6 : if (result == TM_Ok)
537 noah@leadboat.com 3443 : 3 : goto out; /* "do nothing" */
3444 : :
1098 dean.a.rasheed@gmail 3445 : 3 : break; /* concurrent update/delete */
3446 : : }
3447 : :
3448 : : /* INSTEAD OF ROW DELETE Triggers */
745 3449 [ + + ]: 257 : if (resultRelInfo->ri_TrigDesc &&
3450 [ + + ]: 28 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3451 : : {
3452 [ - + ]: 3 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3453 : : oldtuple))
537 noah@leadboat.com 3454 :UBC 0 : goto out; /* "do nothing" */
3455 : : }
3456 : : else
3457 : : {
3458 : : /* checked ri_needLockTagTuple above */
610 noah@leadboat.com 3459 [ - + ]:CBC 254 : Assert(oldtuple == NULL);
3460 : :
745 dean.a.rasheed@gmail 3461 : 254 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3462 : : false);
3463 : : }
3464 : :
1448 alvherre@alvh.no-ip. 3465 [ + + ]: 257 : if (result == TM_Ok)
3466 : : {
3467 : 248 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3468 : : false);
3469 : 248 : mtstate->mt_merge_deleted += 1;
3470 : : }
3471 : 257 : break;
3472 : :
3473 : 15 : case CMD_NOTHING:
3474 : : /* Doing nothing is always OK */
3475 : 15 : result = TM_Ok;
3476 : 15 : break;
3477 : :
1448 alvherre@alvh.no-ip. 3478 :UBC 0 : default:
715 dean.a.rasheed@gmail 3479 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3480 : : }
3481 : :
1448 alvherre@alvh.no-ip. 3482 [ + + + + :CBC 1283 : switch (result)
- - ]
3483 : : {
3484 : 1220 : case TM_Ok:
3485 : : /* all good; perform final actions */
1214 3486 [ + + + + ]: 1220 : if (canSetTag && commandType != CMD_NOTHING)
1448 3487 : 1194 : (estate->es_processed)++;
3488 : :
3489 : 1220 : break;
3490 : :
3491 : 16 : case TM_SelfModified:
3492 : :
3493 : : /*
3494 : : * The target tuple was already updated or deleted by the
3495 : : * current command, or by a later command in the current
3496 : : * transaction. The former case is explicitly disallowed by
3497 : : * the SQL standard for MERGE, which insists that the MERGE
3498 : : * join condition should not join a target row to more than
3499 : : * one source row.
3500 : : *
3501 : : * The latter case arises if the tuple is modified by a
3502 : : * command in a BEFORE trigger, or perhaps by a command in a
3503 : : * volatile function used in the query. In such situations we
3504 : : * should not ignore the MERGE action, but it is equally
3505 : : * unsafe to proceed. We don't want to discard the original
3506 : : * MERGE action while keeping the triggered actions based on
3507 : : * it; and it would be no better to allow the original MERGE
3508 : : * action while discarding the updates that it triggered. So
3509 : : * throwing an error is the only safe course.
3510 : : */
738 dean.a.rasheed@gmail 3511 [ + + ]: 16 : if (context->tmfd.cmax != estate->es_output_cid)
3512 [ + - ]: 6 : ereport(ERROR,
3513 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3514 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3515 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3516 : :
1448 alvherre@alvh.no-ip. 3517 [ + - ]: 10 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3518 [ + - ]: 10 : ereport(ERROR,
3519 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3520 : : /* translator: %s is a SQL command name */
3521 : : errmsg("%s command cannot affect row a second time",
3522 : : "MERGE"),
3523 : : errhint("Ensure that not more than one source row matches any one target row.")));
3524 : :
3525 : : /* This shouldn't happen */
1448 alvherre@alvh.no-ip. 3526 [ # # ]:UBC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3527 : : break;
3528 : :
1448 alvherre@alvh.no-ip. 3529 :CBC 5 : case TM_Deleted:
3530 [ - + ]: 5 : if (IsolationUsesXactSnapshot())
1448 alvherre@alvh.no-ip. 3531 [ # # ]:UBC 0 : ereport(ERROR,
3532 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3533 : : errmsg("could not serialize access due to concurrent delete")));
3534 : :
3535 : : /*
3536 : : * If the tuple was already deleted, set matched to false to
3537 : : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3538 : : */
728 dean.a.rasheed@gmail 3539 :CBC 5 : *matched = false;
537 noah@leadboat.com 3540 : 5 : goto out;
3541 : :
1448 alvherre@alvh.no-ip. 3542 : 42 : case TM_Updated:
3543 : : {
3544 : : bool was_matched;
3545 : : Relation resultRelationDesc;
3546 : : TupleTableSlot *epqslot,
3547 : : *inputslot;
3548 : : LockTupleMode lockmode;
3549 : :
10 akorotkov@postgresql 3550 [ + + ]: 42 : if (IsolationUsesXactSnapshot())
3551 [ + - ]: 1 : ereport(ERROR,
3552 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3553 : : errmsg("could not serialize access due to concurrent update")));
3554 : :
3555 : : /*
3556 : : * The target tuple was concurrently updated by some other
3557 : : * transaction. If we are currently processing a MATCHED
3558 : : * action, use EvalPlanQual() with the new version of the
3559 : : * tuple and recheck the join qual, to detect a change
3560 : : * from the MATCHED to the NOT MATCHED cases. If we are
3561 : : * already processing a NOT MATCHED BY SOURCE action, we
3562 : : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3563 : : * MATCHED).
3564 : : */
715 dean.a.rasheed@gmail 3565 : 41 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
1448 alvherre@alvh.no-ip. 3566 : 41 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3567 : 41 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3568 : :
715 dean.a.rasheed@gmail 3569 [ + - ]: 41 : if (was_matched)
3570 : 41 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3571 : : resultRelInfo->ri_RangeTableIndex);
3572 : : else
715 dean.a.rasheed@gmail 3573 :UBC 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3574 : :
1448 alvherre@alvh.no-ip. 3575 :CBC 41 : result = table_tuple_lock(resultRelationDesc, tupleid,
3576 : : estate->es_snapshot,
3577 : : inputslot, estate->es_output_cid,
3578 : : lockmode, LockWaitBlock,
3579 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3580 : : &context->tmfd);
3581 [ + - + - ]: 41 : switch (result)
3582 : : {
3583 : 40 : case TM_Ok:
3584 : :
3585 : : /*
3586 : : * If the tuple was updated and migrated to
3587 : : * another partition concurrently, the current
3588 : : * MERGE implementation can't follow. There's
3589 : : * probably a better way to handle this case, but
3590 : : * it'd require recognizing the relation to which
3591 : : * the tuple moved, and setting our current
3592 : : * resultRelInfo to that.
3593 : : */
191 dean.a.rasheed@gmail 3594 [ - + ]: 40 : if (ItemPointerIndicatesMovedPartitions(tupleid))
1448 alvherre@alvh.no-ip. 3595 [ # # ]:UBC 0 : ereport(ERROR,
3596 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3597 : : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3598 : :
3599 : : /*
3600 : : * If this was a MATCHED case, use EvalPlanQual()
3601 : : * to recheck the join condition.
3602 : : */
715 dean.a.rasheed@gmail 3603 [ + - ]:CBC 40 : if (was_matched)
3604 : : {
3605 : 40 : epqslot = EvalPlanQual(epqstate,
3606 : : resultRelationDesc,
3607 : : resultRelInfo->ri_RangeTableIndex,
3608 : : inputslot);
3609 : :
3610 : : /*
3611 : : * If the subplan didn't return a tuple, then
3612 : : * we must be dealing with an inner join for
3613 : : * which the join condition no longer matches.
3614 : : * This can only happen if there are no NOT
3615 : : * MATCHED actions, and so there is nothing
3616 : : * more to do.
3617 : : */
3618 [ + - - + ]: 40 : if (TupIsNull(epqslot))
537 noah@leadboat.com 3619 :UBC 0 : goto out;
3620 : :
3621 : : /*
3622 : : * If we got a NULL ctid from the subplan, the
3623 : : * join quals no longer pass and we switch to
3624 : : * the NOT MATCHED BY SOURCE case.
3625 : : */
715 dean.a.rasheed@gmail 3626 :CBC 40 : (void) ExecGetJunkAttribute(epqslot,
3627 : 40 : resultRelInfo->ri_RowIdAttNo,
3628 : : &isNull);
3629 [ + + ]: 40 : if (isNull)
3630 : 2 : *matched = false;
3631 : :
3632 : : /*
3633 : : * Otherwise, recheck the join quals to see if
3634 : : * we need to switch to the NOT MATCHED BY
3635 : : * SOURCE case.
3636 : : */
537 noah@leadboat.com 3637 [ + + ]: 40 : if (resultRelInfo->ri_needLockTagTuple)
3638 : : {
3639 [ + - ]: 1 : if (ItemPointerIsValid(&lockedtid))
3640 : 1 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3641 : : InplaceUpdateTupleLock);
191 dean.a.rasheed@gmail 3642 : 1 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3643 : : InplaceUpdateTupleLock);
3644 : 1 : lockedtid = *tupleid;
3645 : : }
3646 : :
715 3647 [ - + ]: 40 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3648 : : tupleid,
3649 : : SnapshotAny,
3650 : : resultRelInfo->ri_oldTupleSlot))
715 dean.a.rasheed@gmail 3651 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch the target tuple");
3652 : :
715 dean.a.rasheed@gmail 3653 [ + + ]:CBC 40 : if (*matched)
3654 : 38 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3655 : : econtext);
3656 : :
3657 : : /* Switch lists, if necessary */
3658 [ + + ]: 40 : if (!*matched)
3659 : : {
3660 : 4 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3661 : :
3662 : : /*
3663 : : * If we have both NOT MATCHED BY SOURCE
3664 : : * and NOT MATCHED BY TARGET actions (a
3665 : : * full join between the source and target
3666 : : * relations), the single previously
3667 : : * matched tuple from the outer plan node
3668 : : * is treated as two not matched tuples,
3669 : : * in the same way as if they had not
3670 : : * matched to start with. Therefore, we
3671 : : * must adjust the outer plan node's tuple
3672 : : * count, if we're instrumenting the
3673 : : * query, to get the correct "skipped" row
3674 : : * count --- see show_modifytable_info().
3675 : : */
119 3676 [ + + ]: 4 : if (outerPlanState(mtstate)->instrument &&
3677 [ + - ]: 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] &&
3678 [ + - ]: 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET])
3679 : 1 : InstrUpdateTupleCount(outerPlanState(mtstate)->instrument, 1.0);
3680 : : }
3681 : : }
3682 : :
3683 : : /*
3684 : : * Loop back and process the MATCHED or NOT
3685 : : * MATCHED BY SOURCE actions from the start.
3686 : : */
1448 alvherre@alvh.no-ip. 3687 : 40 : goto lmerge_matched;
3688 : :
1448 alvherre@alvh.no-ip. 3689 :UBC 0 : case TM_Deleted:
3690 : :
3691 : : /*
3692 : : * tuple already deleted; tell caller to run NOT
3693 : : * MATCHED [BY TARGET] actions
3694 : : */
728 dean.a.rasheed@gmail 3695 : 0 : *matched = false;
537 noah@leadboat.com 3696 : 0 : goto out;
3697 : :
1448 alvherre@alvh.no-ip. 3698 :CBC 1 : case TM_SelfModified:
3699 : :
3700 : : /*
3701 : : * This can be reached when following an update
3702 : : * chain from a tuple updated by another session,
3703 : : * reaching a tuple that was already updated or
3704 : : * deleted by the current command, or by a later
3705 : : * command in the current transaction. As above,
3706 : : * this should always be treated as an error.
3707 : : */
3708 [ - + ]: 1 : if (context->tmfd.cmax != estate->es_output_cid)
1448 alvherre@alvh.no-ip. 3709 [ # # ]:UBC 0 : ereport(ERROR,
3710 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3711 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3712 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3713 : :
738 dean.a.rasheed@gmail 3714 [ + - ]:CBC 1 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3715 [ + - ]: 1 : ereport(ERROR,
3716 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3717 : : /* translator: %s is a SQL command name */
3718 : : errmsg("%s command cannot affect row a second time",
3719 : : "MERGE"),
3720 : : errhint("Ensure that not more than one source row matches any one target row.")));
3721 : :
3722 : : /* This shouldn't happen */
738 dean.a.rasheed@gmail 3723 [ # # ]:UBC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3724 : : goto out;
3725 : :
1448 alvherre@alvh.no-ip. 3726 : 0 : default:
3727 : : /* see table_tuple_lock call in ExecDelete() */
3728 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3729 : : result);
3730 : : goto out;
3731 : : }
3732 : : }
3733 : :
3734 : 0 : case TM_Invisible:
3735 : : case TM_WouldBlock:
3736 : : case TM_BeingModified:
3737 : : /* these should not occur */
3738 [ # # ]: 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3739 : : break;
3740 : : }
3741 : :
3742 : : /* Process RETURNING if present */
728 dean.a.rasheed@gmail 3743 [ + + ]:CBC 1220 : if (resultRelInfo->ri_projectReturning)
3744 : : {
3745 [ + + - - ]: 215 : switch (commandType)
3746 : : {
3747 : 95 : case CMD_UPDATE:
423 3748 : 95 : rslot = ExecProcessReturning(context,
3749 : : resultRelInfo,
3750 : : false,
3751 : : resultRelInfo->ri_oldTupleSlot,
3752 : : newslot,
3753 : : context->planSlot);
728 3754 : 95 : break;
3755 : :
3756 : 120 : case CMD_DELETE:
423 3757 : 120 : rslot = ExecProcessReturning(context,
3758 : : resultRelInfo,
3759 : : true,
3760 : : resultRelInfo->ri_oldTupleSlot,
3761 : : NULL,
3762 : : context->planSlot);
728 3763 : 120 : break;
3764 : :
728 dean.a.rasheed@gmail 3765 :UBC 0 : case CMD_NOTHING:
3766 : 0 : break;
3767 : :
3768 : 0 : default:
3769 [ # # ]: 0 : elog(ERROR, "unrecognized commandType: %d",
3770 : : (int) commandType);
3771 : : }
3772 : : }
3773 : :
3774 : : /*
3775 : : * We've activated one of the WHEN clauses, so we don't search
3776 : : * further. This is required behaviour, not an optimization.
3777 : : */
1448 alvherre@alvh.no-ip. 3778 :CBC 1220 : break;
3779 : : }
3780 : :
3781 : : /*
3782 : : * Successfully executed an action or no qualifying action was found.
3783 : : */
537 noah@leadboat.com 3784 : 6109 : out:
3785 [ + + ]: 6109 : if (ItemPointerIsValid(&lockedtid))
3786 : 4115 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3787 : : InplaceUpdateTupleLock);
728 dean.a.rasheed@gmail 3788 : 6109 : return rslot;
3789 : : }
3790 : :
3791 : : /*
3792 : : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3793 : : */
3794 : : static TupleTableSlot *
1448 alvherre@alvh.no-ip. 3795 : 1355 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3796 : : bool canSetTag)
3797 : : {
3798 : 1355 : ModifyTableState *mtstate = context->mtstate;
3799 : 1355 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3800 : : List *actionStates;
728 dean.a.rasheed@gmail 3801 : 1355 : TupleTableSlot *rslot = NULL;
3802 : : ListCell *l;
3803 : :
3804 : : /*
3805 : : * For INSERT actions, the root relation's merge action is OK since the
3806 : : * INSERT's targetlist and the WHEN conditions can only refer to the
3807 : : * source relation and hence it does not matter which result relation we
3808 : : * work with.
3809 : : *
3810 : : * XXX does this mean that we can avoid creating copies of actionStates on
3811 : : * partitioned tables, for not-matched actions?
3812 : : */
715 3813 : 1355 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3814 : :
3815 : : /*
3816 : : * Make source tuple available to ExecQual and ExecProject. We don't need
3817 : : * the target tuple, since the WHEN quals and targetlist can't refer to
3818 : : * the target columns.
3819 : : */
1448 alvherre@alvh.no-ip. 3820 : 1355 : econtext->ecxt_scantuple = NULL;
3821 : 1355 : econtext->ecxt_innertuple = context->planSlot;
3822 : 1355 : econtext->ecxt_outertuple = NULL;
3823 : :
3824 [ + - + + : 1790 : foreach(l, actionStates)
+ + ]
3825 : : {
3826 : 1355 : MergeActionState *action = (MergeActionState *) lfirst(l);
3827 : 1355 : CmdType commandType = action->mas_action->commandType;
3828 : : TupleTableSlot *newslot;
3829 : :
3830 : : /*
3831 : : * Test condition, if any.
3832 : : *
3833 : : * In the absence of any condition, we perform the action
3834 : : * unconditionally (no need to check separately since ExecQual() will
3835 : : * return true if there are no conditions to evaluate).
3836 : : */
3837 [ + + ]: 1355 : if (!ExecQual(action->mas_whenqual, econtext))
3838 : 435 : continue;
3839 : :
3840 : : /* Perform stated action */
3841 [ + - - ]: 920 : switch (commandType)
3842 : : {
3843 : 920 : case CMD_INSERT:
3844 : :
3845 : : /*
3846 : : * Project the tuple. In case of a partitioned table, the
3847 : : * projection was already built to use the root's descriptor,
3848 : : * so we don't need to map the tuple here.
3849 : : */
3850 : 920 : newslot = ExecProject(action->mas_proj);
728 dean.a.rasheed@gmail 3851 : 920 : mtstate->mt_merge_action = action;
3852 : :
3853 : 920 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3854 : : newslot, canSetTag, NULL, NULL);
1448 alvherre@alvh.no-ip. 3855 : 890 : mtstate->mt_merge_inserted += 1;
3856 : 890 : break;
1448 alvherre@alvh.no-ip. 3857 :UBC 0 : case CMD_NOTHING:
3858 : : /* Do nothing */
3859 : 0 : break;
3860 : 0 : default:
3861 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3862 : : }
3863 : :
3864 : : /*
3865 : : * We've activated one of the WHEN clauses, so we don't search
3866 : : * further. This is required behaviour, not an optimization.
3867 : : */
1448 alvherre@alvh.no-ip. 3868 :CBC 890 : break;
3869 : : }
3870 : :
728 dean.a.rasheed@gmail 3871 : 1325 : return rslot;
3872 : : }
3873 : :
/*
 * Initialize state for execution of MERGE.
 *
 * Builds a MergeActionState for every WHEN clause of every result relation,
 * initializes the per-relation join condition and MERGE tuple slots, and --
 * when INSERT actions target an inherited (non-partitioned) root that is not
 * itself in the result-relation array -- initializes the root's WITH CHECK
 * OPTION and RETURNING state as well.
 */
void
ExecInitMerge(ModifyTableState *mtstate, EState *estate)
{
	List	   *mergeActionLists = mtstate->mt_mergeActionLists;
	List	   *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
	ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
	ResultRelInfo *resultRelInfo;
	ExprContext *econtext;
	ListCell   *lc;
	int			i;

	/* Nothing to do if this ModifyTable has no MERGE actions at all */
	if (mergeActionLists == NIL)
		return;

	mtstate->mt_merge_subcommands = 0;

	if (mtstate->ps.ps_ExprContext == NULL)
		ExecAssignExprContext(estate, &mtstate->ps);
	econtext = mtstate->ps.ps_ExprContext;

	/*
	 * Create a MergeActionState for each action on the mergeActionList and
	 * add it to either a list of matched actions or not-matched actions.
	 *
	 * Similar logic appears in ExecInitPartitionInfo(), so if changing
	 * anything here, do so there too.
	 */
	i = 0;
	foreach(lc, mergeActionLists)
	{
		List	   *mergeActionList = lfirst(lc);
		Node	   *joinCondition;
		TupleDesc	relationDesc;
		ListCell   *l;

		/* mergeJoinConditions parallels mergeActionLists, one per rel */
		joinCondition = (Node *) list_nth(mergeJoinConditions, i);
		resultRelInfo = mtstate->resultRelInfo + i;
		i++;
		relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);

		/* initialize slots for MERGE fetches from this rel */
		if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
			ExecInitMergeTupleSlots(mtstate, resultRelInfo);

		/* initialize state for join condition checking */
		resultRelInfo->ri_MergeJoinCondition =
			ExecInitQual((List *) joinCondition, &mtstate->ps);

		foreach(l, mergeActionList)
		{
			MergeAction *action = (MergeAction *) lfirst(l);
			MergeActionState *action_state;
			TupleTableSlot *tgtslot;
			TupleDesc	tgtdesc;

			/*
			 * Build action merge state for this rel.  (For partitions,
			 * equivalent code exists in ExecInitPartitionInfo.)
			 */
			action_state = makeNode(MergeActionState);
			action_state->mas_action = action;
			action_state->mas_whenqual = ExecInitQual((List *) action->qual,
													  &mtstate->ps);

			/*
			 * We create three lists - one for each MergeMatchKind - and stick
			 * the MergeActionState into the appropriate list.
			 */
			resultRelInfo->ri_MergeActions[action->matchKind] =
				lappend(resultRelInfo->ri_MergeActions[action->matchKind],
						action_state);

			switch (action->commandType)
			{
				case CMD_INSERT:
					/* INSERT actions always use rootRelInfo */
					ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
										action->targetList);

					/*
					 * If the MERGE targets a partitioned table, any INSERT
					 * actions must be routed through it, not the child
					 * relations. Initialize the routing struct and the root
					 * table's "new" tuple slot for that, if not already done.
					 * The projection we prepare, for all relations, uses the
					 * root relation descriptor, and targets the plan's root
					 * slot.  (This is consistent with the fact that we
					 * checked the plan output to match the root relation,
					 * above.)
					 */
					if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
						RELKIND_PARTITIONED_TABLE)
					{
						if (mtstate->mt_partition_tuple_routing == NULL)
						{
							/*
							 * Initialize planstate for routing if not already
							 * done.
							 *
							 * Note that the slot is managed as a standalone
							 * slot belonging to ModifyTableState, so we pass
							 * NULL for the 2nd argument.
							 */
							mtstate->mt_root_tuple_slot =
								table_slot_create(rootRelInfo->ri_RelationDesc,
												  NULL);
							mtstate->mt_partition_tuple_routing =
								ExecSetupPartitionTupleRouting(estate,
															   rootRelInfo->ri_RelationDesc);
						}
						tgtslot = mtstate->mt_root_tuple_slot;
						tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
					}
					else
					{
						/*
						 * If the MERGE targets an inherited table, we insert
						 * into the root table, so we must initialize its
						 * "new" tuple slot, if not already done, and use its
						 * relation descriptor for the projection.
						 *
						 * For non-inherited tables, rootRelInfo and
						 * resultRelInfo are the same, and the "new" tuple
						 * slot will already have been initialized.
						 */
						if (rootRelInfo->ri_newTupleSlot == NULL)
							rootRelInfo->ri_newTupleSlot =
								table_slot_create(rootRelInfo->ri_RelationDesc,
												  &estate->es_tupleTable);

						tgtslot = rootRelInfo->ri_newTupleSlot;
						tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
					}

					action_state->mas_proj =
						ExecBuildProjectionInfo(action->targetList, econtext,
												tgtslot,
												&mtstate->ps,
												tgtdesc);

					mtstate->mt_merge_subcommands |= MERGE_INSERT;
					break;
				case CMD_UPDATE:
					action_state->mas_proj =
						ExecBuildUpdateProjection(action->targetList,
												  true,
												  action->updateColnos,
												  relationDesc,
												  econtext,
												  resultRelInfo->ri_newTupleSlot,
												  &mtstate->ps);
					mtstate->mt_merge_subcommands |= MERGE_UPDATE;
					break;
				case CMD_DELETE:
					mtstate->mt_merge_subcommands |= MERGE_DELETE;
					break;
				case CMD_NOTHING:
					/* DO NOTHING actions need no projection state */
					break;
				default:
					elog(ERROR, "unknown action in MERGE WHEN clause");
					break;
			}
		}
	}

	/*
	 * If the MERGE targets an inherited table, any INSERT actions will use
	 * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
	 * Therefore we must initialize its WITH CHECK OPTION constraints and
	 * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
	 * entries.
	 *
	 * Note that the planner does not build a withCheckOptionList or
	 * returningList for the root relation, but as in ExecInitPartitionInfo,
	 * we can use the first resultRelInfo entry as a reference to calculate
	 * the attno's for the root table.
	 */
	if (rootRelInfo != mtstate->resultRelInfo &&
		rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
		(mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
	{
		ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
		Relation	rootRelation = rootRelInfo->ri_RelationDesc;
		Relation	firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
		int			firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
		AttrMap    *part_attmap = NULL;
		bool		found_whole_row;

		if (node->withCheckOptionLists != NIL)
		{
			List	   *wcoList;
			List	   *wcoExprs = NIL;

			/* There should be as many WCO lists as result rels */
			Assert(list_length(node->withCheckOptionLists) ==
				   list_length(node->resultRelations));

			/*
			 * Use the first WCO list as a reference. In the most common case,
			 * this will be for the same relation as rootRelInfo, and so there
			 * will be no need to adjust its attno's.
			 */
			wcoList = linitial(node->withCheckOptionLists);
			if (rootRelation != firstResultRel)
			{
				/* Convert any Vars in it to contain the root's attno's */
				part_attmap =
					build_attrmap_by_name(RelationGetDescr(rootRelation),
										  RelationGetDescr(firstResultRel),
										  false);

				wcoList = (List *)
					map_variable_attnos((Node *) wcoList,
										firstVarno, 0,
										part_attmap,
										RelationGetForm(rootRelation)->reltype,
										&found_whole_row);
			}

			foreach(lc, wcoList)
			{
				WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
				ExprState  *wcoExpr = ExecInitQual(castNode(List, wco->qual),
												   &mtstate->ps);

				wcoExprs = lappend(wcoExprs, wcoExpr);
			}

			rootRelInfo->ri_WithCheckOptions = wcoList;
			rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
		}

		if (node->returningLists != NIL)
		{
			List	   *returningList;

			/* There should be as many returning lists as result rels */
			Assert(list_length(node->returningLists) ==
				   list_length(node->resultRelations));

			/*
			 * Use the first returning list as a reference. In the most common
			 * case, this will be for the same relation as rootRelInfo, and so
			 * there will be no need to adjust its attno's.
			 */
			returningList = linitial(node->returningLists);
			if (rootRelation != firstResultRel)
			{
				/* Convert any Vars in it to contain the root's attno's */
				if (part_attmap == NULL)
					part_attmap =
						build_attrmap_by_name(RelationGetDescr(rootRelation),
											  RelationGetDescr(firstResultRel),
											  false);

				returningList = (List *)
					map_variable_attnos((Node *) returningList,
										firstVarno, 0,
										part_attmap,
										RelationGetForm(rootRelation)->reltype,
										&found_whole_row);
			}
			rootRelInfo->ri_returningList = returningList;

			/* Initialize the RETURNING projection */
			rootRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(returningList, econtext,
										mtstate->ps.ps_ResultTupleSlot,
										&mtstate->ps,
										RelationGetDescr(rootRelation));
		}
	}
}
4150 : :
4151 : : /*
4152 : : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
4153 : : *
4154 : : * We mark 'projectNewInfoValid' even though the projections themselves
4155 : : * are not initialized here.
4156 : : */
4157 : : void
1448 alvherre@alvh.no-ip. 4158 : 949 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
4159 : : ResultRelInfo *resultRelInfo)
4160 : : {
4161 : 949 : EState *estate = mtstate->ps.state;
4162 : :
4163 [ - + ]: 949 : Assert(!resultRelInfo->ri_projectNewInfoValid);
4164 : :
4165 : 949 : resultRelInfo->ri_oldTupleSlot =
4166 : 949 : table_slot_create(resultRelInfo->ri_RelationDesc,
4167 : : &estate->es_tupleTable);
4168 : 949 : resultRelInfo->ri_newTupleSlot =
4169 : 949 : table_slot_create(resultRelInfo->ri_RelationDesc,
4170 : : &estate->es_tupleTable);
4171 : 949 : resultRelInfo->ri_projectNewInfoValid = true;
4172 : 949 : }
4173 : :
4174 : : /*
4175 : : * Process BEFORE EACH STATEMENT triggers
4176 : : */
4177 : : static void
6000 tgl@sss.pgh.pa.us 4178 : 59510 : fireBSTriggers(ModifyTableState *node)
4179 : : {
2918 alvherre@alvh.no-ip. 4180 : 59510 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
1973 heikki.linnakangas@i 4181 : 59510 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4182 : :
6000 tgl@sss.pgh.pa.us 4183 [ + + + + : 59510 : switch (node->operation)
- ]
4184 : : {
4185 : 45452 : case CMD_INSERT:
3240 rhaas@postgresql.org 4186 : 45452 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
2918 alvherre@alvh.no-ip. 4187 [ + + ]: 45446 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3964 andres@anarazel.de 4188 : 460 : ExecBSUpdateTriggers(node->ps.state,
4189 : : resultRelInfo);
6000 tgl@sss.pgh.pa.us 4190 : 45446 : break;
4191 : 7215 : case CMD_UPDATE:
3240 rhaas@postgresql.org 4192 : 7215 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
6000 tgl@sss.pgh.pa.us 4193 : 7215 : break;
4194 : 6104 : case CMD_DELETE:
3240 rhaas@postgresql.org 4195 : 6104 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
6000 tgl@sss.pgh.pa.us 4196 : 6104 : break;
1448 alvherre@alvh.no-ip. 4197 : 739 : case CMD_MERGE:
4198 [ + + ]: 739 : if (node->mt_merge_subcommands & MERGE_INSERT)
4199 : 406 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4200 [ + + ]: 739 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4201 : 494 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4202 [ + + ]: 739 : if (node->mt_merge_subcommands & MERGE_DELETE)
4203 : 193 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4204 : 739 : break;
6000 tgl@sss.pgh.pa.us 4205 :UBC 0 : default:
4206 [ # # ]: 0 : elog(ERROR, "unknown operation");
4207 : : break;
4208 : : }
6000 tgl@sss.pgh.pa.us 4209 :CBC 59504 : }
4210 : :
4211 : : /*
4212 : : * Process AFTER EACH STATEMENT triggers
4213 : : */
4214 : : static void
3182 rhodiumtoad@postgres 4215 : 57834 : fireASTriggers(ModifyTableState *node)
4216 : : {
2918 alvherre@alvh.no-ip. 4217 : 57834 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
1973 heikki.linnakangas@i 4218 : 57834 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4219 : :
6000 tgl@sss.pgh.pa.us 4220 [ + + + + : 57834 : switch (node->operation)
- ]
4221 : : {
4222 : 44265 : case CMD_INSERT:
2918 alvherre@alvh.no-ip. 4223 [ + + ]: 44265 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3964 andres@anarazel.de 4224 : 406 : ExecASUpdateTriggers(node->ps.state,
4225 : : resultRelInfo,
3102 tgl@sss.pgh.pa.us 4226 : 406 : node->mt_oc_transition_capture);
3182 rhodiumtoad@postgres 4227 : 44265 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4228 : 44265 : node->mt_transition_capture);
6000 tgl@sss.pgh.pa.us 4229 : 44265 : break;
4230 : 6859 : case CMD_UPDATE:
3182 rhodiumtoad@postgres 4231 : 6859 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4232 : 6859 : node->mt_transition_capture);
6000 tgl@sss.pgh.pa.us 4233 : 6859 : break;
4234 : 6049 : case CMD_DELETE:
3182 rhodiumtoad@postgres 4235 : 6049 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4236 : 6049 : node->mt_transition_capture);
6000 tgl@sss.pgh.pa.us 4237 : 6049 : break;
1448 alvherre@alvh.no-ip. 4238 : 661 : case CMD_MERGE:
4239 [ + + ]: 661 : if (node->mt_merge_subcommands & MERGE_DELETE)
4240 : 175 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4241 : 175 : node->mt_transition_capture);
4242 [ + + ]: 661 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4243 : 443 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4244 : 443 : node->mt_transition_capture);
4245 [ + + ]: 661 : if (node->mt_merge_subcommands & MERGE_INSERT)
4246 : 371 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4247 : 371 : node->mt_transition_capture);
4248 : 661 : break;
6000 tgl@sss.pgh.pa.us 4249 :UBC 0 : default:
4250 [ # # ]: 0 : elog(ERROR, "unknown operation");
4251 : : break;
4252 : : }
6000 tgl@sss.pgh.pa.us 4253 :CBC 57834 : }
4254 : :
4255 : : /*
4256 : : * Set up the state needed for collecting transition tuples for AFTER
4257 : : * triggers.
4258 : : */
4259 : : static void
3182 rhodiumtoad@postgres 4260 : 59692 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
4261 : : {
2918 alvherre@alvh.no-ip. 4262 : 59692 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
1973 heikki.linnakangas@i 4263 : 59692 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4264 : :
4265 : : /* Check for transition tables on the directly targeted relation. */
3182 rhodiumtoad@postgres 4266 : 59692 : mtstate->mt_transition_capture =
3102 tgl@sss.pgh.pa.us 4267 : 59692 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4268 : 59692 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4269 : : mtstate->operation);
2918 alvherre@alvh.no-ip. 4270 [ + + ]: 59692 : if (plan->operation == CMD_INSERT &&
4271 [ + + ]: 45456 : plan->onConflictAction == ONCONFLICT_UPDATE)
3102 tgl@sss.pgh.pa.us 4272 : 463 : mtstate->mt_oc_transition_capture =
4273 : 463 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4274 : 463 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4275 : : CMD_UPDATE);
2977 rhaas@postgresql.org 4276 : 59692 : }
4277 : :
4278 : : /*
4279 : : * ExecPrepareTupleRouting --- prepare for routing one tuple
4280 : : *
4281 : : * Determine the partition in which the tuple in slot is to be inserted,
4282 : : * and return its ResultRelInfo in *partRelInfo. The return value is
4283 : : * a slot holding the tuple of the partition rowtype.
4284 : : *
4285 : : * This also sets the transition table information in mtstate based on the
4286 : : * selected partition.
4287 : : */
4288 : : static TupleTableSlot *
2918 alvherre@alvh.no-ip. 4289 : 387969 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
4290 : : EState *estate,
4291 : : PartitionTupleRouting *proute,
4292 : : ResultRelInfo *targetRelInfo,
4293 : : TupleTableSlot *slot,
4294 : : ResultRelInfo **partRelInfo)
4295 : : {
4296 : : ResultRelInfo *partrel;
4297 : : TupleConversionMap *map;
4298 : :
4299 : : /*
4300 : : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
4301 : : * not find a valid partition for the tuple in 'slot' then an error is
4302 : : * raised. An error may also be raised if the found partition is not a
4303 : : * valid target for INSERTs. This is required since a partitioned table
4304 : : * UPDATE to another partition becomes a DELETE+INSERT.
4305 : : */
2676 4306 : 387969 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4307 : :
4308 : : /*
4309 : : * If we're capturing transition tuples, we might need to convert from the
4310 : : * partition rowtype to root partitioned table's rowtype. But if there
4311 : : * are no BEFORE triggers on the partition that could change the tuple, we
4312 : : * can just remember the original unconverted tuple to avoid a needless
4313 : : * round trip conversion.
4314 : : */
2918 4315 [ + + ]: 387858 : if (mtstate->mt_transition_capture != NULL)
4316 : : {
4317 : : bool has_before_insert_row_trig;
4318 : :
1973 heikki.linnakangas@i 4319 [ + + ]: 98 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
4320 [ + + ]: 21 : partrel->ri_TrigDesc->trig_insert_before_row);
4321 : :
4322 : 77 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
4323 [ + + ]: 77 : !has_before_insert_row_trig ? slot : NULL;
4324 : : }
4325 : :
4326 : : /*
4327 : : * Convert the tuple, if necessary.
4328 : : */
1199 alvherre@alvh.no-ip. 4329 : 387858 : map = ExecGetRootToChildMap(partrel, estate);
2721 andres@anarazel.de 4330 [ + + ]: 387858 : if (map != NULL)
4331 : : {
1973 heikki.linnakangas@i 4332 : 34298 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4333 : :
2721 andres@anarazel.de 4334 : 34298 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4335 : : }
4336 : :
1978 heikki.linnakangas@i 4337 : 387858 : *partRelInfo = partrel;
2918 alvherre@alvh.no-ip. 4338 : 387858 : return slot;
4339 : : }
4340 : :
4341 : : /* ----------------------------------------------------------------
4342 : : * ExecModifyTable
4343 : : *
4344 : : * Perform table modifications as required, and return RETURNING results
4345 : : * if needed.
4346 : : * ----------------------------------------------------------------
4347 : : */
4348 : : static TupleTableSlot *
3163 andres@anarazel.de 4349 : 64582 : ExecModifyTable(PlanState *pstate)
4350 : : {
4351 : 64582 : ModifyTableState *node = castNode(ModifyTableState, pstate);
4352 : : ModifyTableContext context;
5861 bruce@momjian.us 4353 : 64582 : EState *estate = node->ps.state;
4354 : 64582 : CmdType operation = node->operation;
4355 : : ResultRelInfo *resultRelInfo;
4356 : : PlanState *subplanstate;
4357 : : TupleTableSlot *slot;
4358 : : TupleTableSlot *oldSlot;
4359 : : ItemPointerData tuple_ctid;
4360 : : HeapTupleData oldtupdata;
4361 : : HeapTuple oldtuple;
4362 : : ItemPointer tupleid;
4363 : : bool tuplock;
4364 : :
3155 andres@anarazel.de 4365 [ - + ]: 64582 : CHECK_FOR_INTERRUPTS();
4366 : :
4367 : : /*
4368 : : * This should NOT get called during EvalPlanQual; we should have passed a
4369 : : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4370 : : * Assert because this condition is easy to miss in testing. (Note:
4371 : : * although ModifyTable should not get executed within an EvalPlanQual
4372 : : * operation, we do have to allow it to be initialized and shut down in
4373 : : * case it is within a CTE subplan. Hence this test must be here, not in
4374 : : * ExecInitModifyTable.)
4375 : : */
2383 4376 [ - + ]: 64582 : if (estate->es_epq_active != NULL)
5160 tgl@sss.pgh.pa.us 4377 [ # # ]:UBC 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4378 : :
4379 : : /*
4380 : : * If we've already completed processing, don't try to do more. We need
4381 : : * this test because ExecPostprocessPlan might call us an extra time, and
4382 : : * our subplan's nodes aren't necessarily robust against being called
4383 : : * extra times.
4384 : : */
5497 tgl@sss.pgh.pa.us 4385 [ + + ]:CBC 64582 : if (node->mt_done)
4386 : 400 : return NULL;
4387 : :
4388 : : /*
4389 : : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4390 : : */
6000 4391 [ + + ]: 64182 : if (node->fireBSTriggers)
4392 : : {
4393 : 59510 : fireBSTriggers(node);
4394 : 59504 : node->fireBSTriggers = false;
4395 : : }
4396 : :
4397 : : /* Preload local variables */
1810 4398 : 64176 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4399 : 64176 : subplanstate = outerPlanState(node);
4400 : :
4401 : : /* Set global context */
1459 alvherre@alvh.no-ip. 4402 : 64176 : context.mtstate = node;
4403 : 64176 : context.epqstate = &node->mt_epqstate;
4404 : 64176 : context.estate = estate;
4405 : :
4406 : : /*
4407 : : * Fetch rows from subplan, and execute the required table modification
4408 : : * for each row.
4409 : : */
4410 : : for (;;)
4411 : : {
4412 : : /*
4413 : : * Reset the per-output-tuple exprcontext. This is needed because
4414 : : * triggers expect to use that context as workspace. It's a bit ugly
4415 : : * to do this below the top level of the plan, however. We might need
4416 : : * to rethink this later.
4417 : : */
5688 tgl@sss.pgh.pa.us 4418 [ + + ]: 7917499 : ResetPerTupleExprContext(estate);
4419 : :
4420 : : /*
4421 : : * Reset per-tuple memory context used for processing on conflict and
4422 : : * returning clauses, to free any expression evaluation storage
4423 : : * allocated in the previous cycle.
4424 : : */
2679 andres@anarazel.de 4425 [ + + ]: 7917499 : if (pstate->ps_ExprContext)
4426 : 178993 : ResetExprContext(pstate->ps_ExprContext);
4427 : :
4428 : : /*
4429 : : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4430 : : * to execute, do so now --- see the comments in ExecMerge().
4431 : : */
715 dean.a.rasheed@gmail 4432 [ + + ]: 7917499 : if (node->mt_merge_pending_not_matched != NULL)
4433 : : {
4434 : 2 : context.planSlot = node->mt_merge_pending_not_matched;
423 4435 : 2 : context.cpDeletedSlot = NULL;
4436 : :
715 4437 : 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4438 : 2 : node->canSetTag);
4439 : :
4440 : : /* Clear the pending action */
4441 : 2 : node->mt_merge_pending_not_matched = NULL;
4442 : :
4443 : : /*
4444 : : * If we got a RETURNING result, return it to the caller. We'll
4445 : : * continue the work on next call.
4446 : : */
4447 [ + - ]: 2 : if (slot)
4448 : 2 : return slot;
4449 : :
715 dean.a.rasheed@gmail 4450 :UBC 0 : continue; /* continue with the next tuple */
4451 : : }
4452 : :
4453 : : /* Fetch the next row from subplan */
1425 alvherre@alvh.no-ip. 4454 :CBC 7917497 : context.planSlot = ExecProcNode(subplanstate);
423 dean.a.rasheed@gmail 4455 : 7917288 : context.cpDeletedSlot = NULL;
4456 : :
4457 : : /* No more tuples to process? */
1425 alvherre@alvh.no-ip. 4458 [ + + + + ]: 7917288 : if (TupIsNull(context.planSlot))
4459 : : break;
4460 : :
4461 : : /*
4462 : : * When there are multiple result relations, each tuple contains a
4463 : : * junk column that gives the OID of the rel from which it came.
4464 : : * Extract it and select the correct result relation.
4465 : : */
1810 tgl@sss.pgh.pa.us 4466 [ + + ]: 7859453 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4467 : : {
4468 : : Datum datum;
4469 : : bool isNull;
4470 : : Oid resultoid;
4471 : :
1425 alvherre@alvh.no-ip. 4472 : 2630 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4473 : : &isNull);
1810 tgl@sss.pgh.pa.us 4474 [ + + ]: 2630 : if (isNull)
4475 : : {
4476 : : /*
4477 : : * For commands other than MERGE, any tuples having InvalidOid
4478 : : * for tableoid are errors. For MERGE, we may need to handle
4479 : : * them as WHEN NOT MATCHED clauses if any, so do that.
4480 : : *
4481 : : * Note that we use the node's toplevel resultRelInfo, not any
4482 : : * specific partition's.
4483 : : */
1448 alvherre@alvh.no-ip. 4484 [ + - ]: 254 : if (operation == CMD_MERGE)
4485 : : {
1425 4486 : 254 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4487 : :
728 dean.a.rasheed@gmail 4488 : 254 : slot = ExecMerge(&context, node->resultRelInfo,
4489 : 254 : NULL, NULL, node->canSetTag);
4490 : :
4491 : : /*
4492 : : * If we got a RETURNING result, return it to the caller.
4493 : : * We'll continue the work on next call.
4494 : : */
4495 [ + + ]: 248 : if (slot)
4496 : 19 : return slot;
4497 : :
4498 : 229 : continue; /* continue with the next tuple */
4499 : : }
4500 : :
1810 tgl@sss.pgh.pa.us 4501 [ # # ]:UBC 0 : elog(ERROR, "tableoid is NULL");
4502 : : }
1810 tgl@sss.pgh.pa.us 4503 :CBC 2376 : resultoid = DatumGetObjectId(datum);
4504 : :
4505 : : /* If it's not the same as last time, we need to locate the rel */
4506 [ + + ]: 2376 : if (resultoid != node->mt_lastResultOid)
1804 4507 : 1639 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4508 : : false, true);
4509 : : }
4510 : :
4511 : : /*
4512 : : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4513 : : * here is compute the RETURNING expressions.
4514 : : */
3649 rhaas@postgresql.org 4515 [ + + ]: 7859199 : if (resultRelInfo->ri_usesFdwDirectModify)
4516 : : {
4517 [ - + ]: 347 : Assert(resultRelInfo->ri_projectReturning);
4518 : :
4519 : : /*
4520 : : * A scan slot containing the data that was actually inserted,
4521 : : * updated or deleted has already been made available to
4522 : : * ExecProcessReturning by IterateDirectModify, so no need to
4523 : : * provide it here. The individual old and new slots are not
4524 : : * needed, since direct-modify is disabled if the RETURNING list
4525 : : * refers to OLD/NEW values.
4526 : : */
423 dean.a.rasheed@gmail 4527 [ + - - + ]: 347 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4528 : : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4529 : :
31 dean.a.rasheed@gmail 4530 :GNC 347 : slot = ExecProcessReturning(&context, resultRelInfo,
4531 : : operation == CMD_DELETE,
4532 : : NULL, NULL, context.planSlot);
4533 : :
3649 rhaas@postgresql.org 4534 :CBC 347 : return slot;
4535 : : }
4536 : :
1425 alvherre@alvh.no-ip. 4537 : 7858852 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4538 : 7858852 : slot = context.planSlot;
4539 : :
3030 tgl@sss.pgh.pa.us 4540 : 7858852 : tupleid = NULL;
4375 noah@leadboat.com 4541 : 7858852 : oldtuple = NULL;
4542 : :
4543 : : /*
4544 : : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4545 : : * to be updated/deleted/merged. For a heap relation, that's a TID;
4546 : : * otherwise we may have a wholerow junk attr that carries the old
4547 : : * tuple in toto. Keep this in step with the part of
4548 : : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4549 : : */
1448 alvherre@alvh.no-ip. 4550 [ + + + + : 7858852 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
+ + ]
4551 : : operation == CMD_MERGE)
4552 : : {
4553 : : char relkind;
4554 : : Datum datum;
4555 : : bool isNull;
4556 : :
1810 tgl@sss.pgh.pa.us 4557 : 956094 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4558 [ + + + + ]: 956094 : if (relkind == RELKIND_RELATION ||
4559 [ + + ]: 285 : relkind == RELKIND_MATVIEW ||
4560 : : relkind == RELKIND_PARTITIONED_TABLE)
4561 : : {
4562 : : /*
4563 : : * ri_RowIdAttNo refers to a ctid attribute. See the comment
4564 : : * in ExecInitModifyTable().
4565 : : */
51 amitlan@postgresql.o 4566 [ - + - - ]: 955812 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo) ||
4567 : : relkind == RELKIND_PARTITIONED_TABLE);
1810 tgl@sss.pgh.pa.us 4568 : 955812 : datum = ExecGetJunkAttribute(slot,
4569 : 955812 : resultRelInfo->ri_RowIdAttNo,
4570 : : &isNull);
4571 : :
4572 : : /*
4573 : : * For commands other than MERGE, any tuples having a null row
4574 : : * identifier are errors. For MERGE, we may need to handle
4575 : : * them as WHEN NOT MATCHED clauses if any, so do that.
4576 : : *
4577 : : * Note that we use the node's toplevel resultRelInfo, not any
4578 : : * specific partition's.
4579 : : */
4580 [ + + ]: 955812 : if (isNull)
4581 : : {
1448 alvherre@alvh.no-ip. 4582 [ + - ]: 1068 : if (operation == CMD_MERGE)
4583 : : {
1425 4584 : 1068 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4585 : :
728 dean.a.rasheed@gmail 4586 : 1068 : slot = ExecMerge(&context, node->resultRelInfo,
4587 : 1068 : NULL, NULL, node->canSetTag);
4588 : :
4589 : : /*
4590 : : * If we got a RETURNING result, return it to the
4591 : : * caller. We'll continue the work on next call.
4592 : : */
4593 [ + + ]: 1047 : if (slot)
4594 : 64 : return slot;
4595 : :
4596 : 1004 : continue; /* continue with the next tuple */
4597 : : }
4598 : :
1810 tgl@sss.pgh.pa.us 4599 [ # # ]:UBC 0 : elog(ERROR, "ctid is NULL");
4600 : : }
4601 : :
1810 tgl@sss.pgh.pa.us 4602 :CBC 954744 : tupleid = (ItemPointer) DatumGetPointer(datum);
4603 : 954744 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4604 : 954744 : tupleid = &tuple_ctid;
4605 : : }
4606 : :
4607 : : /*
4608 : : * Use the wholerow attribute, when available, to reconstruct the
4609 : : * old relation tuple. The old tuple serves one or both of two
4610 : : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4611 : : * provides values for any unchanged columns for the NEW tuple of
4612 : : * an UPDATE, because the subplan does not produce all the columns
4613 : : * of the target table.
4614 : : *
4615 : : * Note that the wholerow attribute does not carry system columns,
4616 : : * so foreign table triggers miss seeing those, except that we
4617 : : * know enough here to set t_tableOid. Quite separately from
4618 : : * this, the FDW may fetch its own junk attrs to identify the row.
4619 : : *
4620 : : * Other relevant relkinds, currently limited to views, always
4621 : : * have a wholerow attribute.
4622 : : */
4623 [ + + ]: 282 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4624 : : {
4625 : 267 : datum = ExecGetJunkAttribute(slot,
4626 : 267 : resultRelInfo->ri_RowIdAttNo,
4627 : : &isNull);
4628 : :
4629 : : /*
4630 : : * For commands other than MERGE, any tuples having a null row
4631 : : * identifier are errors. For MERGE, we may need to handle
4632 : : * them as WHEN NOT MATCHED clauses if any, so do that.
4633 : : *
4634 : : * Note that we use the node's toplevel resultRelInfo, not any
4635 : : * specific partition's.
4636 : : */
4637 [ + + ]: 267 : if (isNull)
4638 : : {
745 dean.a.rasheed@gmail 4639 [ + - ]: 24 : if (operation == CMD_MERGE)
4640 : : {
4641 : 24 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4642 : :
728 4643 : 24 : slot = ExecMerge(&context, node->resultRelInfo,
4644 : 24 : NULL, NULL, node->canSetTag);
4645 : :
4646 : : /*
4647 : : * If we got a RETURNING result, return it to the
4648 : : * caller. We'll continue the work on next call.
4649 : : */
4650 [ + + ]: 21 : if (slot)
4651 : 6 : return slot;
4652 : :
4653 : 15 : continue; /* continue with the next tuple */
4654 : : }
4655 : :
1810 tgl@sss.pgh.pa.us 4656 [ # # ]:UBC 0 : elog(ERROR, "wholerow is NULL");
4657 : : }
4658 : :
1810 tgl@sss.pgh.pa.us 4659 :CBC 243 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4660 : 243 : oldtupdata.t_len =
4661 : 243 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4662 : 243 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4663 : : /* Historically, view triggers see invalid t_tableOid. */
4664 : 243 : oldtupdata.t_tableOid =
4665 [ + + ]: 243 : (relkind == RELKIND_VIEW) ? InvalidOid :
4666 : 105 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4667 : :
4668 : 243 : oldtuple = &oldtupdata;
4669 : : }
4670 : : else
4671 : : {
4672 : : /* Only foreign tables are allowed to omit a row-ID attr */
4673 [ - + ]: 15 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4674 : : }
4675 : : }
4676 : :
6000 4677 [ + + + + : 7857760 : switch (operation)
- ]
4678 : : {
4679 : 6902758 : case CMD_INSERT:
4680 : : /* Initialize projection info if first time for this table */
1804 4681 [ + + ]: 6902758 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4682 : 44856 : ExecInitInsertProjection(node, resultRelInfo);
1425 alvherre@alvh.no-ip. 4683 : 6902758 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
1459 4684 : 6902758 : slot = ExecInsert(&context, resultRelInfo, slot,
1456 4685 : 6902758 : node->canSetTag, NULL, NULL);
6000 tgl@sss.pgh.pa.us 4686 : 6901668 : break;
4687 : :
4688 : 160106 : case CMD_UPDATE:
537 noah@leadboat.com 4689 : 160106 : tuplock = false;
4690 : :
4691 : : /* Initialize projection info if first time for this table */
1804 tgl@sss.pgh.pa.us 4692 [ + + ]: 160106 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4693 : 7055 : ExecInitUpdateProjection(node, resultRelInfo);
4694 : :
4695 : : /*
4696 : : * Make the new tuple by combining plan's output tuple with
4697 : : * the old tuple being updated.
4698 : : */
1810 4699 : 160106 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4700 [ + + ]: 160106 : if (oldtuple != NULL)
4701 : : {
537 noah@leadboat.com 4702 [ - + ]: 159 : Assert(!resultRelInfo->ri_needLockTagTuple);
4703 : : /* Use the wholerow junk attr as the old tuple. */
1810 tgl@sss.pgh.pa.us 4704 : 159 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4705 : : }
4706 : : else
4707 : : {
4708 : : /* Fetch the most recent version of old tuple. */
4709 : 159947 : Relation relation = resultRelInfo->ri_RelationDesc;
4710 : :
537 noah@leadboat.com 4711 [ + + ]: 159947 : if (resultRelInfo->ri_needLockTagTuple)
4712 : : {
4713 : 13495 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4714 : 13495 : tuplock = true;
4715 : : }
1810 tgl@sss.pgh.pa.us 4716 [ - + ]: 159947 : if (!table_tuple_fetch_row_version(relation, tupleid,
4717 : : SnapshotAny,
4718 : : oldSlot))
1810 tgl@sss.pgh.pa.us 4719 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
4720 : : }
1098 dean.a.rasheed@gmail 4721 :CBC 160106 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4722 : : oldSlot);
4723 : :
4724 : : /* Now apply the update. */
1459 alvherre@alvh.no-ip. 4725 : 160106 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
423 dean.a.rasheed@gmail 4726 : 160106 : oldSlot, slot, node->canSetTag);
537 noah@leadboat.com 4727 [ + + ]: 159854 : if (tuplock)
4728 : 13495 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4729 : : InplaceUpdateTupleLock);
6000 tgl@sss.pgh.pa.us 4730 : 159854 : break;
4731 : :
4732 : 788472 : case CMD_DELETE:
1459 alvherre@alvh.no-ip. 4733 : 788472 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
703 akorotkov@postgresql 4734 : 788472 : true, false, node->canSetTag, NULL, NULL, NULL);
6000 tgl@sss.pgh.pa.us 4735 : 788438 : break;
4736 : :
1448 alvherre@alvh.no-ip. 4737 : 6424 : case CMD_MERGE:
745 dean.a.rasheed@gmail 4738 : 6424 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4739 : 6424 : node->canSetTag);
1448 alvherre@alvh.no-ip. 4740 : 6376 : break;
4741 : :
6000 tgl@sss.pgh.pa.us 4742 :UBC 0 : default:
4743 [ # # ]: 0 : elog(ERROR, "unknown operation");
4744 : : break;
4745 : : }
4746 : :
4747 : : /*
4748 : : * If we got a RETURNING result, return it to caller. We'll continue
4749 : : * the work on next call.
4750 : : */
6000 tgl@sss.pgh.pa.us 4751 [ + + ]:CBC 7856336 : if (slot)
4752 : 4246 : return slot;
4753 : : }
4754 : :
4755 : : /*
4756 : : * Insert remaining tuples for batch insert.
4757 : : */
1206 efujita@postgresql.o 4758 [ + + ]: 57835 : if (estate->es_insert_pending_result_relations != NIL)
4759 : 13 : ExecPendingInserts(estate);
4760 : :
4761 : : /*
4762 : : * We're done, but fire AFTER STATEMENT triggers before exiting.
4763 : : */
6000 tgl@sss.pgh.pa.us 4764 : 57834 : fireASTriggers(node);
4765 : :
5497 4766 : 57834 : node->mt_done = true;
4767 : :
6000 4768 : 57834 : return NULL;
4769 : : }
4770 : :
4771 : : /*
4772 : : * ExecLookupResultRelByOid
4773 : : * If the table with given OID is among the result relations to be
4774 : : * updated by the given ModifyTable node, return its ResultRelInfo.
4775 : : *
4776 : : * If not found, return NULL if missing_ok, else raise error.
4777 : : *
4778 : : * If update_cache is true, then upon successful lookup, update the node's
4779 : : * one-element cache. ONLY ExecModifyTable may pass true for this.
4780 : : */
4781 : : ResultRelInfo *
1804 4782 : 5625 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4783 : : bool missing_ok, bool update_cache)
4784 : : {
4785 [ + + ]: 5625 : if (node->mt_resultOidHash)
4786 : : {
4787 : : /* Use the pre-built hash table to locate the rel */
4788 : : MTTargetRelLookup *mtlookup;
4789 : :
4790 : : mtlookup = (MTTargetRelLookup *)
4791 : 562 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4792 [ + - ]: 562 : if (mtlookup)
4793 : : {
4794 [ + + ]: 562 : if (update_cache)
4795 : : {
4796 : 412 : node->mt_lastResultOid = resultoid;
4797 : 412 : node->mt_lastResultIndex = mtlookup->relationIndex;
4798 : : }
4799 : 562 : return node->resultRelInfo + mtlookup->relationIndex;
4800 : : }
4801 : : }
4802 : : else
4803 : : {
4804 : : /* With few target rels, just search the ResultRelInfo array */
4805 [ + + ]: 9158 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4806 : : {
4807 : 5426 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4808 : :
4809 [ + + ]: 5426 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4810 : : {
4811 [ + + ]: 1331 : if (update_cache)
4812 : : {
4813 : 1227 : node->mt_lastResultOid = resultoid;
4814 : 1227 : node->mt_lastResultIndex = ndx;
4815 : : }
4816 : 1331 : return rInfo;
4817 : : }
4818 : : }
4819 : : }
4820 : :
4821 [ - + ]: 3732 : if (!missing_ok)
1804 tgl@sss.pgh.pa.us 4822 [ # # ]:UBC 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
1804 tgl@sss.pgh.pa.us 4823 :CBC 3732 : return NULL;
4824 : : }
4825 : :
4826 : : /* ----------------------------------------------------------------
4827 : : * ExecInitModifyTable
4828 : : * ----------------------------------------------------------------
4829 : : */
4830 : : ModifyTableState *
6000 4831 : 60221 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4832 : : {
4833 : : ModifyTableState *mtstate;
1810 4834 : 60221 : Plan *subplan = outerPlan(node);
6000 4835 : 60221 : CmdType operation = node->operation;
361 amitlan@postgresql.o 4836 : 60221 : int total_nrels = list_length(node->resultRelations);
4837 : : int nrels;
401 4838 : 60221 : List *resultRelations = NIL;
4839 : 60221 : List *withCheckOptionLists = NIL;
4840 : 60221 : List *returningLists = NIL;
4841 : 60221 : List *updateColnosLists = NIL;
391 4842 : 60221 : List *mergeActionLists = NIL;
4843 : 60221 : List *mergeJoinConditions = NIL;
4844 : : ResultRelInfo *resultRelInfo;
4845 : : List *arowmarks;
4846 : : ListCell *l;
4847 : : int i;
4848 : : Relation rel;
4849 : :
4850 : : /* check for unsupported flags */
6000 tgl@sss.pgh.pa.us 4851 [ - + ]: 60221 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4852 : :
4853 : : /*
4854 : : * Only consider unpruned relations for initializing their ResultRelInfo
4855 : : * struct and other fields such as withCheckOptions, etc.
4856 : : *
4857 : : * Note: We must avoid pruning every result relation. This is important
4858 : : * for MERGE, since even if every result relation is pruned from the
4859 : : * subplan, there might still be NOT MATCHED rows, for which there may be
4860 : : * INSERT actions to perform. To allow these actions to be found, at
4861 : : * least one result relation must be kept. Also, when inserting into a
4862 : : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4863 : : * as a reference for building the ResultRelInfo of the target partition.
4864 : : * In either case, it doesn't matter which result relation is kept, so we
4865 : : * just keep the first one, if all others have been pruned. See also,
4866 : : * ExecDoInitialPruning(), which ensures that this first result relation
4867 : : * has been locked.
4868 : : */
401 amitlan@postgresql.o 4869 : 60221 : i = 0;
4870 [ + - + + : 121714 : foreach(l, node->resultRelations)
+ + ]
4871 : : {
4872 : 61493 : Index rti = lfirst_int(l);
4873 : : bool keep_rel;
4874 : :
361 4875 : 61493 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4876 [ + + + + : 61493 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
+ + ]
4877 : : {
4878 : : /* all result relations pruned; keep the first one */
4879 : 24 : keep_rel = true;
4880 : 24 : rti = linitial_int(node->resultRelations);
4881 : 24 : i = 0;
4882 : : }
4883 : :
4884 [ + + ]: 61493 : if (keep_rel)
4885 : : {
401 4886 : 61450 : resultRelations = lappend_int(resultRelations, rti);
4887 [ + + ]: 61450 : if (node->withCheckOptionLists)
4888 : : {
4889 : 799 : List *withCheckOptions = list_nth_node(List,
4890 : : node->withCheckOptionLists,
4891 : : i);
4892 : :
4893 : 799 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4894 : : }
4895 [ + + ]: 61450 : if (node->returningLists)
4896 : : {
4897 : 3199 : List *returningList = list_nth_node(List,
4898 : : node->returningLists,
4899 : : i);
4900 : :
4901 : 3199 : returningLists = lappend(returningLists, returningList);
4902 : : }
4903 [ + + ]: 61450 : if (node->updateColnosLists)
4904 : : {
4905 : 8431 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4906 : :
4907 : 8431 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4908 : : }
391 4909 [ + + ]: 61450 : if (node->mergeActionLists)
4910 : : {
4911 : 943 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4912 : :
4913 : 943 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4914 : : }
4915 [ + + ]: 61450 : if (node->mergeJoinConditions)
4916 : : {
4917 : 943 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4918 : :
4919 : 943 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4920 : : }
4921 : : }
401 4922 : 61493 : i++;
4923 : : }
4924 : 60221 : nrels = list_length(resultRelations);
361 4925 [ - + ]: 60221 : Assert(nrels > 0);
4926 : :
4927 : : /*
4928 : : * create state structure
4929 : : */
6000 tgl@sss.pgh.pa.us 4930 : 60221 : mtstate = makeNode(ModifyTableState);
4931 : 60221 : mtstate->ps.plan = (Plan *) node;
4932 : 60221 : mtstate->ps.state = estate;
3163 andres@anarazel.de 4933 : 60221 : mtstate->ps.ExecProcNode = ExecModifyTable;
4934 : :
5497 tgl@sss.pgh.pa.us 4935 : 60221 : mtstate->operation = operation;
4936 : 60221 : mtstate->canSetTag = node->canSetTag;
4937 : 60221 : mtstate->mt_done = false;
4938 : :
1810 4939 : 60221 : mtstate->mt_nrels = nrels;
95 michael@paquier.xyz 4940 :GNC 60221 : mtstate->resultRelInfo = palloc_array(ResultRelInfo, nrels);
4941 : :
715 dean.a.rasheed@gmail 4942 :CBC 60221 : mtstate->mt_merge_pending_not_matched = NULL;
1448 alvherre@alvh.no-ip. 4943 : 60221 : mtstate->mt_merge_inserted = 0;
4944 : 60221 : mtstate->mt_merge_updated = 0;
4945 : 60221 : mtstate->mt_merge_deleted = 0;
401 amitlan@postgresql.o 4946 : 60221 : mtstate->mt_updateColnosLists = updateColnosLists;
391 4947 : 60221 : mtstate->mt_mergeActionLists = mergeActionLists;
4948 : 60221 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4949 : :
4950 : : /*----------
4951 : : * Resolve the target relation. This is the same as:
4952 : : *
4953 : : * - the relation for which we will fire FOR STATEMENT triggers,
4954 : : * - the relation into whose tuple format all captured transition tuples
4955 : : * must be converted, and
4956 : : * - the root partitioned table used for tuple routing.
4957 : : *
4958 : : * If it's a partitioned or inherited table, the root partition or
4959 : : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4960 : : * given explicitly in node->rootRelation. Otherwise, the target relation
4961 : : * is the sole relation in the node->resultRelations list and, since it can
4962 : : * never be pruned, also in the resultRelations list constructed above.
4963 : : *----------
4964 : : */
1979 heikki.linnakangas@i 4965 [ + + ]: 60221 : if (node->rootRelation > 0)
4966 : : {
401 amitlan@postgresql.o 4967 [ - + ]: 1485 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
1979 heikki.linnakangas@i 4968 : 1485 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4969 : 1485 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4970 : : node->rootRelation);
4971 : : }
4972 : : else
4973 : : {
873 tgl@sss.pgh.pa.us 4974 [ - + ]: 58736 : Assert(list_length(node->resultRelations) == 1);
391 amitlan@postgresql.o 4975 [ - + ]: 58736 : Assert(list_length(resultRelations) == 1);
1973 heikki.linnakangas@i 4976 : 58736 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4977 : 58736 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
391 amitlan@postgresql.o 4978 : 58736 : linitial_int(resultRelations));
4979 : : }
4980 : :
4981 : : /* set up epqstate with dummy subplan data for the moment */
1031 tgl@sss.pgh.pa.us 4982 : 60221 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4983 : : node->epqParam, resultRelations);
6000 4984 : 60221 : mtstate->fireBSTriggers = true;
4985 : :
4986 : : /*
4987 : : * Build state for collecting transition tuples. This requires having a
4988 : : * valid trigger query context, so skip it in explain-only mode.
4989 : : */
1973 heikki.linnakangas@i 4990 [ + + ]: 60221 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4991 : 59692 : ExecSetupTransitionCaptureState(mtstate, estate);
4992 : :
4993 : : /*
4994 : : * Open all the result relations and initialize the ResultRelInfo structs.
4995 : : * (But root relation was initialized above, if it's part of the array.)
4996 : : * We must do this before initializing the subplan, because direct-modify
4997 : : * FDWs expect their ResultRelInfos to be available.
4998 : : */
5497 tgl@sss.pgh.pa.us 4999 : 60221 : resultRelInfo = mtstate->resultRelInfo;
6000 5000 : 60221 : i = 0;
401 amitlan@postgresql.o 5001 [ + - + + : 121500 : foreach(l, resultRelations)
+ + ]
5002 : : {
1979 heikki.linnakangas@i 5003 : 61447 : Index resultRelation = lfirst_int(l);
745 dean.a.rasheed@gmail 5004 : 61447 : List *mergeActions = NIL;
5005 : :
391 amitlan@postgresql.o 5006 [ + + ]: 61447 : if (mergeActionLists)
5007 : 943 : mergeActions = list_nth(mergeActionLists, i);
5008 : :
1973 heikki.linnakangas@i 5009 [ + + ]: 61447 : if (resultRelInfo != mtstate->rootResultRelInfo)
5010 : : {
5011 : 2711 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
5012 : :
5013 : : /*
5014 : : * For child result relations, store the root result relation
5015 : : * pointer. We do so for the convenience of places that want to
5016 : : * look at the query's original target relation but don't have the
5017 : : * mtstate handy.
5018 : : */
1804 tgl@sss.pgh.pa.us 5019 : 2711 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
5020 : : }
5021 : :
5022 : : /* Initialize the usesFdwDirectModify flag */
1459 alvherre@alvh.no-ip. 5023 : 61447 : resultRelInfo->ri_usesFdwDirectModify =
5024 : 61447 : bms_is_member(i, node->fdwDirectModifyPlans);
5025 : :
5026 : : /*
5027 : : * Verify result relation is a valid target for the current operation
5028 : : */
192 dean.a.rasheed@gmail 5029 : 61447 : CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
5030 : : mergeActions);
5031 : :
1810 tgl@sss.pgh.pa.us 5032 : 61279 : resultRelInfo++;
5033 : 61279 : i++;
5034 : : }
5035 : :
5036 : : /*
5037 : : * Now we may initialize the subplan.
5038 : : */
5039 : 60053 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
5040 : :
5041 : : /*
5042 : : * Do additional per-result-relation initialization.
5043 : : */
5044 [ + + ]: 121315 : for (i = 0; i < nrels; i++)
5045 : : {
5046 : 61262 : resultRelInfo = &mtstate->resultRelInfo[i];
5047 : :
5048 : : /* Let FDWs init themselves for foreign-table result rels */
3649 rhaas@postgresql.org 5049 [ + + ]: 61262 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5050 [ + + ]: 61158 : resultRelInfo->ri_FdwRoutine != NULL &&
4753 tgl@sss.pgh.pa.us 5051 [ + - ]: 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
5052 : : {
5053 : 170 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
5054 : :
5055 : 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
5056 : : resultRelInfo,
5057 : : fdw_private,
5058 : : i,
5059 : : eflags);
5060 : : }
5061 : :
5062 : : /*
5063 : : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
5064 : : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
5065 : : * tables, the FDW might have created additional junk attr(s), but
5066 : : * those are no concern of ours.
5067 : : */
1448 alvherre@alvh.no-ip. 5068 [ + + + + : 61262 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
+ + ]
5069 : : operation == CMD_MERGE)
5070 : : {
5071 : : char relkind;
5072 : :
1804 tgl@sss.pgh.pa.us 5073 : 15661 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
5074 [ + + + + ]: 15661 : if (relkind == RELKIND_RELATION ||
5075 [ + + ]: 354 : relkind == RELKIND_MATVIEW ||
5076 : : relkind == RELKIND_PARTITIONED_TABLE)
5077 : : {
5078 : 15331 : resultRelInfo->ri_RowIdAttNo =
5079 : 15331 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
5080 : :
5081 : : /*
5082 : : * For heap relations, a ctid junk attribute must be present.
5083 : : * Partitioned tables should only appear here when all leaf
5084 : : * partitions were pruned, in which case no rows can be
5085 : : * produced and ctid is not needed.
5086 : : */
51 amitlan@postgresql.o 5087 [ + + ]: 15331 : if (relkind == RELKIND_PARTITIONED_TABLE)
5088 [ - + ]: 24 : Assert(nrels == 1);
5089 [ - + ]: 15307 : else if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1804 tgl@sss.pgh.pa.us 5090 [ # # ]:UBC 0 : elog(ERROR, "could not find junk ctid column");
5091 : : }
1804 tgl@sss.pgh.pa.us 5092 [ + + ]:CBC 330 : else if (relkind == RELKIND_FOREIGN_TABLE)
5093 : : {
5094 : : /*
5095 : : * We don't support MERGE with foreign tables for now. (It's
5096 : : * problematic because the implementation uses CTID.)
5097 : : */
1448 alvherre@alvh.no-ip. 5098 [ - + ]: 186 : Assert(operation != CMD_MERGE);
5099 : :
5100 : : /*
5101 : : * When there is a row-level trigger, there should be a
5102 : : * wholerow attribute. We also require it to be present in
5103 : : * UPDATE and MERGE, so we can get the values of unchanged
5104 : : * columns.
5105 : : */
1804 tgl@sss.pgh.pa.us 5106 : 186 : resultRelInfo->ri_RowIdAttNo =
5107 : 186 : ExecFindJunkAttributeInTlist(subplan->targetlist,
5108 : : "wholerow");
1448 alvherre@alvh.no-ip. 5109 [ + + - + ]: 186 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
1804 tgl@sss.pgh.pa.us 5110 [ - + ]: 105 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1804 tgl@sss.pgh.pa.us 5111 [ # # ]:UBC 0 : elog(ERROR, "could not find junk wholerow column");
5112 : : }
5113 : : else
5114 : : {
5115 : : /* Other valid target relkinds must provide wholerow */
1804 tgl@sss.pgh.pa.us 5116 :CBC 144 : resultRelInfo->ri_RowIdAttNo =
5117 : 144 : ExecFindJunkAttributeInTlist(subplan->targetlist,
5118 : : "wholerow");
5119 [ - + ]: 144 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1804 tgl@sss.pgh.pa.us 5120 [ # # ]:UBC 0 : elog(ERROR, "could not find junk wholerow column");
5121 : : }
5122 : : }
5123 : : }
5124 : :
5125 : : /*
5126 : : * If this is an inherited update/delete/merge, there will be a junk
5127 : : * attribute named "tableoid" present in the subplan's targetlist. It
5128 : : * will be used to identify the result relation for a given tuple to be
5129 : : * updated/deleted/merged.
5130 : : */
1804 tgl@sss.pgh.pa.us 5131 :CBC 60053 : mtstate->mt_resultOidAttno =
5132 : 60053 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
361 amitlan@postgresql.o 5133 [ + + - + ]: 60053 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
1804 tgl@sss.pgh.pa.us 5134 : 60053 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
5135 : 60053 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
5136 : :
5137 : : /* Get the root target relation */
1973 heikki.linnakangas@i 5138 : 60053 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
5139 : :
5140 : : /*
5141 : : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
5142 : : * or MERGE might need this too, but only if it actually moves tuples
5143 : : * between partitions; in that case setup is done by
5144 : : * ExecCrossPartitionUpdate.
5145 : : */
2977 rhaas@postgresql.org 5146 [ + + + + ]: 60053 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
5147 : : operation == CMD_INSERT)
2943 5148 : 2325 : mtstate->mt_partition_tuple_routing =
1804 tgl@sss.pgh.pa.us 5149 : 2325 : ExecSetupPartitionTupleRouting(estate, rel);
5150 : :
5151 : : /*
5152 : : * Initialize any WITH CHECK OPTION constraints if needed.
5153 : : */
4623 sfrost@snowman.net 5154 : 60053 : resultRelInfo = mtstate->resultRelInfo;
401 amitlan@postgresql.o 5155 [ + + + + : 60852 : foreach(l, withCheckOptionLists)
+ + ]
5156 : : {
4623 sfrost@snowman.net 5157 : 799 : List *wcoList = (List *) lfirst(l);
5158 : 799 : List *wcoExprs = NIL;
5159 : : ListCell *ll;
5160 : :
5161 [ + - + + : 2359 : foreach(ll, wcoList)
+ + ]
5162 : : {
5163 : 1560 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
3288 andres@anarazel.de 5164 : 1560 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
5165 : : &mtstate->ps);
5166 : :
4623 sfrost@snowman.net 5167 : 1560 : wcoExprs = lappend(wcoExprs, wcoExpr);
5168 : : }
5169 : :
5170 : 799 : resultRelInfo->ri_WithCheckOptions = wcoList;
5171 : 799 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
5172 : 799 : resultRelInfo++;
5173 : : }
5174 : :
5175 : : /*
5176 : : * Initialize RETURNING projections if needed.
5177 : : */
401 amitlan@postgresql.o 5178 [ + + ]: 60053 : if (returningLists)
5179 : : {
5180 : : TupleTableSlot *slot;
5181 : : ExprContext *econtext;
5182 : :
5183 : : /*
5184 : : * Initialize result tuple slot and assign its rowtype using the plan
5185 : : * node's declared targetlist, which the planner set up to be the same
5186 : : * as the first (before runtime pruning) RETURNING list. We assume
5187 : : * all the result rels will produce compatible output.
5188 : : */
2677 andres@anarazel.de 5189 : 3024 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
6000 tgl@sss.pgh.pa.us 5190 : 3024 : slot = mtstate->ps.ps_ResultTupleSlot;
5191 : :
5192 : : /* Need an econtext too */
3288 andres@anarazel.de 5193 [ + - ]: 3024 : if (mtstate->ps.ps_ExprContext == NULL)
5194 : 3024 : ExecAssignExprContext(estate, &mtstate->ps);
5195 : 3024 : econtext = mtstate->ps.ps_ExprContext;
5196 : :
5197 : : /*
5198 : : * Build a projection for each result rel.
5199 : : */
5497 tgl@sss.pgh.pa.us 5200 : 3024 : resultRelInfo = mtstate->resultRelInfo;
401 amitlan@postgresql.o 5201 [ + - + + : 6223 : foreach(l, returningLists)
+ + ]
5202 : : {
6000 tgl@sss.pgh.pa.us 5203 : 3199 : List *rlist = (List *) lfirst(l);
5204 : :
2900 rhaas@postgresql.org 5205 : 3199 : resultRelInfo->ri_returningList = rlist;
6000 tgl@sss.pgh.pa.us 5206 : 3199 : resultRelInfo->ri_projectReturning =
3288 andres@anarazel.de 5207 : 3199 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
3189 tgl@sss.pgh.pa.us 5208 : 3199 : resultRelInfo->ri_RelationDesc->rd_att);
6000 5209 : 3199 : resultRelInfo++;
5210 : : }
5211 : : }
5212 : : else
5213 : : {
5214 : : /*
5215 : : * We still must construct a dummy result tuple type, because InitPlan
5216 : : * expects one (maybe should change that?).
5217 : : */
2683 andres@anarazel.de 5218 : 57029 : ExecInitResultTypeTL(&mtstate->ps);
5219 : :
6000 tgl@sss.pgh.pa.us 5220 : 57029 : mtstate->ps.ps_ExprContext = NULL;
5221 : : }
5222 : :
5223 : : /* Set the list of arbiter indexes if needed for ON CONFLICT */
2911 alvherre@alvh.no-ip. 5224 : 60053 : resultRelInfo = mtstate->resultRelInfo;
5225 [ + + ]: 60053 : if (node->onConflictAction != ONCONFLICT_NONE)
5226 : : {
5227 : : /* insert may only have one relation, inheritance is not expanded */
361 amitlan@postgresql.o 5228 [ - + ]: 907 : Assert(total_nrels == 1);
2911 alvherre@alvh.no-ip. 5229 : 907 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5230 : : }
5231 : :
5232 : : /*
5233 : : * For ON CONFLICT DO SELECT/UPDATE, initialize the ON CONFLICT action
5234 : : * state.
5235 : : */
31 dean.a.rasheed@gmail 5236 [ + + ]:GNC 60053 : if (node->onConflictAction == ONCONFLICT_UPDATE ||
5237 [ + + ]: 59554 : node->onConflictAction == ONCONFLICT_SELECT)
5238 : : {
5239 : 667 : OnConflictActionState *onconfl = makeNode(OnConflictActionState);
5240 : :
5241 : : /* already exists if created by RETURNING processing above */
3964 andres@anarazel.de 5242 [ + + ]:CBC 667 : if (mtstate->ps.ps_ExprContext == NULL)
5243 : 350 : ExecAssignExprContext(estate, &mtstate->ps);
5244 : :
5245 : : /* action state for DO SELECT/UPDATE */
1770 tgl@sss.pgh.pa.us 5246 : 667 : resultRelInfo->ri_onConflict = onconfl;
5247 : :
5248 : : /* lock strength for DO SELECT [FOR UPDATE/SHARE] */
31 dean.a.rasheed@gmail 5249 :GNC 667 : onconfl->oc_LockStrength = node->onConflictLockStrength;
5250 : :
5251 : : /* initialize slot for the existing tuple */
1770 tgl@sss.pgh.pa.us 5252 :CBC 667 : onconfl->oc_Existing =
2561 andres@anarazel.de 5253 : 667 : table_slot_create(resultRelInfo->ri_RelationDesc,
5254 : 667 : &mtstate->ps.state->es_tupleTable);
5255 : :
5256 : : /*
5257 : : * For ON CONFLICT DO UPDATE, initialize target list and projection.
5258 : : */
31 dean.a.rasheed@gmail 5259 [ + + ]:GNC 667 : if (node->onConflictAction == ONCONFLICT_UPDATE)
5260 : : {
5261 : : ExprContext *econtext;
5262 : : TupleDesc relationDesc;
5263 : :
5264 : 499 : econtext = mtstate->ps.ps_ExprContext;
5265 : 499 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5266 : :
5267 : : /*
5268 : : * Create the tuple slot for the UPDATE SET projection. We want a
5269 : : * slot of the table's type here, because the slot will be used to
5270 : : * insert into the table, and for RETURNING processing - which may
5271 : : * access system attributes.
5272 : : */
5273 : 499 : onconfl->oc_ProjSlot =
5274 : 499 : table_slot_create(resultRelInfo->ri_RelationDesc,
5275 : 499 : &mtstate->ps.state->es_tupleTable);
5276 : :
5277 : : /* build UPDATE SET projection state */
5278 : 499 : onconfl->oc_ProjInfo =
5279 : 499 : ExecBuildUpdateProjection(node->onConflictSet,
5280 : : true,
5281 : : node->onConflictCols,
5282 : : relationDesc,
5283 : : econtext,
5284 : : onconfl->oc_ProjSlot,
5285 : : &mtstate->ps);
5286 : : }
5287 : :
5288 : : /* initialize state to evaluate the WHERE clause, if any */
3964 andres@anarazel.de 5289 [ + + ]:CBC 667 : if (node->onConflictWhere)
5290 : : {
5291 : : ExprState *qualexpr;
5292 : :
3288 5293 : 154 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
5294 : : &mtstate->ps);
1770 tgl@sss.pgh.pa.us 5295 : 154 : onconfl->oc_WhereClause = qualexpr;
5296 : : }
5297 : : }
5298 : :
5299 : : /*
5300 : : * If we have any secondary relations in an UPDATE or DELETE, they need to
5301 : : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5302 : : * EvalPlanQual mechanism needs to be told about them. This also goes for
5303 : : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5304 : : */
1810 5305 : 60053 : arowmarks = NIL;
5984 5306 [ + + + + : 61491 : foreach(l, node->rowMarks)
+ + ]
5307 : : {
3261 5308 : 1438 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
58 amitlan@postgresql.o 5309 : 1438 : RangeTblEntry *rte = exec_rt_fetch(rc->rti, estate);
5310 : : ExecRowMark *erm;
5311 : : ExecAuxRowMark *aerm;
5312 : :
5313 : : /* ignore "parent" rowmarks; they are irrelevant at runtime */
5314 [ + + ]: 1438 : if (rc->isParent)
5315 : 71 : continue;
5316 : :
5317 : : /*
5318 : : * Also ignore rowmarks belonging to child tables that have been
5319 : : * pruned in ExecDoInitialPruning().
5320 : : */
5321 [ + + ]: 1367 : if (rte->rtekind == RTE_RELATION &&
401 5322 [ - + ]: 1074 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
5984 tgl@sss.pgh.pa.us 5323 :UBC 0 : continue;
5324 : :
5325 : : /* Find ExecRowMark and build ExecAuxRowMark */
3960 tgl@sss.pgh.pa.us 5326 :CBC 1367 : erm = ExecFindRowMark(estate, rc->rti, false);
1810 5327 : 1367 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
5328 : 1367 : arowmarks = lappend(arowmarks, aerm);
5329 : : }
5330 : :
5331 : : /* For a MERGE command, initialize its state */
1448 alvherre@alvh.no-ip. 5332 [ + + ]: 60053 : if (mtstate->operation == CMD_MERGE)
5333 : 816 : ExecInitMerge(mtstate, estate);
5334 : :
1810 tgl@sss.pgh.pa.us 5335 : 60053 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5336 : :
5337 : : /*
5338 : : * If there are a lot of result relations, use a hash table to speed the
5339 : : * lookups. If there are not a lot, a simple linear search is faster.
5340 : : *
5341 : : * It's not clear where the threshold is, but try 64 for starters. In a
5342 : : * debugging build, use a small threshold so that we get some test
5343 : : * coverage of both code paths.
5344 : : */
5345 : : #ifdef USE_ASSERT_CHECKING
5346 : : #define MT_NRELS_HASH 4
5347 : : #else
5348 : : #define MT_NRELS_HASH 64
5349 : : #endif
5350 [ + + ]: 60053 : if (nrels >= MT_NRELS_HASH)
5351 : : {
5352 : : HASHCTL hash_ctl;
5353 : :
5354 : 167 : hash_ctl.keysize = sizeof(Oid);
5355 : 167 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5356 : 167 : hash_ctl.hcxt = CurrentMemoryContext;
5357 : 167 : mtstate->mt_resultOidHash =
5358 : 167 : hash_create("ModifyTable target hash",
5359 : : nrels, &hash_ctl,
5360 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5361 [ + + ]: 934 : for (i = 0; i < nrels; i++)
5362 : : {
5363 : : Oid hashkey;
5364 : : MTTargetRelLookup *mtlookup;
5365 : : bool found;
5366 : :
5367 : 767 : resultRelInfo = &mtstate->resultRelInfo[i];
5368 : 767 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5369 : : mtlookup = (MTTargetRelLookup *)
5370 : 767 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5371 : : HASH_ENTER, &found);
5372 [ - + ]: 767 : Assert(!found);
5373 : 767 : mtlookup->relationIndex = i;
5374 : : }
5375 : : }
5376 : : else
5377 : 59886 : mtstate->mt_resultOidHash = NULL;
5378 : :
5379 : : /*
5380 : : * Determine if the FDW supports batch insert and determine the batch size
5381 : : * (a FDW may support batching, but it may be disabled for the
5382 : : * server/table).
5383 : : *
5384 : : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5385 : : * remains set to 0.
5386 : : */
1879 tomas.vondra@postgre 5387 [ + + ]: 60053 : if (operation == CMD_INSERT)
5388 : : {
5389 : : /* insert may only have one relation, inheritance is not expanded */
361 amitlan@postgresql.o 5390 [ - + ]: 45601 : Assert(total_nrels == 1);
1879 tomas.vondra@postgre 5391 : 45601 : resultRelInfo = mtstate->resultRelInfo;
1804 tgl@sss.pgh.pa.us 5392 [ + - ]: 45601 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5393 [ + + ]: 45601 : resultRelInfo->ri_FdwRoutine != NULL &&
5394 [ + - ]: 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5395 [ + - ]: 88 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5396 : : {
5397 : 88 : resultRelInfo->ri_BatchSize =
5398 : 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
1879 tomas.vondra@postgre 5399 [ - + ]: 88 : Assert(resultRelInfo->ri_BatchSize >= 1);
5400 : : }
5401 : : else
1804 tgl@sss.pgh.pa.us 5402 : 45513 : resultRelInfo->ri_BatchSize = 1;
5403 : : }
5404 : :
5405 : : /*
5406 : : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5407 : : * to estate->es_auxmodifytables so that it will be run to completion by
5408 : : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5409 : : * ModifyTable node too, but there's no need.) Note the use of lcons not
5410 : : * lappend: we need later-initialized ModifyTable nodes to be shut down
5411 : : * before earlier ones. This ensures that we don't throw away RETURNING
5412 : : * rows that need to be seen by a later CTE subplan.
5413 : : */
5497 5414 [ + + ]: 60053 : if (!mtstate->canSetTag)
5415 : 494 : estate->es_auxmodifytables = lcons(mtstate,
5416 : : estate->es_auxmodifytables);
5417 : :
6000 5418 : 60053 : return mtstate;
5419 : : }
5420 : :
5421 : : /* ----------------------------------------------------------------
5422 : : * ExecEndModifyTable
5423 : : *
5424 : : * Shuts down the plan.
5425 : : *
5426 : : * Returns nothing of interest.
5427 : : * ----------------------------------------------------------------
5428 : : */
5429 : : void
5430 : 57798 : ExecEndModifyTable(ModifyTableState *node)
5431 : : {
5432 : : int i;
5433 : :
5434 : : /*
5435 : : * Allow any FDWs to shut down
5436 : : */
1810 5437 [ + + ]: 116650 : for (i = 0; i < node->mt_nrels; i++)
5438 : : {
5439 : : int j;
4753 5440 : 58852 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5441 : :
3649 rhaas@postgresql.org 5442 [ + + ]: 58852 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5443 [ + + ]: 58756 : resultRelInfo->ri_FdwRoutine != NULL &&
4753 tgl@sss.pgh.pa.us 5444 [ + - ]: 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5445 : 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5446 : : resultRelInfo);
5447 : :
5448 : : /*
5449 : : * Cleanup the initialized batch slots. This only matters for FDWs
5450 : : * with batching, but the other cases will have ri_NumSlotsInitialized
5451 : : * == 0.
5452 : : */
1738 tomas.vondra@postgre 5453 [ + + ]: 58880 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5454 : : {
5455 : 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5456 : 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5457 : : }
5458 : : }
5459 : :
5460 : : /*
5461 : : * Close all the partitioned tables, leaf partitions, and their indices
5462 : : * and release the slot used for tuple routing, if set.
5463 : : */
2992 rhaas@postgresql.org 5464 [ + + ]: 57798 : if (node->mt_partition_tuple_routing)
5465 : : {
2900 5466 : 2350 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5467 : :
2676 alvherre@alvh.no-ip. 5468 [ + + ]: 2350 : if (node->mt_root_tuple_slot)
5469 : 334 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5470 : : }
5471 : :
5472 : : /*
5473 : : * Terminate EPQ execution if active
5474 : : */
5984 tgl@sss.pgh.pa.us 5475 : 57798 : EvalPlanQualEnd(&node->mt_epqstate);
5476 : :
5477 : : /*
5478 : : * shut down subplan
5479 : : */
1810 5480 : 57798 : ExecEndNode(outerPlanState(node));
6000 5481 : 57798 : }
5482 : :
5483 : : void
5725 tgl@sss.pgh.pa.us 5484 :UBC 0 : ExecReScanModifyTable(ModifyTableState *node)
5485 : : {
5486 : : /*
5487 : : * Currently, we don't need to support rescan on ModifyTable nodes. The
5488 : : * semantics of that would be a bit debatable anyway.
5489 : : */
6000 5490 [ # # ]: 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5491 : : }
|