Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * execMain.c
4 : : * top level executor interface routines
5 : : *
6 : : * INTERFACE ROUTINES
7 : : * ExecutorStart()
8 : : * ExecutorRun()
9 : : * ExecutorFinish()
10 : : * ExecutorEnd()
11 : : *
12 : : * These four procedures are the external interface to the executor.
13 : : * In each case, the query descriptor is required as an argument.
14 : : *
15 : : * ExecutorStart must be called at the beginning of execution of any
16 : : * query plan and ExecutorEnd must always be called at the end of
17 : : * execution of a plan (unless it is aborted due to error).
18 : : *
19 : : * ExecutorRun accepts direction and count arguments that specify whether
20 : : * the plan is to be executed forwards, backwards, and for how many tuples.
21 : : * In some cases ExecutorRun may be called multiple times to process all
22 : : * the tuples for a plan. It is also acceptable to stop short of executing
23 : : * the whole plan (but only if it is a SELECT).
24 : : *
25 : : * ExecutorFinish must be called after the final ExecutorRun call and
26 : : * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27 : : * which should also omit ExecutorRun.
28 : : *
29 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
30 : : * Portions Copyright (c) 1994, Regents of the University of California
31 : : *
32 : : *
33 : : * IDENTIFICATION
34 : : * src/backend/executor/execMain.c
35 : : *
36 : : *-------------------------------------------------------------------------
37 : : */
38 : : #include "postgres.h"
39 : :
40 : : #include "access/sysattr.h"
41 : : #include "access/table.h"
42 : : #include "access/tableam.h"
43 : : #include "access/tupconvert.h"
44 : : #include "access/xact.h"
45 : : #include "catalog/namespace.h"
46 : : #include "catalog/partition.h"
47 : : #include "commands/matview.h"
48 : : #include "commands/trigger.h"
49 : : #include "executor/executor.h"
50 : : #include "executor/execPartition.h"
51 : : #include "executor/instrument.h"
52 : : #include "executor/nodeSubplan.h"
53 : : #include "foreign/fdwapi.h"
54 : : #include "mb/pg_wchar.h"
55 : : #include "miscadmin.h"
56 : : #include "nodes/queryjumble.h"
57 : : #include "parser/parse_relation.h"
58 : : #include "pgstat.h"
59 : : #include "rewrite/rewriteHandler.h"
60 : : #include "tcop/utility.h"
61 : : #include "utils/acl.h"
62 : : #include "utils/backend_status.h"
63 : : #include "utils/lsyscache.h"
64 : : #include "utils/partcache.h"
65 : : #include "utils/rls.h"
66 : : #include "utils/snapmgr.h"
67 : :
68 : :
69 : : /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
70 : : ExecutorStart_hook_type ExecutorStart_hook = NULL;
71 : : ExecutorRun_hook_type ExecutorRun_hook = NULL;
72 : : ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
73 : : ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
74 : :
75 : : /* Hook for plugin to get control in ExecCheckPermissions() */
76 : : ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
77 : :
78 : : /* decls for local routines only used within this module */
79 : : static void InitPlan(QueryDesc *queryDesc, int eflags);
80 : : static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
81 : : static void ExecPostprocessPlan(EState *estate);
82 : : static void ExecEndPlan(PlanState *planstate, EState *estate);
83 : : static void ExecutePlan(QueryDesc *queryDesc,
84 : : CmdType operation,
85 : : bool sendTuples,
86 : : uint64 numberTuples,
87 : : ScanDirection direction,
88 : : DestReceiver *dest);
89 : : static bool ExecCheckPermissionsModified(Oid relOid, Oid userid,
90 : : Bitmapset *modifiedCols,
91 : : AclMode requiredPerms);
92 : : static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
93 : : static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);
94 : : static void ReportNotNullViolationError(ResultRelInfo *resultRelInfo,
95 : : TupleTableSlot *slot,
96 : : EState *estate, int attnum);
97 : :
98 : : /* end of local decls */
99 : :
100 : :
101 : : /* ----------------------------------------------------------------
102 : : * ExecutorStart
103 : : *
104 : : * This routine must be called at the beginning of any execution of any
105 : : * query plan
106 : : *
107 : : * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
108 : : * only because some places use QueryDescs for utility commands). The tupDesc
109 : : * field of the QueryDesc is filled in to describe the tuples that will be
110 : : * returned, and the internal fields (estate and planstate) are set up.
111 : : *
112 : : * eflags contains flag bits as described in executor.h.
113 : : *
114 : : * NB: the CurrentMemoryContext when this is called will become the parent
115 : : * of the per-query context used for this Executor invocation.
116 : : *
117 : : * We provide a function hook variable that lets loadable plugins
118 : : * get control when ExecutorStart is called. Such a plugin would
119 : : * normally call standard_ExecutorStart().
120 : : *
121 : : * ----------------------------------------------------------------
122 : : */
123 : : void
7371 tgl@sss.pgh.pa.us 124 :CBC 361590 : ExecutorStart(QueryDesc *queryDesc, int eflags)
125 : : {
126 : : /*
127 : : * In some cases (e.g. an EXECUTE statement or an execute message with the
128 : : * extended query protocol) the query_id won't be reported, so do it now.
129 : : *
130 : : * Note that it's harmless to report the query_id multiple times, as the
131 : : * call will be ignored if the top level query_id has already been
132 : : * reported.
133 : : */
1841 bruce@momjian.us 134 : 361590 : pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
135 : :
6376 tgl@sss.pgh.pa.us 136 [ + + ]: 361590 : if (ExecutorStart_hook)
348 amitlan@postgresql.o 137 : 61840 : (*ExecutorStart_hook) (queryDesc, eflags);
138 : : else
139 : 299750 : standard_ExecutorStart(queryDesc, eflags);
6376 tgl@sss.pgh.pa.us 140 : 360406 : }
141 : :
142 : : void
143 : 361590 : standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
144 : : {
145 : : EState *estate;
146 : : MemoryContext oldcontext;
147 : :
148 : : /* sanity checks: queryDesc must not be started already */
10467 bruce@momjian.us 149 [ - + ]: 361590 : Assert(queryDesc != NULL);
8552 tgl@sss.pgh.pa.us 150 [ - + ]: 361590 : Assert(queryDesc->estate == NULL);
151 : :
152 : : /* caller must ensure the query's snapshot is active */
782 heikki.linnakangas@i 153 [ - + ]: 361590 : Assert(GetActiveSnapshot() == queryDesc->snapshot);
154 : :
155 : : /*
156 : : * If the transaction is read-only, we need to check if any writes are
157 : : * planned to non-temporary tables. EXPLAIN is considered read-only.
158 : : *
159 : : * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
160 : : * would require (a) storing the combo CID hash in shared memory, rather
161 : : * than synchronizing it just once at the start of parallelism, and (b) an
162 : : * alternative to heap_update()'s reliance on xmax for mutual exclusion.
163 : : * INSERT may have no such troubles, but we forbid it to simplify the
164 : : * checks.
165 : : *
166 : : * We have lower-level defenses in CommandCounterIncrement and elsewhere
167 : : * against performing unsafe operations in parallel mode, but this gives a
168 : : * more user-friendly error message.
169 : : */
4023 rhaas@postgresql.org 170 [ + + + + ]: 361590 : if ((XactReadOnly || IsInParallelMode()) &&
171 [ + - ]: 38944 : !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
7014 tgl@sss.pgh.pa.us 172 : 38944 : ExecCheckXactReadOnly(queryDesc->plannedstmt);
173 : :
174 : : /*
175 : : * Build EState, switch into per-query memory context for startup.
176 : : */
8552 177 : 361572 : estate = CreateExecutorState();
178 : 361572 : queryDesc->estate = estate;
179 : :
8542 180 : 361572 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
181 : :
182 : : /*
183 : : * Fill in external parameters, if any, from queryDesc; and allocate
184 : : * workspace for internal parameters
185 : : */
8552 186 : 361572 : estate->es_param_list_info = queryDesc->params;
187 : :
3095 rhaas@postgresql.org 188 [ + + ]: 361572 : if (queryDesc->plannedstmt->paramExecTypes != NIL)
189 : : {
190 : : int nParamExec;
191 : :
192 : 119776 : nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
10295 bruce@momjian.us 193 : 119776 : estate->es_param_exec_vals = (ParamExecData *)
146 michael@paquier.xyz 194 :GNC 119776 : palloc0_array(ParamExecData, nParamExec);
195 : : }
196 : :
197 : : /* We now require all callers to provide sourceText */
1846 tgl@sss.pgh.pa.us 198 [ - + ]:CBC 361572 : Assert(queryDesc->sourceText != NULL);
3359 rhaas@postgresql.org 199 : 361572 : estate->es_sourceText = queryDesc->sourceText;
200 : :
201 : : /*
202 : : * Fill in the query environment, if any, from queryDesc.
203 : : */
3322 kgrittn@postgresql.o 204 : 361572 : estate->es_queryEnv = queryDesc->queryEnv;
205 : :
206 : : /*
207 : : * If non-read-only query, set the command ID to mark output tuples with
208 : : */
6731 tgl@sss.pgh.pa.us 209 [ + + - ]: 361572 : switch (queryDesc->operation)
210 : : {
211 : 288635 : case CMD_SELECT:
212 : :
213 : : /*
214 : : * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
215 : : * tuples
216 : : */
5160 217 [ + + ]: 288635 : if (queryDesc->plannedstmt->rowMarks != NIL ||
5548 218 [ + + ]: 282675 : queryDesc->plannedstmt->hasModifyingCTE)
6731 219 : 6056 : estate->es_output_cid = GetCurrentCommandId(true);
220 : :
221 : : /*
222 : : * A SELECT without modifying CTEs can't possibly queue triggers,
223 : : * so force skip-triggers mode. This is just a marginal efficiency
224 : : * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
225 : : * all that expensive, but we might as well do it.
226 : : */
5546 227 [ + + ]: 288635 : if (!queryDesc->plannedstmt->hasModifyingCTE)
228 : 288535 : eflags |= EXEC_FLAG_SKIP_TRIGGERS;
6731 229 : 288635 : break;
230 : :
231 : 72937 : case CMD_INSERT:
232 : : case CMD_DELETE:
233 : : case CMD_UPDATE:
234 : : case CMD_MERGE:
235 : 72937 : estate->es_output_cid = GetCurrentCommandId(true);
236 : 72937 : break;
237 : :
6731 tgl@sss.pgh.pa.us 238 :UBC 0 : default:
239 [ # # ]: 0 : elog(ERROR, "unrecognized operation code: %d",
240 : : (int) queryDesc->operation);
241 : : break;
242 : : }
243 : :
244 : : /*
245 : : * Copy other important information into the EState
246 : : */
6567 alvherre@alvh.no-ip. 247 :CBC 361572 : estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
248 : 361572 : estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
5546 tgl@sss.pgh.pa.us 249 : 361572 : estate->es_top_eflags = eflags;
5985 rhaas@postgresql.org 250 : 361572 : estate->es_instrument = queryDesc->instrument_options;
2963 tgl@sss.pgh.pa.us 251 : 361572 : estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;
252 : :
253 : : /*
254 : : * Set up query-level instrumentation if extensions have requested it via
255 : : * query_instr_options. Ensure an extension has not allocated query_instr
256 : : * itself.
257 : : */
27 andres@anarazel.de 258 [ - + ]:GNC 361572 : Assert(queryDesc->query_instr == NULL);
259 [ + + ]: 361572 : if (queryDesc->query_instr_options)
260 : 42766 : queryDesc->query_instr = InstrAlloc(queryDesc->query_instr_options);
261 : :
262 : : /*
263 : : * Set up an AFTER-trigger statement context, unless told not to, or
264 : : * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
265 : : */
5546 tgl@sss.pgh.pa.us 266 [ + + ]:CBC 361572 : if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
267 : 71968 : AfterTriggerBeginQuery();
268 : :
269 : : /*
270 : : * Initialize the plan state tree
271 : : */
3153 272 : 361572 : InitPlan(queryDesc, eflags);
273 : :
8542 274 : 360406 : MemoryContextSwitchTo(oldcontext);
10892 scrappy@hub.org 275 : 360406 : }
276 : :
277 : : /* ----------------------------------------------------------------
278 : : * ExecutorRun
279 : : *
280 : : * This is the main routine of the executor module. It accepts
281 : : * the query descriptor from the traffic cop and executes the
282 : : * query plan.
283 : : *
284 : : * ExecutorStart must have been called already.
285 : : *
286 : : * If direction is NoMovementScanDirection then nothing is done
287 : : * except to start up/shut down the destination. Otherwise,
288 : : * we retrieve up to 'count' tuples in the specified direction.
289 : : *
290 : : * Note: count = 0 is interpreted as no portal limit, i.e., run to
291 : : * completion. Also note that the count limit is only applied to
292 : : * retrieved tuples, not for instance to those inserted/updated/deleted
293 : : * by a ModifyTable plan node.
294 : : *
295 : : * There is no return value, but output tuples (if any) are sent to
296 : : * the destination receiver specified in the QueryDesc; and the number
297 : : * of tuples processed at the top level can be found in
298 : : * estate->es_processed. The total number of tuples processed in all
299 : : * the ExecutorRun calls can be found in estate->es_total_processed.
300 : : *
301 : : * We provide a function hook variable that lets loadable plugins
302 : : * get control when ExecutorRun is called. Such a plugin would
303 : : * normally call standard_ExecutorRun().
304 : : *
305 : : * ----------------------------------------------------------------
306 : : */
307 : : void
8552 tgl@sss.pgh.pa.us 308 : 354502 : ExecutorRun(QueryDesc *queryDesc,
309 : : ScanDirection direction, uint64 count)
310 : : {
6500 311 [ + + ]: 354502 : if (ExecutorRun_hook)
512 312 : 60194 : (*ExecutorRun_hook) (queryDesc, direction, count);
313 : : else
314 : 294308 : standard_ExecutorRun(queryDesc, direction, count);
6500 315 : 338823 : }
316 : :
317 : : void
318 : 354502 : standard_ExecutorRun(QueryDesc *queryDesc,
319 : : ScanDirection direction, uint64 count)
320 : : {
321 : : EState *estate;
322 : : CmdType operation;
323 : : DestReceiver *dest;
324 : : bool sendTuples;
325 : : MemoryContext oldcontext;
326 : :
327 : : /* sanity checks */
8542 328 [ - + ]: 354502 : Assert(queryDesc != NULL);
329 : :
330 : 354502 : estate = queryDesc->estate;
331 : :
332 [ - + ]: 354502 : Assert(estate != NULL);
5546 333 [ - + ]: 354502 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
334 : :
335 : : /* caller must ensure the query's snapshot is active */
782 heikki.linnakangas@i 336 [ - + ]: 354502 : Assert(GetActiveSnapshot() == estate->es_snapshot);
337 : :
338 : : /*
339 : : * Switch into per-query memory context
340 : : */
8542 tgl@sss.pgh.pa.us 341 : 354502 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
342 : :
343 : : /* Allow instrumentation of Executor overall runtime */
27 andres@anarazel.de 344 [ + + ]:GNC 354502 : if (queryDesc->query_instr)
345 : 42453 : InstrStart(queryDesc->query_instr);
346 : :
347 : : /*
348 : : * extract information from the query descriptor and the query feature.
349 : : */
10467 bruce@momjian.us 350 :CBC 354502 : operation = queryDesc->operation;
351 : 354502 : dest = queryDesc->dest;
352 : :
353 : : /*
354 : : * startup tuple receiver, if we will be emitting tuples
355 : : */
8833 tgl@sss.pgh.pa.us 356 : 354502 : estate->es_processed = 0;
357 : :
7206 358 [ + + ]: 426086 : sendTuples = (operation == CMD_SELECT ||
6051 359 [ + + ]: 71584 : queryDesc->plannedstmt->hasReturning);
360 : :
7206 361 [ + + ]: 354502 : if (sendTuples)
3162 peter_e@gmx.net 362 : 286185 : dest->rStartup(dest, operation, queryDesc->tupDesc);
363 : :
364 : : /*
365 : : * Run plan, unless direction is NoMovement.
366 : : *
367 : : * Note: pquery.c selects NoMovement if a prior call already reached
368 : : * end-of-data in the user-specified fetch direction. This is important
369 : : * because various parts of the executor can misbehave if called again
370 : : * after reporting EOF. For example, heapam.c would actually restart a
371 : : * heapscan and return all its data afresh. There is also some doubt
372 : : * about whether a parallel plan would operate properly if an additional,
373 : : * necessarily non-parallel execution request occurs after completing a
374 : : * parallel execution. (That case should work, but it's untested.)
375 : : */
6395 tgl@sss.pgh.pa.us 376 [ + + ]: 354477 : if (!ScanDirectionIsNoMovement(direction))
512 377 : 353670 : ExecutePlan(queryDesc,
378 : : operation,
379 : : sendTuples,
380 : : count,
381 : : direction,
382 : : dest);
383 : :
384 : : /*
385 : : * Update es_total_processed to keep track of the number of tuples
386 : : * processed across multiple ExecutorRun() calls.
387 : : */
1125 michael@paquier.xyz 388 : 338823 : estate->es_total_processed += estate->es_processed;
389 : :
390 : : /*
391 : : * shutdown tuple receiver, if we started it
392 : : */
7206 tgl@sss.pgh.pa.us 393 [ + + ]: 338823 : if (sendTuples)
3162 peter_e@gmx.net 394 : 272668 : dest->rShutdown(dest);
395 : :
27 andres@anarazel.de 396 [ + + ]:GNC 338823 : if (queryDesc->query_instr)
397 : 40964 : InstrStop(queryDesc->query_instr);
398 : :
8542 tgl@sss.pgh.pa.us 399 :CBC 338823 : MemoryContextSwitchTo(oldcontext);
10892 scrappy@hub.org 400 : 338823 : }
401 : :
402 : : /* ----------------------------------------------------------------
403 : : * ExecutorFinish
404 : : *
405 : : * This routine must be called after the last ExecutorRun call.
406 : : * It performs cleanup such as firing AFTER triggers. It is
407 : : * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
408 : : * include these actions in the total runtime.
409 : : *
410 : : * We provide a function hook variable that lets loadable plugins
411 : : * get control when ExecutorFinish is called. Such a plugin would
412 : : * normally call standard_ExecutorFinish().
413 : : *
414 : : * ----------------------------------------------------------------
415 : : */
416 : : void
5546 tgl@sss.pgh.pa.us 417 : 329288 : ExecutorFinish(QueryDesc *queryDesc)
418 : : {
419 [ + + ]: 329288 : if (ExecutorFinish_hook)
420 : 54709 : (*ExecutorFinish_hook) (queryDesc);
421 : : else
422 : 274579 : standard_ExecutorFinish(queryDesc);
423 : 328491 : }
424 : :
425 : : void
426 : 329288 : standard_ExecutorFinish(QueryDesc *queryDesc)
427 : : {
428 : : EState *estate;
429 : : MemoryContext oldcontext;
430 : :
431 : : /* sanity checks */
432 [ - + ]: 329288 : Assert(queryDesc != NULL);
433 : :
434 : 329288 : estate = queryDesc->estate;
435 : :
436 [ - + ]: 329288 : Assert(estate != NULL);
437 [ - + ]: 329288 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
438 : :
439 : : /* This should be run once and only once per Executor instance */
348 amitlan@postgresql.o 440 [ - + ]: 329288 : Assert(!estate->es_finished);
441 : :
442 : : /* Switch into per-query memory context */
5546 tgl@sss.pgh.pa.us 443 : 329288 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
444 : :
445 : : /* Allow instrumentation of Executor overall runtime */
27 andres@anarazel.de 446 [ + + ]:GNC 329288 : if (queryDesc->query_instr)
447 : 40964 : InstrStart(queryDesc->query_instr);
448 : :
449 : : /* Run ModifyTable nodes to completion */
5546 tgl@sss.pgh.pa.us 450 :CBC 329288 : ExecPostprocessPlan(estate);
451 : :
452 : : /* Execute queued AFTER triggers, unless told not to */
453 [ + + ]: 329288 : if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
454 : 69000 : AfterTriggerEndQuery(estate);
455 : :
27 andres@anarazel.de 456 [ + + ]:GNC 328491 : if (queryDesc->query_instr)
457 : 40788 : InstrStop(queryDesc->query_instr);
458 : :
5546 tgl@sss.pgh.pa.us 459 :CBC 328491 : MemoryContextSwitchTo(oldcontext);
460 : :
461 : 328491 : estate->es_finished = true;
462 : 328491 : }
463 : :
464 : : /* ----------------------------------------------------------------
465 : : * ExecutorEnd
466 : : *
467 : : * This routine must be called at the end of execution of any
468 : : * query plan
469 : : *
470 : : * We provide a function hook variable that lets loadable plugins
471 : : * get control when ExecutorEnd is called. Such a plugin would
472 : : * normally call standard_ExecutorEnd().
473 : : *
474 : : * ----------------------------------------------------------------
475 : : */
476 : : void
8552 477 : 342580 : ExecutorEnd(QueryDesc *queryDesc)
478 : : {
6376 479 [ + + ]: 342580 : if (ExecutorEnd_hook)
480 : 57626 : (*ExecutorEnd_hook) (queryDesc);
481 : : else
482 : 284954 : standard_ExecutorEnd(queryDesc);
483 : 342580 : }
484 : :
485 : : void
486 : 342580 : standard_ExecutorEnd(QueryDesc *queryDesc)
487 : : {
488 : : EState *estate;
489 : : MemoryContext oldcontext;
490 : :
491 : : /* sanity checks */
10467 bruce@momjian.us 492 [ - + ]: 342580 : Assert(queryDesc != NULL);
493 : :
8552 tgl@sss.pgh.pa.us 494 : 342580 : estate = queryDesc->estate;
495 : :
8542 496 [ - + ]: 342580 : Assert(estate != NULL);
497 : :
540 michael@paquier.xyz 498 [ + + ]: 342580 : if (estate->es_parallel_workers_to_launch > 0)
499 : 500 : pgstat_update_parallel_workers_stats((PgStat_Counter) estate->es_parallel_workers_to_launch,
500 : 500 : (PgStat_Counter) estate->es_parallel_workers_launched);
501 : :
502 : : /*
503 : : * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
504 : : * Assert is needed because ExecutorFinish is new as of 9.1, and callers
505 : : * might forget to call it.
506 : : */
348 amitlan@postgresql.o 507 [ + + - + ]: 342580 : Assert(estate->es_finished ||
508 : : (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
509 : :
510 : : /*
511 : : * Switch into per-query memory context to run ExecEndPlan
512 : : */
8542 tgl@sss.pgh.pa.us 513 : 342580 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
514 : :
515 : 342580 : ExecEndPlan(queryDesc->planstate, estate);
516 : :
517 : : /* do away with our snapshots */
6567 alvherre@alvh.no-ip. 518 : 342580 : UnregisterSnapshot(estate->es_snapshot);
519 : 342580 : UnregisterSnapshot(estate->es_crosscheck_snapshot);
520 : :
521 : : /*
522 : : * Must switch out of context before destroying it
523 : : */
8542 tgl@sss.pgh.pa.us 524 : 342580 : MemoryContextSwitchTo(oldcontext);
525 : :
526 : : /*
527 : : * Release EState and per-query memory context. This should release
528 : : * everything the executor has allocated.
529 : : */
530 : 342580 : FreeExecutorState(estate);
531 : :
532 : : /* Reset queryDesc fields that no longer point to anything */
533 : 342580 : queryDesc->tupDesc = NULL;
534 : 342580 : queryDesc->estate = NULL;
535 : 342580 : queryDesc->planstate = NULL;
27 andres@anarazel.de 536 :GNC 342580 : queryDesc->query_instr = NULL;
9553 tgl@sss.pgh.pa.us 537 :CBC 342580 : }
538 : :
539 : : /* ----------------------------------------------------------------
540 : : * ExecutorRewind
541 : : *
542 : : * This routine may be called on an open queryDesc to rewind it
543 : : * to the start.
544 : : * ----------------------------------------------------------------
545 : : */
546 : : void
8456 547 : 63 : ExecutorRewind(QueryDesc *queryDesc)
548 : : {
549 : : EState *estate;
550 : : MemoryContext oldcontext;
551 : :
552 : : /* sanity checks */
553 [ - + ]: 63 : Assert(queryDesc != NULL);
554 : :
555 : 63 : estate = queryDesc->estate;
556 : :
557 [ - + ]: 63 : Assert(estate != NULL);
558 : :
559 : : /* It's probably not sensible to rescan updating queries */
560 [ - + ]: 63 : Assert(queryDesc->operation == CMD_SELECT);
561 : :
562 : : /*
563 : : * Switch into per-query memory context
564 : : */
565 : 63 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
566 : :
567 : : /*
568 : : * rescan plan
569 : : */
5776 570 : 63 : ExecReScan(queryDesc->planstate);
571 : :
8456 572 : 63 : MemoryContextSwitchTo(oldcontext);
573 : 63 : }
574 : :
575 : :
576 : : /*
577 : : * ExecCheckPermissions
578 : : * Check access permissions of relations mentioned in a query
579 : : *
580 : : * Returns true if permissions are adequate. Otherwise, throws an appropriate
581 : : * error if ereport_on_violation is true, or simply returns false otherwise.
582 : : *
583 : : * Note that this does NOT address row-level security policies (aka: RLS). If
584 : : * rows will be returned to the user as a result of this permission check
585 : : * passing, then RLS also needs to be consulted (and check_enable_rls()).
586 : : *
587 : : * See rewrite/rowsecurity.c.
588 : : *
589 : : * NB: rangeTable is no longer used by us, but kept around for the hooks that
590 : : * might still want to look at the RTEs.
591 : : */
592 : : bool
1246 alvherre@alvh.no-ip. 593 : 368603 : ExecCheckPermissions(List *rangeTable, List *rteperminfos,
594 : : bool ereport_on_violation)
595 : : {
596 : : ListCell *l;
5766 rhaas@postgresql.org 597 : 368603 : bool result = true;
598 : :
599 : : #ifdef USE_ASSERT_CHECKING
1097 alvherre@alvh.no-ip. 600 : 368603 : Bitmapset *indexset = NULL;
601 : :
602 : : /* Check that rteperminfos is consistent with rangeTable */
603 [ + - + + : 1122069 : foreach(l, rangeTable)
+ + ]
604 : : {
605 : 753466 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
606 : :
607 [ + + ]: 753466 : if (rte->perminfoindex != 0)
608 : : {
609 : : /* Sanity checks */
610 : :
611 : : /*
612 : : * Only relation RTEs and subquery RTEs that were once relation
613 : : * RTEs (views, property graphs) have their perminfoindex set.
614 : : */
1057 amitlan@postgresql.o 615 [ + + + - : 381679 : Assert(rte->rtekind == RTE_RELATION ||
+ + - + ]
616 : : (rte->rtekind == RTE_SUBQUERY &&
617 : : (rte->relkind == RELKIND_VIEW || rte->relkind == RELKIND_PROPGRAPH)));
618 : :
1097 alvherre@alvh.no-ip. 619 : 381679 : (void) getRTEPermissionInfo(rteperminfos, rte);
620 : : /* Many-to-one mapping not allowed */
621 [ - + ]: 381679 : Assert(!bms_is_member(rte->perminfoindex, indexset));
622 : 381679 : indexset = bms_add_member(indexset, rte->perminfoindex);
623 : : }
624 : : }
625 : :
626 : : /* All rteperminfos are referenced */
627 [ - + ]: 368603 : Assert(bms_num_members(indexset) == list_length(rteperminfos));
628 : : #endif
629 : :
1246 630 [ + + + + : 749185 : foreach(l, rteperminfos)
+ + ]
631 : : {
632 : 381499 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
633 : :
634 [ - + ]: 381499 : Assert(OidIsValid(perminfo->relid));
635 : 381499 : result = ExecCheckOneRelPerms(perminfo);
5766 rhaas@postgresql.org 636 [ + + ]: 381499 : if (!result)
637 : : {
638 [ + + ]: 917 : if (ereport_on_violation)
1246 alvherre@alvh.no-ip. 639 : 909 : aclcheck_error(ACLCHECK_NO_PRIV,
640 : 909 : get_relkind_objtype(get_rel_relkind(perminfo->relid)),
641 : 909 : get_rel_name(perminfo->relid));
5766 rhaas@postgresql.org 642 : 8 : return false;
643 : : }
644 : : }
645 : :
5779 646 [ + + ]: 367686 : if (ExecutorCheckPerms_hook)
1246 alvherre@alvh.no-ip. 647 : 6 : result = (*ExecutorCheckPerms_hook) (rangeTable, rteperminfos,
648 : : ereport_on_violation);
5766 rhaas@postgresql.org 649 : 367686 : return result;
650 : : }
651 : :
652 : : /*
653 : : * ExecCheckOneRelPerms
654 : : * Check access permissions for a single relation.
655 : : */
656 : : bool
1246 alvherre@alvh.no-ip. 657 : 398347 : ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
658 : : {
659 : : AclMode requiredPerms;
660 : : AclMode relPerms;
661 : : AclMode remainingPerms;
662 : : Oid userid;
663 : 398347 : Oid relOid = perminfo->relid;
664 : :
665 : 398347 : requiredPerms = perminfo->requiredPerms;
666 [ - + ]: 398347 : Assert(requiredPerms != 0);
667 : :
668 : : /*
669 : : * userid to check as: current user unless we have a setuid indication.
670 : : *
671 : : * Note: GetUserId() is presently fast enough that there's no harm in
672 : : * calling it separately for each relation. If that stops being true, we
673 : : * could call it once in ExecCheckPermissions and pass the userid down
674 : : * from there. But for now, no need for the extra clutter.
675 : : */
676 : 796694 : userid = OidIsValid(perminfo->checkAsUser) ?
677 [ + + ]: 398347 : perminfo->checkAsUser : GetUserId();
678 : :
679 : : /*
680 : : * We must have *all* the requiredPerms bits, but some of the bits can be
681 : : * satisfied from column-level rather than relation-level permissions.
682 : : * First, remove any bits that are satisfied by relation permissions.
683 : : */
6312 tgl@sss.pgh.pa.us 684 : 398347 : relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
685 : 398347 : remainingPerms = requiredPerms & ~relPerms;
686 [ + + ]: 398347 : if (remainingPerms != 0)
687 : : {
4015 andres@anarazel.de 688 : 2052 : int col = -1;
689 : :
690 : : /*
691 : : * If we lack any permissions that exist only as relation permissions,
692 : : * we can fail straight away.
693 : : */
6312 tgl@sss.pgh.pa.us 694 [ + + ]: 2052 : if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
5766 rhaas@postgresql.org 695 : 104 : return false;
696 : :
697 : : /*
698 : : * Check to see if we have the needed privileges at column level.
699 : : *
700 : : * Note: failures just report a table-level error; it would be nicer
701 : : * to report a column-level error if we have some but not all of the
702 : : * column privileges.
703 : : */
6312 tgl@sss.pgh.pa.us 704 [ + + ]: 1948 : if (remainingPerms & ACL_SELECT)
705 : : {
706 : : /*
707 : : * When the query doesn't explicitly reference any columns (for
708 : : * example, SELECT COUNT(*) FROM table), allow the query if we
709 : : * have SELECT on any column of the rel, as per SQL spec.
710 : : */
1246 alvherre@alvh.no-ip. 711 [ + + ]: 1104 : if (bms_is_empty(perminfo->selectedCols))
712 : : {
6312 tgl@sss.pgh.pa.us 713 [ + + ]: 65 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
714 : : ACLMASK_ANY) != ACLCHECK_OK)
5766 rhaas@postgresql.org 715 : 25 : return false;
716 : : }
717 : :
1246 alvherre@alvh.no-ip. 718 [ + + ]: 1762 : while ((col = bms_next_member(perminfo->selectedCols, col)) >= 0)
719 : : {
720 : : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
4176 tgl@sss.pgh.pa.us 721 : 1362 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
722 : :
723 [ + + ]: 1362 : if (attno == InvalidAttrNumber)
724 : : {
725 : : /* Whole-row reference, must have priv on all cols */
6312 726 [ + + ]: 44 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
727 : : ACLMASK_ALL) != ACLCHECK_OK)
5766 rhaas@postgresql.org 728 : 28 : return false;
729 : : }
730 : : else
731 : : {
4176 tgl@sss.pgh.pa.us 732 [ + + ]: 1318 : if (pg_attribute_aclcheck(relOid, attno, userid,
733 : : ACL_SELECT) != ACLCHECK_OK)
5766 rhaas@postgresql.org 734 : 651 : return false;
735 : : }
736 : : }
737 : : }
738 : :
739 : : /*
740 : : * Basically the same for the mod columns, for both INSERT and UPDATE
741 : : * privilege as specified by remainingPerms.
742 : : */
1246 alvherre@alvh.no-ip. 743 [ + + ]: 1244 : if (remainingPerms & ACL_INSERT &&
744 [ + + ]: 220 : !ExecCheckPermissionsModified(relOid,
745 : : userid,
746 : : perminfo->insertedCols,
747 : : ACL_INSERT))
4015 andres@anarazel.de 748 : 116 : return false;
749 : :
1246 alvherre@alvh.no-ip. 750 [ + + ]: 1128 : if (remainingPerms & ACL_UPDATE &&
751 [ + + ]: 825 : !ExecCheckPermissionsModified(relOid,
752 : : userid,
753 : : perminfo->updatedCols,
754 : : ACL_UPDATE))
4015 andres@anarazel.de 755 : 260 : return false;
756 : : }
757 : 397163 : return true;
758 : : }
759 : :
760 : : /*
761 : : * ExecCheckPermissionsModified
762 : : * Check INSERT or UPDATE access permissions for a single relation (these
763 : : * are processed uniformly).
764 : : */
765 : : static bool
1246 alvherre@alvh.no-ip. 766 : 1045 : ExecCheckPermissionsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
767 : : AclMode requiredPerms)
768 : : {
4015 andres@anarazel.de 769 : 1045 : int col = -1;
770 : :
771 : : /*
772 : : * When the query doesn't explicitly update any columns, allow the query
773 : : * if we have permission on any column of the rel. This is to handle
774 : : * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
775 : : */
776 [ + + ]: 1045 : if (bms_is_empty(modifiedCols))
777 : : {
778 [ + + ]: 49 : if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
779 : : ACLMASK_ANY) != ACLCHECK_OK)
780 : 36 : return false;
781 : : }
782 : :
783 [ + + ]: 1772 : while ((col = bms_next_member(modifiedCols, col)) >= 0)
784 : : {
785 : : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
786 : 1103 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
787 : :
788 [ - + ]: 1103 : if (attno == InvalidAttrNumber)
789 : : {
790 : : /* whole-row reference can't happen here */
4015 andres@anarazel.de 791 [ # # ]:UBC 0 : elog(ERROR, "whole-row update is not implemented");
792 : : }
793 : : else
794 : : {
4015 andres@anarazel.de 795 [ + + ]:CBC 1103 : if (pg_attribute_aclcheck(relOid, attno, userid,
796 : : requiredPerms) != ACLCHECK_OK)
797 : 340 : return false;
798 : : }
799 : : }
5766 rhaas@postgresql.org 800 : 669 : return true;
801 : : }
802 : :
803 : : /*
804 : : * Check that the query does not imply any writes to non-temp tables;
805 : : * unless we're in parallel mode, in which case don't even allow writes
806 : : * to temp tables.
807 : : *
808 : : * Note: in a Hot Standby this would need to reject writes to temp
809 : : * tables just as we do in parallel mode; but an HS standby can't have created
810 : : * any temp tables in the first place, so no need to check that.
811 : : */
812 : : static void
6746 bruce@momjian.us 813 : 38944 : ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
814 : : {
815 : : ListCell *l;
816 : :
817 : : /*
818 : : * Fail if write permissions are requested in parallel mode for table
819 : : * (temp or non-temp), otherwise fail for any non-temp table.
820 : : */
1246 alvherre@alvh.no-ip. 821 [ + + + + : 112386 : foreach(l, plannedstmt->permInfos)
+ + ]
822 : : {
823 : 73460 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
824 : :
825 [ + + ]: 73460 : if ((perminfo->requiredPerms & (~ACL_SELECT)) == 0)
8147 tgl@sss.pgh.pa.us 826 : 73434 : continue;
827 : :
1246 alvherre@alvh.no-ip. 828 [ + + ]: 26 : if (isTempNamespace(get_rel_namespace(perminfo->relid)))
8147 tgl@sss.pgh.pa.us 829 : 8 : continue;
830 : :
2255 alvherre@alvh.no-ip. 831 : 18 : PreventCommandIfReadOnly(CreateCommandName((Node *) plannedstmt));
832 : : }
833 : :
4023 rhaas@postgresql.org 834 [ + + - + ]: 38926 : if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
2255 alvherre@alvh.no-ip. 835 : 8 : PreventCommandIfParallelMode(CreateCommandName((Node *) plannedstmt));
8516 peter_e@gmx.net 836 : 38926 : }
837 : :
838 : :
839 : : /* ----------------------------------------------------------------
840 : : * InitPlan
841 : : *
842 : : * Initializes the query plan: open files, allocate storage
843 : : * and start up the rule manager
844 : : * ----------------------------------------------------------------
845 : : */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/*
	 * Do permissions checks
	 */
	ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);

	/*
	 * initialize the node's execution state
	 */
	ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos,
					   bms_copy(plannedstmt->unprunableRelids));

	estate->es_plannedstmt = plannedstmt;
	estate->es_part_prune_infos = plannedstmt->partPruneInfos;

	/*
	 * Perform runtime "initial" pruning to identify which child subplans,
	 * corresponding to the children of plan nodes that contain
	 * PartitionPruneInfo such as Append, will not be executed. The results,
	 * which are bitmapsets of indexes of the child subplans that will be
	 * executed, are saved in es_part_prune_results.  These results correspond
	 * to each PartitionPruneInfo entry, and the es_part_prune_results list is
	 * parallel to es_part_prune_infos.
	 */
	ExecDoInitialPruning(estate);

	/*
	 * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
	 */
	if (plannedstmt->rowMarks)
	{
		/*
		 * The array is indexed by RT index minus one; entries for RTEs with
		 * no rowmark simply stay NULL.
		 */
		estate->es_rowmarks = (ExecRowMark **)
			palloc0_array(ExecRowMark *, estate->es_range_table_size);
		foreach(l, plannedstmt->rowMarks)
		{
			PlanRowMark *rc = (PlanRowMark *) lfirst(l);
			RangeTblEntry *rte = exec_rt_fetch(rc->rti, estate);
			Oid			relid;
			Relation	relation;
			ExecRowMark *erm;

			/* ignore "parent" rowmarks; they are irrelevant at runtime */
			if (rc->isParent)
				continue;

			/*
			 * Also ignore rowmarks belonging to child tables that have been
			 * pruned in ExecDoInitialPruning().
			 */
			if (rte->rtekind == RTE_RELATION &&
				!bms_is_member(rc->rti, estate->es_unpruned_relids))
				continue;

			/* get relation's OID (will produce InvalidOid if subquery) */
			relid = rte->relid;

			/* open relation, if we need to access it for this mark type */
			switch (rc->markType)
			{
				case ROW_MARK_EXCLUSIVE:
				case ROW_MARK_NOKEYEXCLUSIVE:
				case ROW_MARK_SHARE:
				case ROW_MARK_KEYSHARE:
				case ROW_MARK_REFERENCE:
					relation = ExecGetRangeTableRelation(estate, rc->rti, false);
					break;
				case ROW_MARK_COPY:
					/* no physical table access is required */
					relation = NULL;
					break;
				default:
					elog(ERROR, "unrecognized markType: %d", rc->markType);
					relation = NULL;	/* keep compiler quiet */
					break;
			}

			/* Check that relation is a legal target for marking */
			if (relation)
				CheckValidRowMarkRel(relation, rc->markType);

			erm = palloc_object(ExecRowMark);
			erm->relation = relation;
			erm->relid = relid;
			erm->rti = rc->rti;
			erm->prti = rc->prti;
			erm->rowmarkId = rc->rowmarkId;
			erm->markType = rc->markType;
			erm->strength = rc->strength;
			erm->waitPolicy = rc->waitPolicy;
			erm->ermActive = false;
			ItemPointerSetInvalid(&(erm->curCtid));
			erm->ermExtra = NULL;

			/* each RT index may carry at most one rowmark */
			Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size &&
				   estate->es_rowmarks[erm->rti - 1] == NULL);

			estate->es_rowmarks[erm->rti - 1] = erm;
		}
	}

	/*
	 * Initialize the executor's tuple table to empty.
	 */
	estate->es_tupleTable = NIL;

	/* signal that this EState is not used for EPQ */
	estate->es_epq_active = NULL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;						/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags
			& ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return.
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT queries need a filter if
	 * there are any junk attrs in the top-level tlist.
	 */
	if (operation == CMD_SELECT)
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		foreach(tlist, plan->targetlist)
		{
			TargetEntry *tle = (TargetEntry *) lfirst(tlist);

			if (tle->resjunk)
			{
				junk_filter_needed = true;
				break;
			}
		}

		if (junk_filter_needed)
		{
			JunkFilter *j;
			TupleTableSlot *slot;

			slot = ExecInitExtraTupleSlot(estate, NULL, &TTSOpsVirtual);
			j = ExecInitJunkFilter(planstate->plan->targetlist,
								   slot);
			estate->es_junkFilter = j;

			/* Want to return the cleaned tuple type */
			tupType = j->jf_cleanTupType;
		}
	}

	/* Hand the result descriptor and plan state tree back via the QueryDesc */
	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;
}
1047 : :
1048 : : /*
1049 : : * Check that a proposed result relation is a legal target for the operation
1050 : : *
1051 : : * Generally the parser and/or planner should have noticed any such mistake
1052 : : * already, but let's make sure.
1053 : : *
1054 : : * For INSERT ON CONFLICT, the result relation is required to support the
1055 : : * onConflictAction, regardless of whether a conflict actually occurs.
1056 : : *
1057 : : * For MERGE, mergeActions is the list of actions that may be performed. The
1058 : : * result relation is required to support every action, regardless of whether
1059 : : * or not they are all executed.
1060 : : *
1061 : : * Note: when changing this function, you probably also need to look at
1062 : : * CheckValidRowMarkRel.
1063 : : */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation,
					OnConflictAction onConflictAction, List *mergeActions)
{
	Relation	resultRel = resultRelInfo->ri_RelationDesc;
	FdwRoutine *fdwroutine;

	/* Expect a fully-formed ResultRelInfo from InitResultRelInfo(). */
	Assert(resultRelInfo->ri_needLockTagTuple ==
		   IsInplaceUpdateRelation(resultRel));

	/* Dispatch on relkind; anything not handled below cannot be a target. */
	switch (resultRel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:

			/*
			 * For MERGE, check that the target relation supports each action.
			 * For other operations, just check the operation itself.
			 */
			if (operation == CMD_MERGE)
				foreach_node(MergeAction, action, mergeActions)
					CheckCmdReplicaIdentity(resultRel, action->commandType);
			else
				CheckCmdReplicaIdentity(resultRel, operation);

			/*
			 * For INSERT ON CONFLICT DO UPDATE, additionally check that the
			 * target relation supports UPDATE.
			 */
			if (onConflictAction == ONCONFLICT_UPDATE)
				CheckCmdReplicaIdentity(resultRel, CMD_UPDATE);
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_VIEW:

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger.  Otherwise,
			 * complain, but omit errdetail because we haven't got the
			 * information handy (and given that it really shouldn't happen,
			 * it's not worth great exertion to get).
			 */
			if (!view_has_instead_trigger(resultRel, operation, mergeActions))
				error_view_not_updatable(resultRel, operation, mergeActions,
										 NULL);
			break;
		case RELKIND_MATVIEW:
			/* Writable only while REFRESH MATERIALIZED VIEW is in progress */
			if (!MatViewIncrementalMaintenanceIsEnabled())
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = resultRelInfo->ri_FdwRoutine;
			switch (operation)
			{
				case CMD_INSERT:
					/* FDW must provide the callback and, if it implements
					 * IsForeignRelUpdatable, report this table as insertable */
					if (fdwroutine->ExecForeignInsert == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot insert into foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow inserts",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_UPDATE:
					if (fdwroutine->ExecForeignUpdate == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot update foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow updates",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_DELETE:
					if (fdwroutine->ExecForeignDelete == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot delete from foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow deletes",
										RelationGetRelationName(resultRel))));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		case RELKIND_PROPGRAPH:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change property graph \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
	}
}
1191 : :
1192 : : /*
1193 : : * Check that a proposed rowmark target relation is a legal target
1194 : : *
1195 : : * In most cases parser and/or planner should have noticed this already, but
1196 : : * they don't cover all cases.
1197 : : */
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
	FdwRoutine *fdwroutine;

	/* Dispatch on relkind; anything not handled below cannot be row-marked. */
	switch (rel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			/* Must disallow this because we don't vacuum sequences */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in sequence \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_TOASTVALUE:
			/* We could allow this, but there seems no good reason to */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in TOAST relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_VIEW:
			/* Should not get here; planner should have expanded the view */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in view \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_MATVIEW:
			/* Allow referencing a matview, but not actual locking clauses */
			if (markType != ROW_MARK_REFERENCE)
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot lock rows in materialized view \"%s\"",
								RelationGetRelationName(rel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = GetFdwRoutineForRelation(rel, false);
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(rel))));
			break;
		case RELKIND_PROPGRAPH:
			/* Should not get here; rewriter should have expanded the graph */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg_internal("cannot lock rows in property graph \"%s\"",
									 RelationGetRelationName(rel))));
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
	}
}
1262 : :
1263 : : /*
1264 : : * Initialize ResultRelInfo data for one result relation
1265 : : *
1266 : : * Caution: before Postgres 9.1, this function included the relkind checking
1267 : : * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1268 : : * appropriate. Be sure callers cover those needs.
1269 : : */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  ResultRelInfo *partition_root_rri,
				  int instrument_options)
{
	/* Zero the whole node first, so any field not set below is NULL/0/false */
	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	resultRelInfo->ri_needLockTagTuple =
		IsInplaceUpdateRelation(resultRelationDesc);
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		/* per-trigger function caches and WHEN-expression states */
		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0_array(FmgrInfo, n);
		resultRelInfo->ri_TrigWhenExprs = (ExprState **)
			palloc0_array(ExprState *, n);
		if (instrument_options)
			resultRelInfo->ri_TrigInstrument = InstrAllocTrigger(n, instrument_options);
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigWhenExprs = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
	else
		resultRelInfo->ri_FdwRoutine = NULL;

	/* The following fields are set later if needed */
	resultRelInfo->ri_RowIdAttNo = 0;
	resultRelInfo->ri_extraUpdatedCols = NULL;
	resultRelInfo->ri_projectNew = NULL;
	resultRelInfo->ri_newTupleSlot = NULL;
	resultRelInfo->ri_oldTupleSlot = NULL;
	resultRelInfo->ri_projectNewInfoValid = false;
	resultRelInfo->ri_FdwState = NULL;
	resultRelInfo->ri_usesFdwDirectModify = false;
	resultRelInfo->ri_CheckConstraintExprs = NULL;
	resultRelInfo->ri_GenVirtualNotNullConstraintExprs = NULL;
	resultRelInfo->ri_GeneratedExprsI = NULL;
	resultRelInfo->ri_GeneratedExprsU = NULL;
	resultRelInfo->ri_projectReturning = NULL;
	resultRelInfo->ri_onConflictArbiterIndexes = NIL;
	resultRelInfo->ri_onConflict = NULL;
	resultRelInfo->ri_forPortionOf = NULL;
	resultRelInfo->ri_ReturningSlot = NULL;
	resultRelInfo->ri_TrigOldSlot = NULL;
	resultRelInfo->ri_TrigNewSlot = NULL;
	resultRelInfo->ri_AllNullSlot = NULL;
	resultRelInfo->ri_MergeActions[MERGE_WHEN_MATCHED] = NIL;
	resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] = NIL;
	resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET] = NIL;
	resultRelInfo->ri_MergeJoinCondition = NULL;

	/*
	 * Only ExecInitPartitionInfo() and ExecInitPartitionDispatchInfo() pass
	 * non-NULL partition_root_rri.  For child relations that are part of the
	 * initial query rather than being dynamically added by tuple routing,
	 * this field is filled in ExecInitModifyTable().
	 */
	resultRelInfo->ri_RootResultRelInfo = partition_root_rri;
	/* Set by ExecGetRootToChildMap */
	resultRelInfo->ri_RootToChildMap = NULL;
	resultRelInfo->ri_RootToChildMapValid = false;
	/* Set by ExecInitRoutingInfo */
	resultRelInfo->ri_PartitionTupleSlot = NULL;
	resultRelInfo->ri_ChildToRootMap = NULL;
	resultRelInfo->ri_ChildToRootMapValid = false;
	resultRelInfo->ri_CopyMultiInsertBuffer = NULL;
}
1352 : :
1353 : : /*
1354 : : * ExecGetTriggerResultRel
1355 : : * Get a ResultRelInfo for a trigger target relation.
1356 : : *
1357 : : * Most of the time, triggers are fired on one of the result relations of the
1358 : : * query, and so we can just return a suitable one we already made and stored
1359 : : * in the es_opened_result_relations or es_tuple_routing_result_relations
1360 : : * Lists.
1361 : : *
1362 : : * However, it is sometimes necessary to fire triggers on other relations;
1363 : : * this happens mainly when an RI update trigger queues additional triggers
1364 : : * on other relations, which will be processed in the context of the outer
1365 : : * query. For efficiency's sake, we want to have a ResultRelInfo for those
1366 : : * triggers too; that can avoid repeated re-opening of the relation. (It
1367 : : * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1368 : : * triggers.) So we make additional ResultRelInfo's as needed, and save them
1369 : : * in es_trig_target_relations.
1370 : : */
1371 : : ResultRelInfo *
1507 alvherre@alvh.no-ip. 1372 : 5613 : ExecGetTriggerResultRel(EState *estate, Oid relid,
1373 : : ResultRelInfo *rootRelInfo)
1374 : : {
1375 : : ResultRelInfo *rInfo;
1376 : : ListCell *l;
1377 : : Relation rel;
1378 : : MemoryContext oldcontext;
1379 : :
1380 : : /*
1381 : : * Before creating a new ResultRelInfo, check if we've already made and
1382 : : * cached one for this relation. We must ensure that the given
1383 : : * 'rootRelInfo' matches the one stored in the cached ResultRelInfo as
1384 : : * trigger handling for partitions can result in mixed requirements for
1385 : : * what ri_RootResultRelInfo is set to.
1386 : : */
1387 : :
1388 : : /* Search through the query result relations */
2030 heikki.linnakangas@i 1389 [ + + + + : 7490 : foreach(l, estate->es_opened_result_relations)
+ + ]
1390 : : {
1391 : 6201 : rInfo = lfirst(l);
191 drowley@postgresql.o 1392 [ + + ]: 6201 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid &&
1393 [ + + ]: 4586 : rInfo->ri_RootResultRelInfo == rootRelInfo)
6838 tgl@sss.pgh.pa.us 1394 : 4324 : return rInfo;
1395 : : }
1396 : :
1397 : : /*
1398 : : * Search through the result relations that were created during tuple
1399 : : * routing, if any.
1400 : : */
3008 rhaas@postgresql.org 1401 [ + + + + : 2025 : foreach(l, estate->es_tuple_routing_result_relations)
+ + ]
1402 : : {
3182 1403 : 756 : rInfo = (ResultRelInfo *) lfirst(l);
191 drowley@postgresql.o 1404 [ + + ]: 756 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid &&
1405 [ + + ]: 491 : rInfo->ri_RootResultRelInfo == rootRelInfo)
3182 rhaas@postgresql.org 1406 : 20 : return rInfo;
1407 : : }
1408 : :
1409 : : /* Nope, but maybe we already made an extra ResultRelInfo for it */
6838 tgl@sss.pgh.pa.us 1410 [ + + + + : 1798 : foreach(l, estate->es_trig_target_relations)
+ + ]
1411 : : {
1412 : 541 : rInfo = (ResultRelInfo *) lfirst(l);
191 drowley@postgresql.o 1413 [ + + ]: 541 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid &&
1414 [ + + ]: 24 : rInfo->ri_RootResultRelInfo == rootRelInfo)
6838 tgl@sss.pgh.pa.us 1415 : 12 : return rInfo;
1416 : : }
1417 : : /* Nope, so we need a new one */
1418 : :
1419 : : /*
1420 : : * Open the target relation's relcache entry. We assume that an
1421 : : * appropriate lock is still held by the backend from whenever the trigger
1422 : : * event got queued, so we need take no new lock here. Also, we need not
1423 : : * recheck the relkind, so no need for CheckValidResultRel.
1424 : : */
2661 andres@anarazel.de 1425 : 1257 : rel = table_open(relid, NoLock);
1426 : :
1427 : : /*
1428 : : * Make the new entry in the right context.
1429 : : */
6838 tgl@sss.pgh.pa.us 1430 : 1257 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1431 : 1257 : rInfo = makeNode(ResultRelInfo);
6612 1432 : 1257 : InitResultRelInfo(rInfo,
1433 : : rel,
1434 : : 0, /* dummy rangetable index */
1435 : : rootRelInfo,
1436 : : estate->es_instrument);
6838 1437 : 1257 : estate->es_trig_target_relations =
1438 : 1257 : lappend(estate->es_trig_target_relations, rInfo);
1439 : 1257 : MemoryContextSwitchTo(oldcontext);
1440 : :
1441 : : /*
1442 : : * Currently, we don't need any index information in ResultRelInfos used
1443 : : * only for triggers, so no need to call ExecOpenIndices.
1444 : : */
1445 : :
1446 : 1257 : return rInfo;
1447 : : }
1448 : :
/*
 * ExecGetAncestorResultRels
 *		Return the ancestor relations of a given leaf partition result
 *		relation, up to and including the query's root target relation.
 *
 * These work much like the ones opened by ExecGetTriggerResultRel, except
 * that we need to keep them in a separate list.
 *
 * The list is built lazily on first call and cached in
 * resultRelInfo->ri_ancestorResultRels; intermediate ancestors get
 * freshly-made "dummy" ResultRelInfos (rangetable index 0), while the root
 * entry is the already-initialized ri_RootResultRelInfo.
 *
 * These are closed by ExecCloseResultRelations.
 */
List *
ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
{
    ResultRelInfo *rootRelInfo = resultRelInfo->ri_RootResultRelInfo;
    Relation    partRel = resultRelInfo->ri_RelationDesc;
    Oid         rootRelOid;

    /* Only a partition has ancestors; anything else is a caller bug. */
    if (!partRel->rd_rel->relispartition)
        elog(ERROR, "cannot find ancestors of a non-partition result relation");
    Assert(rootRelInfo != NULL);
    rootRelOid = RelationGetRelid(rootRelInfo->ri_RelationDesc);
    if (resultRelInfo->ri_ancestorResultRels == NIL)
    {
        ListCell   *lc;
        List       *oids = get_partition_ancestors(RelationGetRelid(partRel));
        List       *ancResultRels = NIL;

        foreach(lc, oids)
        {
            Oid         ancOid = lfirst_oid(lc);
            Relation    ancRel;
            ResultRelInfo *rInfo;

            /*
             * Ignore the root ancestor here, and use ri_RootResultRelInfo
             * (below) for it instead.  Also, we stop climbing up the
             * hierarchy when we find the table that was mentioned in the
             * query.
             */
            if (ancOid == rootRelOid)
                break;

            /*
             * All ancestors up to the root target relation must have been
             * locked by the planner or AcquireExecutorLocks(), so NoLock is
             * safe here.
             */
            ancRel = table_open(ancOid, NoLock);
            rInfo = makeNode(ResultRelInfo);

            /* dummy rangetable index */
            InitResultRelInfo(rInfo, ancRel, 0, NULL,
                              estate->es_instrument);
            ancResultRels = lappend(ancResultRels, rInfo);
        }
        /* Finally, the query's root target relation itself. */
        ancResultRels = lappend(ancResultRels, rootRelInfo);
        resultRelInfo->ri_ancestorResultRels = ancResultRels;
    }

    /* We must have found some ancestor */
    Assert(resultRelInfo->ri_ancestorResultRels != NIL);

    return resultRelInfo->ri_ancestorResultRels;
}
1511 : :
1512 : : /* ----------------------------------------------------------------
1513 : : * ExecPostprocessPlan
1514 : : *
1515 : : * Give plan nodes a final chance to execute before shutdown
1516 : : * ----------------------------------------------------------------
1517 : : */
1518 : : static void
5548 tgl@sss.pgh.pa.us 1519 : 329288 : ExecPostprocessPlan(EState *estate)
1520 : : {
1521 : : ListCell *lc;
1522 : :
1523 : : /*
1524 : : * Make sure nodes run forward.
1525 : : */
1526 : 329288 : estate->es_direction = ForwardScanDirection;
1527 : :
1528 : : /*
1529 : : * Run any secondary ModifyTable nodes to completion, in case the main
1530 : : * query did not fetch all rows from them. (We do this to ensure that
1531 : : * such nodes have predictable results.)
1532 : : */
1533 [ + + + + : 329925 : foreach(lc, estate->es_auxmodifytables)
+ + ]
1534 : : {
5504 bruce@momjian.us 1535 : 637 : PlanState *ps = (PlanState *) lfirst(lc);
1536 : :
1537 : : for (;;)
5548 tgl@sss.pgh.pa.us 1538 : 100 : {
1539 : : TupleTableSlot *slot;
1540 : :
1541 : : /* Reset the per-output-tuple exprcontext each time */
1542 [ + + ]: 737 : ResetPerTupleExprContext(estate);
1543 : :
1544 : 737 : slot = ExecProcNode(ps);
1545 : :
1546 [ + + + - ]: 737 : if (TupIsNull(slot))
1547 : : break;
1548 : : }
1549 : : }
1550 : 329288 : }
1551 : :
1552 : : /* ----------------------------------------------------------------
1553 : : * ExecEndPlan
1554 : : *
1555 : : * Cleans up the query plan -- closes files and frees up storage
1556 : : *
1557 : : * NOTE: we are no longer very worried about freeing storage per se
1558 : : * in this code; FreeExecutorState should be guaranteed to release all
1559 : : * memory that needs to be released. What we are worried about doing
1560 : : * is closing relations and dropping buffer pins. Thus, for example,
1561 : : * tuple tables must be cleared or dropped to ensure pins are released.
1562 : : * ----------------------------------------------------------------
1563 : : */
1564 : : static void
8306 bruce@momjian.us 1565 : 342580 : ExecEndPlan(PlanState *planstate, EState *estate)
1566 : : {
1567 : : ListCell *l;
1568 : :
1569 : : /*
1570 : : * shut down the node-type-specific query processing
1571 : : */
8552 tgl@sss.pgh.pa.us 1572 : 342580 : ExecEndNode(planstate);
1573 : :
1574 : : /*
1575 : : * for subplans too
1576 : : */
7007 1577 [ + + + + : 373064 : foreach(l, estate->es_subplanstates)
+ + ]
1578 : : {
6746 bruce@momjian.us 1579 : 30484 : PlanState *subplanstate = (PlanState *) lfirst(l);
1580 : :
7007 tgl@sss.pgh.pa.us 1581 : 30484 : ExecEndNode(subplanstate);
1582 : : }
1583 : :
1584 : : /*
1585 : : * destroy the executor's tuple table. Actually we only care about
1586 : : * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1587 : : * the TupleTableSlots, since the containing memory context is about to go
1588 : : * away anyway.
1589 : : */
6064 1590 : 342580 : ExecResetTupleTable(estate->es_tupleTable, false);
1591 : :
1592 : : /*
1593 : : * Close any Relations that have been opened for range table entries or
1594 : : * result relations.
1595 : : */
2030 heikki.linnakangas@i 1596 : 342580 : ExecCloseResultRelations(estate);
1597 : 342580 : ExecCloseRangeTableRelations(estate);
1598 : 342580 : }
1599 : :
1600 : : /*
1601 : : * Close any relations that have been opened for ResultRelInfos.
1602 : : */
1603 : : void
1604 : 343849 : ExecCloseResultRelations(EState *estate)
1605 : : {
1606 : : ListCell *l;
1607 : :
1608 : : /*
1609 : : * close indexes of result relation(s) if any. (Rels themselves are
1610 : : * closed in ExecCloseRangeTableRelations())
1611 : : *
1612 : : * In addition, close the stub RTs that may be in each resultrel's
1613 : : * ri_ancestorResultRels.
1614 : : */
1615 [ + + + + : 417143 : foreach(l, estate->es_opened_result_relations)
+ + ]
1616 : : {
1617 : 73294 : ResultRelInfo *resultRelInfo = lfirst(l);
1618 : : ListCell *lc;
1619 : :
9305 tgl@sss.pgh.pa.us 1620 : 73294 : ExecCloseIndices(resultRelInfo);
1507 alvherre@alvh.no-ip. 1621 [ + + + + : 73464 : foreach(lc, resultRelInfo->ri_ancestorResultRels)
+ + ]
1622 : : {
1623 : 170 : ResultRelInfo *rInfo = lfirst(lc);
1624 : :
1625 : : /*
1626 : : * Ancestors with RTI > 0 (should only be the root ancestor) are
1627 : : * closed by ExecCloseRangeTableRelations.
1628 : : */
1629 [ + + ]: 170 : if (rInfo->ri_RangeTableIndex > 0)
1630 : 138 : continue;
1631 : :
1632 : 32 : table_close(rInfo->ri_RelationDesc, NoLock);
1633 : : }
1634 : : }
1635 : :
1636 : : /* Close any relations that have been opened by ExecGetTriggerResultRel(). */
2030 heikki.linnakangas@i 1637 [ + + + + : 344773 : foreach(l, estate->es_trig_target_relations)
+ + ]
1638 : : {
1639 : 924 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1640 : :
1641 : : /*
1642 : : * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we
1643 : : * might be issuing a duplicate close against a Relation opened by
1644 : : * ExecGetRangeTableRelation.
1645 : : */
1646 [ - + ]: 924 : Assert(resultRelInfo->ri_RangeTableIndex == 0);
1647 : :
1648 : : /*
1649 : : * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for
1650 : : * these rels, we needn't call ExecCloseIndices either.
1651 : : */
1652 [ - + ]: 924 : Assert(resultRelInfo->ri_NumIndices == 0);
1653 : :
1654 : 924 : table_close(resultRelInfo->ri_RelationDesc, NoLock);
1655 : : }
1656 : 343849 : }
1657 : :
1658 : : /*
1659 : : * Close all relations opened by ExecGetRangeTableRelation().
1660 : : *
1661 : : * We do not release any locks we might hold on those rels.
1662 : : */
1663 : : void
1664 : 343533 : ExecCloseRangeTableRelations(EState *estate)
1665 : : {
1666 : : int i;
1667 : :
1668 [ + + ]: 1065373 : for (i = 0; i < estate->es_range_table_size; i++)
1669 : : {
2770 tgl@sss.pgh.pa.us 1670 [ + + ]: 721840 : if (estate->es_relations[i])
2661 andres@anarazel.de 1671 : 357058 : table_close(estate->es_relations[i], NoLock);
1672 : : }
10892 scrappy@hub.org 1673 : 343533 : }
1674 : :
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have retrieved 'numberTuples' tuples,
 *		moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * operation: the query's CmdType; only CMD_SELECT counts tuples here,
 * other operations rely on the ModifyTable node to maintain es_processed.
 * sendTuples: whether result tuples should be forwarded to 'dest'.
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(QueryDesc *queryDesc,
            CmdType operation,
            bool sendTuples,
            uint64 numberTuples,
            ScanDirection direction,
            DestReceiver *dest)
{
    EState     *estate = queryDesc->estate;
    PlanState  *planstate = queryDesc->planstate;
    bool        use_parallel_mode;
    TupleTableSlot *slot;
    uint64      current_tuple_count;

    /*
     * initialize local variables
     */
    current_tuple_count = 0;

    /*
     * Set the direction.
     */
    estate->es_direction = direction;

    /*
     * Set up parallel mode if appropriate.
     *
     * Parallel mode only supports complete execution of a plan.  If we've
     * already partially executed it, or if the caller asks us to exit early,
     * we must force the plan to run without parallelism.
     */
    if (queryDesc->already_executed || numberTuples != 0)
        use_parallel_mode = false;
    else
        use_parallel_mode = queryDesc->plannedstmt->parallelModeNeeded;
    queryDesc->already_executed = true;

    estate->es_use_parallel_mode = use_parallel_mode;
    if (use_parallel_mode)
        EnterParallelMode();

    /*
     * Loop until we've processed the proper number of tuples from the plan.
     */
    for (;;)
    {
        /* Reset the per-output-tuple exprcontext */
        ResetPerTupleExprContext(estate);

        /*
         * Execute the plan and obtain a tuple
         */
        slot = ExecProcNode(planstate);

        /*
         * if the tuple is null, then we assume there is nothing more to
         * process so we just end the loop...
         */
        if (TupIsNull(slot))
            break;

        /*
         * If we have a junk filter, then project a new tuple with the junk
         * removed.
         *
         * Store this new "clean" tuple in the junkfilter's resultSlot.
         * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
         * because that tuple slot has the wrong descriptor.)
         */
        if (estate->es_junkFilter != NULL)
            slot = ExecFilterJunk(estate->es_junkFilter, slot);

        /*
         * If we are supposed to send the tuple somewhere, do so. (In
         * practice, this is probably always the case at this point.)
         */
        if (sendTuples)
        {
            /*
             * If we are not able to send the tuple, we assume the destination
             * has closed and no more tuples can be sent. If that's the case,
             * end the loop.
             */
            if (!dest->receiveSlot(slot, dest))
                break;
        }

        /*
         * Count tuples processed, if this is a SELECT.  (For other operation
         * types, the ModifyTable plan node must count the appropriate
         * events.)
         */
        if (operation == CMD_SELECT)
            (estate->es_processed)++;

        /*
         * check our tuple count.. if we've processed the proper number then
         * quit, else loop again and process more tuples.  Zero numberTuples
         * means no limit.
         */
        current_tuple_count++;
        if (numberTuples && numberTuples == current_tuple_count)
            break;
    }

    /*
     * If we know we won't need to back up, we can release resources at this
     * point.  (When EXEC_FLAG_BACKWARD is set, the caller may yet rescan
     * backwards, so the nodes must be left alive.)
     */
    if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD))
        ExecShutdownNode(planstate);

    if (use_parallel_mode)
        ExitParallelMode();
}
1799 : :
1800 : :
/*
 * ExecRelCheck --- check that tuple meets check constraints for result relation
 *
 * Returns NULL if OK, else name of failed check constraint
 *
 * Compiled constraint expressions are built on first use and cached in
 * resultRelInfo->ri_CheckConstraintExprs for the life of the query.
 * Constraints marked NOT ENFORCED are left as NULL entries in that array
 * and skipped during evaluation.
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
             TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    int         ncheck = rel->rd_att->constr->num_check;
    ConstrCheck *check = rel->rd_att->constr->check;
    ExprContext *econtext;
    MemoryContext oldContext;

    /*
     * CheckNNConstraintFetch let this pass with only a warning, but now we
     * should fail rather than possibly failing to enforce an important
     * constraint.
     */
    if (ncheck != rel->rd_rel->relchecks)
        elog(ERROR, "%d pg_constraint record(s) missing for relation \"%s\"",
             rel->rd_rel->relchecks - ncheck, RelationGetRelationName(rel));

    /*
     * If first time through for this result relation, build expression
     * nodetrees for rel's constraint expressions.  Keep them in the per-query
     * memory context so they'll survive throughout the query.
     */
    if (resultRelInfo->ri_CheckConstraintExprs == NULL)
    {
        oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
        resultRelInfo->ri_CheckConstraintExprs = palloc0_array(ExprState *, ncheck);
        for (int i = 0; i < ncheck; i++)
        {
            Expr       *checkconstr;

            /* Skip not enforced constraint; its array slot stays NULL */
            if (!check[i].ccenforced)
                continue;

            checkconstr = stringToNode(check[i].ccbin);
            /* Inline any references to virtual generated columns */
            checkconstr = (Expr *) expand_generated_columns_in_expr((Node *) checkconstr, rel, 1);
            resultRelInfo->ri_CheckConstraintExprs[i] =
                ExecPrepareExpr(checkconstr, estate);
        }
        MemoryContextSwitchTo(oldContext);
    }

    /*
     * We will use the EState's per-tuple context for evaluating constraint
     * expressions (creating it if it's not already there).
     */
    econtext = GetPerTupleExprContext(estate);

    /* Arrange for econtext's scan tuple to be the tuple under test */
    econtext->ecxt_scantuple = slot;

    /* And evaluate the constraints */
    for (int i = 0; i < ncheck; i++)
    {
        ExprState  *checkconstr = resultRelInfo->ri_CheckConstraintExprs[i];

        /*
         * NOTE: SQL specifies that a NULL result from a constraint expression
         * is not to be treated as a failure.  Therefore, use ExecCheck not
         * ExecQual.  (A NULL checkconstr here means the constraint is NOT
         * ENFORCED, so it is not evaluated at all.)
         */
        if (checkconstr && !ExecCheck(checkconstr, econtext))
            return check[i].ccname;
    }

    /* NULL result means no error */
    return NULL;
}
1876 : :
/*
 * ExecPartitionCheck --- check that tuple meets the partition constraint.
 *
 * Returns true if it meets the partition constraint.  If the constraint
 * fails and we're asked to emit an error, do so and don't return; otherwise
 * return false.
 */
bool
ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
                   EState *estate, bool emitError)
{
    ExprContext *econtext;
    bool        success;

    /*
     * If first time through, build expression state tree for the partition
     * check expression.  (In the corner case where the partition check
     * expression is empty, ie there's a default partition and nothing else,
     * we'll be fooled into executing this code each time through.  But it's
     * pretty darn cheap in that case, so we don't worry about it.)
     */
    if (resultRelInfo->ri_PartitionCheckExpr == NULL)
    {
        /*
         * Ensure that the qual tree and prepared expression are in the
         * query-lifespan context, so the cached state survives the whole
         * query.
         */
        MemoryContext oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
        List       *qual = RelationGetPartitionQual(resultRelInfo->ri_RelationDesc);

        resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
        MemoryContextSwitchTo(oldcxt);
    }

    /*
     * We will use the EState's per-tuple context for evaluating constraint
     * expressions (creating it if it's not already there).
     */
    econtext = GetPerTupleExprContext(estate);

    /* Arrange for econtext's scan tuple to be the tuple under test */
    econtext->ecxt_scantuple = slot;

    /*
     * As in case of the cataloged constraints, we treat a NULL result as
     * success here, not a failure.
     */
    success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);

    /*
     * If asked to emit error, don't actually return on failure;
     * ExecPartitionCheckEmitError raises ERROR and does not come back.
     */
    if (!success && emitError)
        ExecPartitionCheckEmitError(resultRelInfo, slot, estate);

    return success;
}
1932 : :
/*
 * ExecPartitionCheckEmitError - Form and emit an error message after a failed
 * partition constraint check.
 *
 * Always raises ERROR (does not return).  The failing tuple is converted
 * back to the root table's rowtype, when necessary, so that the row shown
 * in the error detail matches the user's input tuple.
 */
void
ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
                            TupleTableSlot *slot,
                            EState *estate)
{
    Oid         root_relid;
    TupleDesc   tupdesc;
    char       *val_desc;
    Bitmapset  *modifiedCols;

    /*
     * If the tuple has been routed, it's been converted to the partition's
     * rowtype, which might differ from the root table's.  We must convert it
     * back to the root table's rowtype so that val_desc in the error message
     * matches the input tuple.
     */
    if (resultRelInfo->ri_RootResultRelInfo)
    {
        ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
        TupleDesc   old_tupdesc;
        AttrMap    *map;

        root_relid = RelationGetRelid(rootrel->ri_RelationDesc);
        tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);

        old_tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
        /* a reverse map */
        map = build_attrmap_by_name_if_req(old_tupdesc, tupdesc, false);

        /*
         * Partition-specific slot's tupdesc can't be changed, so allocate a
         * new one.  (A NULL map means the descriptors are physically
         * compatible and no conversion is needed.)
         */
        if (map != NULL)
            slot = execute_attr_map_slot(map, slot,
                                         MakeTupleTableSlot(tupdesc, &TTSOpsVirtual, 0));
        modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
                                 ExecGetUpdatedCols(rootrel, estate));
    }
    else
    {
        root_relid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
        tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
        modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
                                 ExecGetUpdatedCols(resultRelInfo, estate));
    }

    /*
     * Build the row description, limited to columns the user may see
     * (modifiedCols) and truncated to 64 characters per value.
     */
    val_desc = ExecBuildSlotValueDescription(root_relid,
                                             slot,
                                             tupdesc,
                                             modifiedCols,
                                             64);
    ereport(ERROR,
            (errcode(ERRCODE_CHECK_VIOLATION),
             errmsg("new row for relation \"%s\" violates partition constraint",
                    RelationGetRelationName(resultRelInfo->ri_RelationDesc)),
             val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
             errtable(resultRelInfo->ri_RelationDesc)));
}
1996 : :
/*
 * ExecConstraints - check constraints of the tuple in 'slot'
 *
 * This checks the traditional NOT NULL and check constraints.
 *
 * The partition constraint is *NOT* checked.
 *
 * Note: 'slot' contains the tuple to check the constraints of, which may
 * have been converted from the original input tuple after tuple routing.
 * 'resultRelInfo' is the final result relation, after tuple routing.
 *
 * On any violation this raises ERROR; otherwise it simply returns.
 */
void
ExecConstraints(ResultRelInfo *resultRelInfo,
                TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    TupleConstr *constr = tupdesc->constr;
    Bitmapset  *modifiedCols;
    List       *notnull_virtual_attrs = NIL;

    Assert(constr);             /* we should not be called otherwise */

    /*
     * Verify not-null constraints.
     *
     * Not-null constraints on virtual generated columns are collected and
     * checked separately below, since their values must be computed rather
     * than read from the slot.
     */
    if (constr->has_not_null)
    {
        for (AttrNumber attnum = 1; attnum <= tupdesc->natts; attnum++)
        {
            Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);

            if (att->attnotnull && att->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
                notnull_virtual_attrs = lappend_int(notnull_virtual_attrs, attnum);
            else if (att->attnotnull && slot_attisnull(slot, attnum))
                ReportNotNullViolationError(resultRelInfo, slot, estate, attnum);
        }
    }

    /*
     * Verify not-null constraints on virtual generated column, if any.
     */
    if (notnull_virtual_attrs)
    {
        AttrNumber  attnum;

        attnum = ExecRelGenVirtualNotNull(resultRelInfo, slot, estate,
                                          notnull_virtual_attrs);
        /* InvalidAttrNumber means all such constraints were satisfied */
        if (attnum != InvalidAttrNumber)
            ReportNotNullViolationError(resultRelInfo, slot, estate, attnum);
    }

    /*
     * Verify check constraints.
     */
    if (rel->rd_rel->relchecks > 0)
    {
        const char *failed;

        if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
        {
            char       *val_desc;
            Relation    orig_rel = rel;

            /*
             * If the tuple has been routed, it's been converted to the
             * partition's rowtype, which might differ from the root table's.
             * We must convert it back to the root table's rowtype so that
             * val_desc shown error message matches the input tuple.
             */
            if (resultRelInfo->ri_RootResultRelInfo)
            {
                ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
                TupleDesc   old_tupdesc = RelationGetDescr(rel);
                AttrMap    *map;

                tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
                /* a reverse map */
                map = build_attrmap_by_name_if_req(old_tupdesc,
                                                   tupdesc,
                                                   false);

                /*
                 * Partition-specific slot's tupdesc can't be changed, so
                 * allocate a new one.  (NULL map means no conversion is
                 * required.)
                 */
                if (map != NULL)
                    slot = execute_attr_map_slot(map, slot,
                                                 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual, 0));
                modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
                                         ExecGetUpdatedCols(rootrel, estate));
                rel = rootrel->ri_RelationDesc;
            }
            else
                modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
                                         ExecGetUpdatedCols(resultRelInfo, estate));
            val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
                                                     slot,
                                                     tupdesc,
                                                     modifiedCols,
                                                     64);
            ereport(ERROR,
                    (errcode(ERRCODE_CHECK_VIOLATION),
                     errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
                            RelationGetRelationName(orig_rel), failed),
                     val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
                     errtableconstraint(orig_rel, failed)));
        }
    }
}
2110 : :
/*
 * Verify not-null constraints on virtual generated columns of the given
 * tuple slot.
 *
 * Return value of InvalidAttrNumber means all not-null constraints on virtual
 * generated columns are satisfied.  A return value > 0 means a not-null
 * violation happened for that attribute.
 *
 * notnull_virtual_attrs is the list of the attnums of virtual generated
 * columns with not-null constraints.  NB: the cached expression array is
 * indexed by list position, so callers must pass the same list contents on
 * every call for a given resultRelInfo.
 */
AttrNumber
ExecRelGenVirtualNotNull(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
                         EState *estate, List *notnull_virtual_attrs)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    ExprContext *econtext;
    MemoryContext oldContext;

    /*
     * We implement this by building a NullTest node for each virtual
     * generated column, which we cache in resultRelInfo, and running those
     * through ExecCheck().
     */
    if (resultRelInfo->ri_GenVirtualNotNullConstraintExprs == NULL)
    {
        /* Build in the per-query context so the cache survives the query. */
        oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
        resultRelInfo->ri_GenVirtualNotNullConstraintExprs =
            palloc0_array(ExprState *, list_length(notnull_virtual_attrs));

        foreach_int(attnum, notnull_virtual_attrs)
        {
            int         i = foreach_current_index(attnum);
            NullTest   *nnulltest;

            /* "generated_expression IS NOT NULL" check. */
            nnulltest = makeNode(NullTest);
            nnulltest->arg = (Expr *) build_generation_expression(rel, attnum);
            nnulltest->nulltesttype = IS_NOT_NULL;
            nnulltest->argisrow = false;
            nnulltest->location = -1;

            resultRelInfo->ri_GenVirtualNotNullConstraintExprs[i] =
                ExecPrepareExpr((Expr *) nnulltest, estate);
        }
        MemoryContextSwitchTo(oldContext);
    }

    /*
     * We will use the EState's per-tuple context for evaluating virtual
     * generated column not null constraint expressions (creating it if it's
     * not already there).
     */
    econtext = GetPerTupleExprContext(estate);

    /* Arrange for econtext's scan tuple to be the tuple under test */
    econtext->ecxt_scantuple = slot;

    /* And evaluate the check constraints for virtual generated column */
    foreach_int(attnum, notnull_virtual_attrs)
    {
        int         i = foreach_current_index(attnum);
        ExprState  *exprstate = resultRelInfo->ri_GenVirtualNotNullConstraintExprs[i];

        Assert(exprstate != NULL);
        /* Report the first violated attribute; caller raises the error. */
        if (!ExecCheck(exprstate, econtext))
            return attnum;
    }

    /* InvalidAttrNumber result means no error */
    return InvalidAttrNumber;
}
2183 : :
/*
 * Report a violation of a not-null constraint that was already detected.
 *
 * 'attnum' identifies the violated column in the result relation's rowtype.
 * Always raises ERROR (does not return).  As elsewhere, a routed tuple is
 * converted back to the root table's rowtype so the reported row matches
 * the user's input; the column and relation names, however, refer to the
 * actual (possibly partition) relation.
 */
static void
ReportNotNullViolationError(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
                            EState *estate, int attnum)
{
    Bitmapset  *modifiedCols;
    char       *val_desc;
    Relation    rel = resultRelInfo->ri_RelationDesc;
    Relation    orig_rel = rel;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    TupleDesc   orig_tupdesc = RelationGetDescr(rel);
    Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);

    Assert(attnum > 0);

    /*
     * If the tuple has been routed, it's been converted to the partition's
     * rowtype, which might differ from the root table's.  We must convert it
     * back to the root table's rowtype so that val_desc shown error message
     * matches the input tuple.
     */
    if (resultRelInfo->ri_RootResultRelInfo)
    {
        ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
        AttrMap    *map;

        tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
        /* a reverse map */
        map = build_attrmap_by_name_if_req(orig_tupdesc,
                                           tupdesc,
                                           false);

        /*
         * Partition-specific slot's tupdesc can't be changed, so allocate a
         * new one.  (NULL map means no conversion is required.)
         */
        if (map != NULL)
            slot = execute_attr_map_slot(map, slot,
                                         MakeTupleTableSlot(tupdesc, &TTSOpsVirtual, 0));
        modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
                                 ExecGetUpdatedCols(rootrel, estate));
        rel = rootrel->ri_RelationDesc;
    }
    else
        modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
                                 ExecGetUpdatedCols(resultRelInfo, estate));

    val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
                                             slot,
                                             tupdesc,
                                             modifiedCols,
                                             64);
    ereport(ERROR,
            errcode(ERRCODE_NOT_NULL_VIOLATION),
            errmsg("null value in column \"%s\" of relation \"%s\" violates not-null constraint",
                   NameStr(att->attname),
                   RelationGetRelationName(orig_rel)),
            val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
            errtablecol(orig_rel, attnum));
}
2246 : :
2247 : : /*
2248 : : * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
2249 : : * of the specified kind.
2250 : : *
2251 : : * Note that this needs to be called multiple times to ensure that all kinds of
2252 : : * WITH CHECK OPTIONs are handled (both those from views which have the WITH
2253 : : * CHECK OPTION set and from row-level security policies). See ExecInsert()
2254 : : * and ExecUpdate().
2255 : : */
/*
 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
 * of the specified kind, reporting an error (and never returning) on the
 * first failing option of that kind.
 */
void
ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
					 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	ExprContext *econtext;
	ListCell   *l1,
			   *l2;

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* Check each of the constraints */
	forboth(l1, resultRelInfo->ri_WithCheckOptions,
			l2, resultRelInfo->ri_WithCheckOptionExprs)
	{
		WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
		ExprState  *wcoExpr = (ExprState *) lfirst(l2);

		/*
		 * Skip any WCOs which are not the kind we are looking for at this
		 * time.
		 */
		if (wco->kind != kind)
			continue;

		/*
		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
		 * is visible (in the case of a view) or that it passes the
		 * 'with-check' policy (in the case of row security). If the qual
		 * evaluates to NULL or FALSE, then the new tuple won't be included in
		 * the view or doesn't pass the 'with-check' policy for the table.
		 */
		if (!ExecQual(wcoExpr, econtext))
		{
			char	   *val_desc;
			Bitmapset  *modifiedCols;

			switch (wco->kind)
			{
					/*
					 * For WITH CHECK OPTIONs coming from views, we might be
					 * able to provide the details on the row, depending on
					 * the permissions on the relation (that is, if the user
					 * could view it directly anyway). For RLS violations, we
					 * don't include the data since we don't know if the user
					 * should be able to view the tuple as that depends on the
					 * USING policy.
					 */
				case WCO_VIEW_CHECK:
					/* See the comment in ExecConstraints(). */
					if (resultRelInfo->ri_RootResultRelInfo)
					{
						/*
						 * The tuple is in a partition's rowtype; remap it to
						 * the root table's rowtype so the error detail shows
						 * the columns the user actually named.
						 */
						ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
						TupleDesc	old_tupdesc = RelationGetDescr(rel);
						AttrMap    *map;

						tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
						/* a reverse map */
						map = build_attrmap_by_name_if_req(old_tupdesc,
														   tupdesc,
														   false);

						/*
						 * Partition-specific slot's tupdesc can't be changed,
						 * so allocate a new one.
						 */
						if (map != NULL)
							slot = execute_attr_map_slot(map, slot,
														 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual, 0));

						modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
												 ExecGetUpdatedCols(rootrel, estate));
						rel = rootrel->ri_RelationDesc;
					}
					else
						modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
												 ExecGetUpdatedCols(resultRelInfo, estate));
					/* may be NULL if the user can't see any of the columns */
					val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
															 slot,
															 tupdesc,
															 modifiedCols,
															 64);

					ereport(ERROR,
							(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
							 errmsg("new row violates check option for view \"%s\"",
									wco->relname),
							 val_desc ? errdetail("Failing row contains %s.",
												  val_desc) : 0));
					break;
				case WCO_RLS_INSERT_CHECK:
				case WCO_RLS_UPDATE_CHECK:
					/* named policy vs. implicit/default policy message */
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_MERGE_UPDATE_CHECK:
				case WCO_RLS_MERGE_DELETE_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("target row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("target row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_CONFLICT_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				default:
					elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
					break;
			}
		}
	}
}
2399 : :
2400 : : /*
2401 : : * ExecBuildSlotValueDescription -- construct a string representing a tuple
2402 : : *
2403 : : * This is intentionally very similar to BuildIndexValueDescription, but
2404 : : * unlike that function, we truncate long field values (to at most maxfieldlen
2405 : : * bytes). That seems necessary here since heap field values could be very
2406 : : * long, whereas index entries typically aren't so wide.
2407 : : *
2408 : : * Also, unlike the case with index entries, we need to be prepared to ignore
2409 : : * dropped columns. We used to use the slot's tuple descriptor to decode the
2410 : : * data, but the slot's descriptor doesn't identify dropped columns, so we
2411 : : * now need to be passed the relation's descriptor.
2412 : : *
2413 : : * Note that, like BuildIndexValueDescription, if the user does not have
2414 : : * permission to view any of the columns involved, a NULL is returned. Unlike
2415 : : * BuildIndexValueDescription, if the user has access to view a subset of the
2416 : : * column involved, that subset will be returned with a key identifying which
2417 : : * columns they are.
2418 : : */
char *
ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen)
{
	StringInfoData buf;		/* "(v1, v2, ...)" value list */
	StringInfoData collist;	/* "(c1, c2, ...)" column-name list, only used
							 * when the user lacks table-level SELECT */
	bool		write_comma = false;
	bool		write_comma_collist = false;
	int			i;
	AclResult	aclresult;
	bool		table_perm = false; /* table-level SELECT granted? */
	bool		any_perm = false;	/* at least one column visible? */

	/*
	 * Check if RLS is enabled and should be active for the relation; if so,
	 * then don't return anything.  Otherwise, go through normal permission
	 * checks.
	 */
	if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
		return NULL;

	initStringInfo(&buf);

	appendStringInfoChar(&buf, '(');

	/*
	 * Check if the user has permissions to see the row.  Table-level SELECT
	 * allows access to all columns.  If the user does not have table-level
	 * SELECT then we check each column and include those the user has SELECT
	 * rights on.  Additionally, we always include columns the user provided
	 * data for.
	 */
	aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
	if (aclresult != ACLCHECK_OK)
	{
		/* Set up the buffer for the column list */
		initStringInfo(&collist);
		appendStringInfoChar(&collist, '(');
	}
	else
		table_perm = any_perm = true;

	/* Make sure the tuple is fully deconstructed */
	slot_getallattrs(slot);

	for (i = 0; i < tupdesc->natts; i++)
	{
		bool		column_perm = false;
		char	   *val;
		int			vallen;
		Form_pg_attribute att = TupleDescAttr(tupdesc, i);

		/* ignore dropped columns */
		if (att->attisdropped)
			continue;

		if (!table_perm)
		{
			/*
			 * No table-level SELECT, so need to make sure they either have
			 * SELECT rights on the column or that they have provided the data
			 * for the column.  If not, omit this column from the error
			 * message.
			 */
			aclresult = pg_attribute_aclcheck(reloid, att->attnum,
											  GetUserId(), ACL_SELECT);
			if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
							  modifiedCols) || aclresult == ACLCHECK_OK)
			{
				column_perm = any_perm = true;

				if (write_comma_collist)
					appendStringInfoString(&collist, ", ");
				else
					write_comma_collist = true;

				appendStringInfoString(&collist, NameStr(att->attname));
			}
		}

		if (table_perm || column_perm)
		{
			/* print a placeholder for virtual generated columns */
			if (att->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
				val = "virtual";
			else if (slot->tts_isnull[i])
				val = "null";
			else
			{
				Oid			foutoid;
				bool		typisvarlena;

				/* convert the datum to text via the type's output function */
				getTypeOutputInfo(att->atttypid,
								  &foutoid, &typisvarlena);
				val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
			}

			if (write_comma)
				appendStringInfoString(&buf, ", ");
			else
				write_comma = true;

			/* truncate if needed */
			vallen = strlen(val);
			if (vallen <= maxfieldlen)
				appendBinaryStringInfo(&buf, val, vallen);
			else
			{
				/* clip at a multibyte-safe boundary, then mark truncation */
				vallen = pg_mbcliplen(val, vallen, maxfieldlen);
				appendBinaryStringInfo(&buf, val, vallen);
				appendStringInfoString(&buf, "...");
			}
		}
	}

	/* If we end up with zero columns being returned, then return NULL. */
	if (!any_perm)
		return NULL;

	appendStringInfoChar(&buf, ')');

	if (!table_perm)
	{
		/* produce "(c1, c2) = (v1, v2)" keyed form */
		appendStringInfoString(&collist, ") = ");
		appendBinaryStringInfo(&collist, buf.data, buf.len);

		return collist.data;
	}

	return buf.data;
}
2552 : :
2553 : :
2554 : : /*
2555 : : * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2556 : : * given ResultRelInfo
2557 : : */
2558 : : LockTupleMode
4015 andres@anarazel.de 2559 : 4373 : ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2560 : : {
2561 : : Bitmapset *keyCols;
2562 : : Bitmapset *updatedCols;
2563 : :
2564 : : /*
2565 : : * Compute lock mode to use. If columns that are part of the key have not
2566 : : * been modified, then we can use a weaker lock, allowing for better
2567 : : * concurrency.
2568 : : */
1912 heikki.linnakangas@i 2569 : 4373 : updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
4015 andres@anarazel.de 2570 : 4373 : keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2571 : : INDEX_ATTR_BITMAP_KEY);
2572 : :
2573 [ + + ]: 4373 : if (bms_overlap(keyCols, updatedCols))
2574 : 176 : return LockTupleExclusive;
2575 : :
2576 : 4197 : return LockTupleNoKeyExclusive;
2577 : : }
2578 : :
2579 : : /*
2580 : : * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2581 : : *
2582 : : * If no such struct, either return NULL or throw error depending on missing_ok
2583 : : */
2584 : : ExecRowMark *
4011 tgl@sss.pgh.pa.us 2585 : 8257 : ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2586 : : {
2766 2587 [ + - + - ]: 8257 : if (rti > 0 && rti <= estate->es_range_table_size &&
2588 [ + - ]: 8257 : estate->es_rowmarks != NULL)
2589 : : {
2590 : 8257 : ExecRowMark *erm = estate->es_rowmarks[rti - 1];
2591 : :
2592 [ + - ]: 8257 : if (erm)
5592 2593 : 8257 : return erm;
2594 : : }
4011 tgl@sss.pgh.pa.us 2595 [ # # ]:UBC 0 : if (!missing_ok)
2596 [ # # ]: 0 : elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2597 : 0 : return NULL;
2598 : : }
2599 : :
2600 : : /*
2601 : : * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2602 : : *
2603 : : * Inputs are the underlying ExecRowMark struct and the targetlist of the
2604 : : * input plan node (not planstate node!). We need the latter to find out
2605 : : * the column numbers of the resjunk columns.
2606 : : */
2607 : : ExecAuxRowMark *
5592 tgl@sss.pgh.pa.us 2608 :CBC 8257 : ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2609 : : {
146 michael@paquier.xyz 2610 :GNC 8257 : ExecAuxRowMark *aerm = palloc0_object(ExecAuxRowMark);
2611 : : char resname[32];
2612 : :
5592 tgl@sss.pgh.pa.us 2613 :CBC 8257 : aerm->rowmark = erm;
2614 : :
2615 : : /* Look up the resjunk columns associated with this rowmark */
4062 2616 [ + + ]: 8257 : if (erm->markType != ROW_MARK_COPY)
2617 : : {
2618 : : /* need ctid for all methods other than COPY */
5564 2619 : 7790 : snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
5592 2620 : 7790 : aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2621 : : resname);
5564 2622 [ - + ]: 7790 : if (!AttributeNumberIsValid(aerm->ctidAttNo))
5564 tgl@sss.pgh.pa.us 2623 [ # # ]:UBC 0 : elog(ERROR, "could not find junk %s column", resname);
2624 : : }
2625 : : else
2626 : : {
2627 : : /* need wholerow if COPY */
5564 tgl@sss.pgh.pa.us 2628 :CBC 467 : snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
5592 2629 : 467 : aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2630 : : resname);
5564 2631 [ - + ]: 467 : if (!AttributeNumberIsValid(aerm->wholeAttNo))
5564 tgl@sss.pgh.pa.us 2632 [ # # ]:UBC 0 : elog(ERROR, "could not find junk %s column", resname);
2633 : : }
2634 : :
2635 : : /* if child rel, need tableoid */
4062 tgl@sss.pgh.pa.us 2636 [ + + ]:CBC 8257 : if (erm->rti != erm->prti)
2637 : : {
2638 : 1308 : snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2639 : 1308 : aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2640 : : resname);
2641 [ - + ]: 1308 : if (!AttributeNumberIsValid(aerm->toidAttNo))
4062 tgl@sss.pgh.pa.us 2642 [ # # ]:UBC 0 : elog(ERROR, "could not find junk %s column", resname);
2643 : : }
2644 : :
5592 tgl@sss.pgh.pa.us 2645 :CBC 8257 : return aerm;
2646 : : }
2647 : :
2648 : :
2649 : : /*
2650 : : * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2651 : : * process the updated version under READ COMMITTED rules.
2652 : : *
2653 : : * See backend/executor/README for some info about how this works.
2654 : : */
2655 : :
2656 : :
2657 : : /*
2658 : : * Check the updated version of a tuple to see if we want to process it under
2659 : : * READ COMMITTED rules.
2660 : : *
2661 : : * epqstate - state for EvalPlanQual rechecking
2662 : : * relation - table containing tuple
2663 : : * rti - rangetable index of table containing tuple
2664 : : * inputslot - tuple for processing - this can be the slot from
2665 : : * EvalPlanQualSlot() for this rel, for increased efficiency.
2666 : : *
2667 : : * This tests whether the tuple in inputslot still matches the relevant
2668 : : * quals. For that result to be useful, typically the input tuple has to be
2669 : : * last row version (otherwise the result isn't particularly useful) and
2670 : : * locked (otherwise the result might be out of date). That's typically
2671 : : * achieved by using table_tuple_lock() with the
2672 : : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
2673 : : *
2674 : : * Returns a slot containing the new candidate update/delete tuple, or
2675 : : * NULL if we determine we shouldn't process the row.
2676 : : */
2677 : : TupleTableSlot *
2434 andres@anarazel.de 2678 : 150 : EvalPlanQual(EPQState *epqstate, Relation relation,
2679 : : Index rti, TupleTableSlot *inputslot)
2680 : : {
2681 : : TupleTableSlot *slot;
2682 : : TupleTableSlot *testslot;
2683 : :
6035 tgl@sss.pgh.pa.us 2684 [ - + ]: 150 : Assert(rti > 0);
2685 : :
2686 : : /*
2687 : : * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2688 : : */
2434 andres@anarazel.de 2689 : 150 : EvalPlanQualBegin(epqstate);
2690 : :
2691 : : /*
2692 : : * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
2693 : : * an unnecessary copy.
2694 : : */
2622 2695 : 150 : testslot = EvalPlanQualSlot(epqstate, relation, rti);
2600 2696 [ + + ]: 150 : if (testslot != inputslot)
2697 : 6 : ExecCopySlot(testslot, inputslot);
2698 : :
2699 : : /*
2700 : : * Mark that an EPQ tuple is available for this relation. (If there is
2701 : : * more than one result relation, the others remain marked as having no
2702 : : * tuple available.)
2703 : : */
1082 tgl@sss.pgh.pa.us 2704 : 150 : epqstate->relsubs_done[rti - 1] = false;
2705 : 150 : epqstate->relsubs_blocked[rti - 1] = false;
2706 : :
2707 : : /*
2708 : : * Run the EPQ query. We assume it will return at most one tuple.
2709 : : */
6035 2710 : 150 : slot = EvalPlanQualNext(epqstate);
2711 : :
2712 : : /*
2713 : : * If we got a tuple, force the slot to materialize the tuple so that it
2714 : : * is not dependent on any local state in the EPQ query (in particular,
2715 : : * it's highly likely that the slot contains references to any pass-by-ref
2716 : : * datums that may be present in copyTuple). As with the next step, this
2717 : : * is to guard against early re-use of the EPQ query.
2718 : : */
5989 2719 [ + + + + ]: 150 : if (!TupIsNull(slot))
2728 andres@anarazel.de 2720 : 112 : ExecMaterializeSlot(slot);
2721 : :
2722 : : /*
2723 : : * Clear out the test tuple, and mark that no tuple is available here.
2724 : : * This is needed in case the EPQ state is re-used to test a tuple for a
2725 : : * different target relation.
2726 : : */
2622 2727 : 150 : ExecClearTuple(testslot);
1082 tgl@sss.pgh.pa.us 2728 : 150 : epqstate->relsubs_blocked[rti - 1] = true;
2729 : :
6049 2730 : 150 : return slot;
2731 : : }
2732 : :
2733 : : /*
2734 : : * EvalPlanQualInit -- initialize during creation of a plan state node
2735 : : * that might need to invoke EPQ processing.
2736 : : *
2737 : : * If the caller intends to use EvalPlanQual(), resultRelations should be
2738 : : * a list of RT indexes of potential target relations for EvalPlanQual(),
2739 : : * and we will arrange that the other listed relations don't return any
2740 : : * tuple during an EvalPlanQual() call. Otherwise resultRelations
2741 : : * should be NIL.
2742 : : *
2743 : : * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2744 : : * with EvalPlanQualSetPlan.
2745 : : */
2746 : : void
2434 andres@anarazel.de 2747 : 151133 : EvalPlanQualInit(EPQState *epqstate, EState *parentestate,
2748 : : Plan *subplan, List *auxrowmarks,
2749 : : int epqParam, List *resultRelations)
2750 : : {
2751 : 151133 : Index rtsize = parentestate->es_range_table_size;
2752 : :
2753 : : /* initialize data not changing over EPQState's lifetime */
2754 : 151133 : epqstate->parentestate = parentestate;
2755 : 151133 : epqstate->epqParam = epqParam;
1082 tgl@sss.pgh.pa.us 2756 : 151133 : epqstate->resultRelations = resultRelations;
2757 : :
2758 : : /*
2759 : : * Allocate space to reference a slot for each potential rti - do so now
2760 : : * rather than in EvalPlanQualBegin(), as done for other dynamically
2761 : : * allocated resources, so EvalPlanQualSlot() can be used to hold tuples
2762 : : * that *may* need EPQ later, without forcing the overhead of
2763 : : * EvalPlanQualBegin().
2764 : : */
2434 andres@anarazel.de 2765 : 151133 : epqstate->tuple_table = NIL;
146 michael@paquier.xyz 2766 :GNC 151133 : epqstate->relsubs_slot = palloc0_array(TupleTableSlot *, rtsize);
2767 : :
2768 : : /* ... and remember data that EvalPlanQualBegin will need */
6035 tgl@sss.pgh.pa.us 2769 :CBC 151133 : epqstate->plan = subplan;
5592 2770 : 151133 : epqstate->arowMarks = auxrowmarks;
2771 : :
2772 : : /* ... and mark the EPQ state inactive */
2434 andres@anarazel.de 2773 : 151133 : epqstate->origslot = NULL;
2774 : 151133 : epqstate->recheckestate = NULL;
2775 : 151133 : epqstate->recheckplanstate = NULL;
2776 : 151133 : epqstate->relsubs_rowmark = NULL;
2777 : 151133 : epqstate->relsubs_done = NULL;
1082 tgl@sss.pgh.pa.us 2778 : 151133 : epqstate->relsubs_blocked = NULL;
6035 2779 : 151133 : }
2780 : :
2781 : : /*
2782 : : * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2783 : : *
2784 : : * We used to need this so that ModifyTable could deal with multiple subplans.
2785 : : * It could now be refactored out of existence.
2786 : : */
2787 : : void
5592 2788 : 72461 : EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2789 : : {
2790 : : /* If we have a live EPQ query, shut it down */
6035 2791 : 72461 : EvalPlanQualEnd(epqstate);
2792 : : /* And set/change the plan pointer */
2793 : 72461 : epqstate->plan = subplan;
2794 : : /* The rowmarks depend on the plan, too */
5592 2795 : 72461 : epqstate->arowMarks = auxrowmarks;
6035 2796 : 72461 : }
2797 : :
2798 : : /*
2799 : : * Return, and create if necessary, a slot for an EPQ test tuple.
2800 : : *
2801 : : * Note this only requires EvalPlanQualInit() to have been called,
2802 : : * EvalPlanQualBegin() is not necessary.
2803 : : */
2804 : : TupleTableSlot *
2622 andres@anarazel.de 2805 : 81502 : EvalPlanQualSlot(EPQState *epqstate,
2806 : : Relation relation, Index rti)
2807 : : {
2808 : : TupleTableSlot **slot;
2809 : :
2434 2810 [ - + ]: 81502 : Assert(relation);
2811 [ + - - + ]: 81502 : Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size);
2812 : 81502 : slot = &epqstate->relsubs_slot[rti - 1];
2813 : :
2622 2814 [ + + ]: 81502 : if (*slot == NULL)
2815 : : {
2816 : : MemoryContext oldcontext;
2817 : :
2434 2818 : 4965 : oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
2819 : 4965 : *slot = table_slot_create(relation, &epqstate->tuple_table);
2622 2820 : 4965 : MemoryContextSwitchTo(oldcontext);
2821 : : }
2822 : :
2823 : 81502 : return *slot;
2824 : : }
2825 : :
2826 : : /*
2827 : : * Fetch the current row value for a non-locked relation, identified by rti,
2828 : : * that needs to be scanned by an EvalPlanQual operation. origslot must have
2829 : : * been set to contain the current result row (top-level row) that we need to
2830 : : * recheck. Returns true if a substitution tuple was found, false if not.
2831 : : */
2832 : : bool
2434 2833 : 22 : EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot)
2834 : : {
2835 : 22 : ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
2836 : : ExecRowMark *erm;
2837 : : Datum datum;
2838 : : bool isNull;
2839 : :
2840 [ - + ]: 22 : Assert(earm != NULL);
6035 tgl@sss.pgh.pa.us 2841 [ - + ]: 22 : Assert(epqstate->origslot != NULL);
2842 : :
445 dgustafsson@postgres 2843 : 22 : erm = earm->rowmark;
2844 : :
2434 andres@anarazel.de 2845 [ - + ]: 22 : if (RowMarkRequiresRowShareLock(erm->markType))
2434 andres@anarazel.de 2846 [ # # ]:UBC 0 : elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2847 : :
2848 : : /* if child rel, must check whether it produced this row */
2434 andres@anarazel.de 2849 [ - + ]:CBC 22 : if (erm->rti != erm->prti)
2850 : : {
2851 : : Oid tableoid;
2852 : :
2434 andres@anarazel.de 2853 :UBC 0 : datum = ExecGetJunkAttribute(epqstate->origslot,
2854 : 0 : earm->toidAttNo,
2855 : : &isNull);
2856 : : /* non-locked rels could be on the inside of outer joins */
2857 [ # # ]: 0 : if (isNull)
2858 : 0 : return false;
2859 : :
2860 : 0 : tableoid = DatumGetObjectId(datum);
2861 : :
2862 [ # # ]: 0 : Assert(OidIsValid(erm->relid));
2863 [ # # ]: 0 : if (tableoid != erm->relid)
2864 : : {
2865 : : /* this child is inactive right now */
2866 : 0 : return false;
2867 : : }
2868 : : }
2869 : :
2434 andres@anarazel.de 2870 [ + + ]:CBC 22 : if (erm->markType == ROW_MARK_REFERENCE)
2871 : : {
2872 [ - + ]: 13 : Assert(erm->relation != NULL);
2873 : :
2874 : : /* fetch the tuple's ctid */
2875 : 13 : datum = ExecGetJunkAttribute(epqstate->origslot,
2876 : 13 : earm->ctidAttNo,
2877 : : &isNull);
2878 : : /* non-locked rels could be on the inside of outer joins */
2879 [ - + ]: 13 : if (isNull)
2434 andres@anarazel.de 2880 :UBC 0 : return false;
2881 : :
2882 : : /* fetch requests on foreign tables must be passed to their FDW */
2434 andres@anarazel.de 2883 [ - + ]:CBC 13 : if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2884 : : {
2885 : : FdwRoutine *fdwroutine;
2434 andres@anarazel.de 2886 :UBC 0 : bool updated = false;
2887 : :
2888 : 0 : fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2889 : : /* this should have been checked already, but let's be safe */
2890 [ # # ]: 0 : if (fdwroutine->RefetchForeignRow == NULL)
2891 [ # # ]: 0 : ereport(ERROR,
2892 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2893 : : errmsg("cannot lock rows in foreign table \"%s\"",
2894 : : RelationGetRelationName(erm->relation))));
2895 : :
2896 : 0 : fdwroutine->RefetchForeignRow(epqstate->recheckestate,
2897 : : erm,
2898 : : datum,
2899 : : slot,
2900 : : &updated);
2901 [ # # # # ]: 0 : if (TupIsNull(slot))
2902 [ # # ]: 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2903 : :
2904 : : /*
2905 : : * Ideally we'd insist on updated == false here, but that assumes
2906 : : * that FDWs can track that exactly, which they might not be able
2907 : : * to. So just ignore the flag.
2908 : : */
2909 : 0 : return true;
2910 : : }
2911 : : else
2912 : : {
2913 : : /* ordinary table, fetch the tuple */
2434 andres@anarazel.de 2914 [ - + ]:CBC 13 : if (!table_tuple_fetch_row_version(erm->relation,
2915 : 13 : (ItemPointer) DatumGetPointer(datum),
2916 : : SnapshotAny, slot))
2434 andres@anarazel.de 2917 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2434 andres@anarazel.de 2918 :CBC 13 : return true;
2919 : : }
2920 : : }
2921 : : else
2922 : : {
2923 [ - + ]: 9 : Assert(erm->markType == ROW_MARK_COPY);
2924 : :
2925 : : /* fetch the whole-row Var for the relation */
2926 : 9 : datum = ExecGetJunkAttribute(epqstate->origslot,
2927 : 9 : earm->wholeAttNo,
2928 : : &isNull);
2929 : : /* non-locked rels could be on the inside of outer joins */
2930 [ - + ]: 9 : if (isNull)
2434 andres@anarazel.de 2931 :UBC 0 : return false;
2932 : :
2434 andres@anarazel.de 2933 :CBC 9 : ExecStoreHeapTupleDatum(datum, slot);
2934 : 9 : return true;
2935 : : }
2936 : : }
2937 : :
2938 : : /*
2939 : : * Fetch the next row (if any) from EvalPlanQual testing
2940 : : *
2941 : : * (In practice, there should never be more than one row...)
2942 : : */
2943 : : TupleTableSlot *
6035 tgl@sss.pgh.pa.us 2944 : 190 : EvalPlanQualNext(EPQState *epqstate)
2945 : : {
2946 : : MemoryContext oldcontext;
2947 : : TupleTableSlot *slot;
2948 : :
2434 andres@anarazel.de 2949 : 190 : oldcontext = MemoryContextSwitchTo(epqstate->recheckestate->es_query_cxt);
2950 : 190 : slot = ExecProcNode(epqstate->recheckplanstate);
8539 tgl@sss.pgh.pa.us 2951 : 190 : MemoryContextSwitchTo(oldcontext);
2952 : :
6049 2953 : 190 : return slot;
2954 : : }
2955 : :
2956 : : /*
2957 : : * Initialize or reset an EvalPlanQual state tree
2958 : : */
2959 : : void
2434 andres@anarazel.de 2960 : 227 : EvalPlanQualBegin(EPQState *epqstate)
2961 : : {
2962 : 227 : EState *parentestate = epqstate->parentestate;
2963 : 227 : EState *recheckestate = epqstate->recheckestate;
2964 : :
2965 [ + + ]: 227 : if (recheckestate == NULL)
2966 : : {
2967 : : /* First time through, so create a child EState */
2968 : 145 : EvalPlanQualStart(epqstate, epqstate->plan);
2969 : : }
2970 : : else
2971 : : {
2972 : : /*
2973 : : * We already have a suitable child EPQ tree, so just reset it.
2974 : : */
2770 tgl@sss.pgh.pa.us 2975 : 82 : Index rtsize = parentestate->es_range_table_size;
2434 andres@anarazel.de 2976 : 82 : PlanState *rcplanstate = epqstate->recheckplanstate;
2977 : :
2978 : : /*
2979 : : * Reset the relsubs_done[] flags to equal relsubs_blocked[], so that
2980 : : * the EPQ run will never attempt to fetch tuples from blocked target
2981 : : * relations.
2982 : : */
1082 tgl@sss.pgh.pa.us 2983 : 82 : memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
2984 : : rtsize * sizeof(bool));
2985 : :
2986 : : /* Recopy current values of parent parameters */
3095 rhaas@postgresql.org 2987 [ + - ]: 82 : if (parentestate->es_plannedstmt->paramExecTypes != NIL)
2988 : : {
2989 : : int i;
2990 : :
2991 : : /*
2992 : : * Force evaluation of any InitPlan outputs that could be needed
2993 : : * by the subplan, just in case they got reset since
2994 : : * EvalPlanQualStart (see comments therein).
2995 : : */
2434 andres@anarazel.de 2996 : 82 : ExecSetParamPlanMulti(rcplanstate->plan->extParam,
2789 tgl@sss.pgh.pa.us 2997 [ + - ]: 82 : GetPerTupleExprContext(parentestate));
2998 : :
3095 rhaas@postgresql.org 2999 : 82 : i = list_length(parentestate->es_plannedstmt->paramExecTypes);
3000 : :
6035 tgl@sss.pgh.pa.us 3001 [ + + ]: 175 : while (--i >= 0)
3002 : : {
3003 : : /* copy value if any, but not execPlan link */
2434 andres@anarazel.de 3004 : 93 : recheckestate->es_param_exec_vals[i].value =
6035 tgl@sss.pgh.pa.us 3005 : 93 : parentestate->es_param_exec_vals[i].value;
2434 andres@anarazel.de 3006 : 93 : recheckestate->es_param_exec_vals[i].isnull =
6035 tgl@sss.pgh.pa.us 3007 : 93 : parentestate->es_param_exec_vals[i].isnull;
3008 : : }
3009 : : }
3010 : :
3011 : : /*
3012 : : * Mark child plan tree as needing rescan at all scan nodes. The
3013 : : * first ExecProcNode will take care of actually doing the rescan.
3014 : : */
2434 andres@anarazel.de 3015 : 82 : rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
3016 : : epqstate->epqParam);
3017 : : }
8539 tgl@sss.pgh.pa.us 3018 : 227 : }
3019 : :
/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 *
 * 'epqstate' must have its parentestate, arowMarks, and resultRelations
 * already set up; on return, epqstate->recheckestate and
 * epqstate->recheckplanstate are initialized, along with the per-RTI
 * arrays relsubs_rowmark, relsubs_done, and relsubs_blocked.
 * 'planTree' is the plan subtree to be (re)executed for the EPQ check.
 */
static void
EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
{
	EState	   *parentestate = epqstate->parentestate;
	Index		rtsize = parentestate->es_range_table_size;
	EState	   *rcestate;
	MemoryContext oldcontext;
	ListCell   *l;

	epqstate->recheckestate = rcestate = CreateExecutorState();

	/* All EPQ setup allocations live in the child estate's query context */
	oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt);

	/* signal that this is an EState for executing EPQ */
	rcestate->es_epq_active = epqstate;

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, and external Param info.  They need their own
	 * copies of local state, including a tuple table, es_param_exec_vals,
	 * result-rel info, etc.
	 */
	rcestate->es_direction = ForwardScanDirection;
	rcestate->es_snapshot = parentestate->es_snapshot;
	rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	rcestate->es_range_table = parentestate->es_range_table;
	rcestate->es_range_table_size = parentestate->es_range_table_size;
	rcestate->es_relations = parentestate->es_relations;
	rcestate->es_rowmarks = parentestate->es_rowmarks;
	rcestate->es_rteperminfos = parentestate->es_rteperminfos;
	rcestate->es_plannedstmt = parentestate->es_plannedstmt;
	rcestate->es_junkFilter = parentestate->es_junkFilter;
	rcestate->es_output_cid = parentestate->es_output_cid;
	rcestate->es_queryEnv = parentestate->es_queryEnv;

	/*
	 * ResultRelInfos needed by subplans are initialized from scratch when the
	 * subplans themselves are initialized.
	 */
	rcestate->es_result_relations = NULL;
	/* es_trig_target_relations must NOT be copied */
	rcestate->es_top_eflags = parentestate->es_top_eflags;
	rcestate->es_instrument = parentestate->es_instrument;
	/* es_auxmodifytables must NOT be copied */

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	rcestate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->paramExecTypes != NIL)
	{
		int			i;

		/*
		 * Force evaluation of any InitPlan outputs that could be needed by
		 * the subplan.  (With more complexity, maybe we could postpone this
		 * till the subplan actually demands them, but it doesn't seem worth
		 * the trouble; this is a corner case already, since usually the
		 * InitPlans would have been evaluated before reaching EvalPlanQual.)
		 *
		 * This will not touch output params of InitPlans that occur somewhere
		 * within the subplan tree, only those that are attached to the
		 * ModifyTable node or above it and are referenced within the subplan.
		 * That's OK though, because the planner would only attach such
		 * InitPlans to a lower-level SubqueryScan node, and EPQ execution
		 * will not descend into a SubqueryScan.
		 *
		 * The EState's per-output-tuple econtext is sufficiently short-lived
		 * for this, since it should get reset before there is any chance of
		 * doing EvalPlanQual again.
		 */
		ExecSetParamPlanMulti(planTree->extParam,
							  GetPerTupleExprContext(parentestate));

		/* now make the internal param workspace ... */
		i = list_length(parentestate->es_plannedstmt->paramExecTypes);
		rcestate->es_param_exec_vals = palloc0_array(ParamExecData, i);
		/* ... and copy down all values, whether really needed or not */
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			rcestate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			rcestate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Copy es_unpruned_relids so that pruned relations are ignored by
	 * ExecInitLockRows() and ExecInitModifyTable() when initializing the plan
	 * trees below.
	 */
	rcestate->es_unpruned_relids = parentestate->es_unpruned_relids;

	/*
	 * Also make the PartitionPruneInfo and the results of pruning available.
	 * These need to match exactly so that we initialize all the same Append
	 * and MergeAppend subplans as the parent did.
	 */
	rcestate->es_part_prune_infos = parentestate->es_part_prune_infos;
	rcestate->es_part_prune_states = parentestate->es_part_prune_states;
	rcestate->es_part_prune_results = parentestate->es_part_prune_results;

	/* We'll also borrow the es_partition_directory from the parent state */
	rcestate->es_partition_directory = parentestate->es_partition_directory;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.  Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(rcestate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, rcestate, 0);
		rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Build an RTI indexed array of rowmarks, so that
	 * EvalPlanQualFetchRowMark() can efficiently access the to be fetched
	 * rowmark.
	 */
	epqstate->relsubs_rowmark = palloc0_array(ExecAuxRowMark *, rtsize);
	foreach(l, epqstate->arowMarks)
	{
		ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l);

		/* rowmark->rti is 1-based; array slots are 0-based */
		epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm;
	}

	/*
	 * Initialize per-relation EPQ tuple states.  Result relations, if any,
	 * get marked as blocked; others as not-fetched.
	 */
	epqstate->relsubs_done = palloc_array(bool, rtsize);
	epqstate->relsubs_blocked = palloc0_array(bool, rtsize);

	foreach(l, epqstate->resultRelations)
	{
		int			rtindex = lfirst_int(l);

		Assert(rtindex > 0 && rtindex <= rtsize);
		epqstate->relsubs_blocked[rtindex - 1] = true;
	}

	/* relsubs_done starts out identical to relsubs_blocked */
	memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
		   rtsize * sizeof(bool));

	/*
	 * Initialize the private state information for all the nodes in the part
	 * of the plan tree we need to run.  This opens files, allocates storage
	 * and leaves us ready to start processing tuples.
	 */
	epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0);

	MemoryContextSwitchTo(oldcontext);
}
3195 : :
/*
 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
 * or if we are done with the current EPQ child.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * result and trigger target relations that got opened, since those are not
 * shared.  (There probably shouldn't be any of the latter, but just in
 * case...)
 *
 * It is safe to call this even if EvalPlanQualStart was never invoked for
 * 'epqstate' (recheckestate == NULL); we still release the tuple table
 * that EvalPlanQualSlot() may have populated, then return early.
 */
void
EvalPlanQualEnd(EPQState *epqstate)
{
	EState	   *estate = epqstate->recheckestate;
	Index		rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = epqstate->parentestate->es_range_table_size;

	/*
	 * We may have a tuple table, even if EPQ wasn't started, because we allow
	 * use of EvalPlanQualSlot() without calling EvalPlanQualBegin().
	 */
	if (epqstate->tuple_table != NIL)
	{
		/* clear the per-RTI slot pointers before dropping the slots */
		memset(epqstate->relsubs_slot, 0,
			   rtsize * sizeof(TupleTableSlot *));
		ExecResetTupleTable(epqstate->tuple_table, true);
		epqstate->tuple_table = NIL;
	}

	/* EPQ wasn't started, nothing further to do */
	if (estate == NULL)
		return;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* shut down the main recheck plan tree ... */
	ExecEndNode(epqstate->recheckplanstate);

	/* ... and the SubPlan trees initialized by EvalPlanQualStart */
	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* throw away the per-estate tuple table, some node may have used it */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/* Close any result and trigger target relations attached to this EState */
	ExecCloseResultRelations(estate);

	MemoryContextSwitchTo(oldcontext);

	/*
	 * NULLify the partition directory before freeing the executor state.
	 * Since EvalPlanQualStart() just borrowed the parent EState's directory,
	 * we'd better leave it up to the parent to delete it.
	 */
	estate->es_partition_directory = NULL;

	FreeExecutorState(estate);

	/* Mark EPQState idle */
	epqstate->origslot = NULL;
	epqstate->recheckestate = NULL;
	epqstate->recheckplanstate = NULL;
	epqstate->relsubs_rowmark = NULL;
	epqstate->relsubs_done = NULL;
	epqstate->relsubs_blocked = NULL;
}
|