Age | Owner | Branch data | TLA | Line data | Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * trigger.c
4 : : * PostgreSQL TRIGGERs support code.
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : * IDENTIFICATION
10 : : * src/backend/commands/trigger.c
11 : : *
12 : : *-------------------------------------------------------------------------
13 : : */
14 : : #include "postgres.h"
15 : :
16 : : #include "access/genam.h"
17 : : #include "access/htup_details.h"
18 : : #include "access/relation.h"
19 : : #include "access/sysattr.h"
20 : : #include "access/table.h"
21 : : #include "access/tableam.h"
22 : : #include "access/xact.h"
23 : : #include "catalog/catalog.h"
24 : : #include "catalog/dependency.h"
25 : : #include "catalog/indexing.h"
26 : : #include "catalog/objectaccess.h"
27 : : #include "catalog/partition.h"
28 : : #include "catalog/pg_constraint.h"
29 : : #include "catalog/pg_inherits.h"
30 : : #include "catalog/pg_proc.h"
31 : : #include "catalog/pg_trigger.h"
32 : : #include "catalog/pg_type.h"
33 : : #include "commands/trigger.h"
34 : : #include "executor/executor.h"
35 : : #include "miscadmin.h"
36 : : #include "nodes/bitmapset.h"
37 : : #include "nodes/makefuncs.h"
38 : : #include "optimizer/optimizer.h"
39 : : #include "parser/parse_clause.h"
40 : : #include "parser/parse_collate.h"
41 : : #include "parser/parse_func.h"
42 : : #include "parser/parse_relation.h"
43 : : #include "partitioning/partdesc.h"
44 : : #include "pgstat.h"
45 : : #include "rewrite/rewriteHandler.h"
46 : : #include "rewrite/rewriteManip.h"
47 : : #include "storage/lmgr.h"
48 : : #include "utils/acl.h"
49 : : #include "utils/builtins.h"
50 : : #include "utils/fmgroids.h"
51 : : #include "utils/guc_hooks.h"
52 : : #include "utils/inval.h"
53 : : #include "utils/lsyscache.h"
54 : : #include "utils/memutils.h"
55 : : #include "utils/plancache.h"
56 : : #include "utils/rel.h"
57 : : #include "utils/snapmgr.h"
58 : : #include "utils/syscache.h"
59 : : #include "utils/tuplestore.h"
60 : :
61 : :
62 : : /* GUC variables */
63 : : int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
64 : :
65 : : /* How many levels deep into trigger execution are we? */
66 : : static int MyTriggerDepth = 0;
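
MyTriggerDepth is incremented and decremented around each trigger-function call and is what the SQL-callable pg_trigger_depth() function reports (that wrapper is defined further down in trigger.c). A minimal sketch of the accessor, for orientation only:

    Datum
    pg_trigger_depth(PG_FUNCTION_ARGS)
    {
        PG_RETURN_INT32(MyTriggerDepth);
    }
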
67 : :
68 : : /* Local function prototypes */
69 : : static void renametrig_internal(Relation tgrel, Relation targetrel,
70 : : HeapTuple trigtup, const char *newname,
71 : : const char *expected_name);
72 : : static void renametrig_partition(Relation tgrel, Oid partitionId,
73 : : Oid parentTriggerOid, const char *newname,
74 : : const char *expected_name);
75 : : static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
76 : : static bool GetTupleForTrigger(EState *estate,
77 : : EPQState *epqstate,
78 : : ResultRelInfo *relinfo,
79 : : ItemPointer tid,
80 : : LockTupleMode lockmode,
81 : : TupleTableSlot *oldslot,
82 : : bool do_epq_recheck,
83 : : TupleTableSlot **epqslot,
84 : : TM_Result *tmresultp,
85 : : TM_FailureData *tmfdp);
86 : : static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
87 : : Trigger *trigger, TriggerEvent event,
88 : : Bitmapset *modifiedCols,
89 : : TupleTableSlot *oldslot, TupleTableSlot *newslot);
90 : : static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
91 : : int tgindx,
92 : : FmgrInfo *finfo,
93 : : Instrumentation *instr,
94 : : MemoryContext per_tuple_context);
95 : : static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
96 : : ResultRelInfo *src_partinfo,
97 : : ResultRelInfo *dst_partinfo,
98 : : int event, bool row_trigger,
99 : : TupleTableSlot *oldslot, TupleTableSlot *newslot,
100 : : List *recheckIndexes, Bitmapset *modifiedCols,
101 : : TransitionCaptureState *transition_capture,
102 : : bool is_crosspart_update);
103 : : static void AfterTriggerEnlargeQueryState(void);
104 : : static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
105 : : static HeapTuple check_modified_virtual_generated(TupleDesc tupdesc, HeapTuple tuple);
106 : :
107 : :
108 : : /*
109 : : * Create a trigger. Returns the address of the created trigger.
110 : : *
111 : : * queryString is the source text of the CREATE TRIGGER command.
112 : : * This must be supplied if a whenClause is specified, else it can be NULL.
113 : : *
114 : : * relOid, if nonzero, is the relation on which the trigger should be
115 : : * created. If zero, the name provided in the statement will be looked up.
116 : : *
117 : : * refRelOid, if nonzero, is the relation to which the constraint trigger
118 : : * refers. If zero, the constraint relation name provided in the statement
119 : : * will be looked up as needed.
120 : : *
121 : : * constraintOid, if nonzero, says that this trigger is being created
122 : : * internally to implement that constraint. A suitable pg_depend entry will
123 : : * be made to link the trigger to that constraint. constraintOid is zero when
124 : : * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
125 : : * TRIGGER, we build a pg_constraint entry internally.)
126 : : *
127 : : * indexOid, if nonzero, is the OID of an index associated with the constraint.
128 : : * We do nothing with this except store it into pg_trigger.tgconstrindid;
129 : : * but when creating a trigger for a deferrable unique constraint on a
130 : : * partitioned table, its children are looked up. Note we don't cope with
131 : : * invalid indexes in that case.
132 : : *
133 : : * funcoid, if nonzero, is the OID of the function to invoke. When this is
134 : : * given, stmt->funcname is ignored.
135 : : *
136 : : * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
137 : : * if that trigger is dropped, this one should be too. There are two cases
138 : : * when a nonzero value is passed for this: 1) when this function recurses to
139 : : * create the trigger on partitions, 2) when creating child foreign key
140 : : * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
141 : : *
142 : : * If whenClause is passed, it is an already-transformed expression for
143 : : * WHEN. In this case, we ignore any that may come in stmt->whenClause.
144 : : *
145 : : * If isInternal is true then this is an internally-generated trigger.
146 : : * This argument sets the tgisinternal field of the pg_trigger entry, and
147 : : * if true causes us to modify the given trigger name to ensure uniqueness.
148 : : *
149 : : * When isInternal is not true we require ACL_TRIGGER permissions on the
150 : : * relation, as well as ACL_EXECUTE on the trigger function. For internal
151 : : * triggers the caller must apply any required permission checks.
152 : : *
153 : : * When called on partitioned tables, this function recurses to create the
 154 : : * trigger on all the partitions, except when isInternal is true, in which
 155 : : * case the caller is expected to handle the recursion itself. in_partition
156 : : * indicates such a recursive call; outside callers should pass "false"
157 : : * (but see CloneRowTriggersToPartition).
158 : : */
159 : : ObjectAddress
5769 tgl@sss.pgh.pa.us 160 :CBC 7892 : CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
161 : : Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
162 : : Oid funcoid, Oid parentTriggerOid, Node *whenClause,
163 : : bool isInternal, bool in_partition)
164 : : {
165 : : return
1513 alvherre@alvh.no-ip. 166 : 7892 : CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
167 : : constraintOid, indexOid, funcoid,
168 : : parentTriggerOid, whenClause, isInternal,
169 : : in_partition, TRIGGER_FIRES_ON_ORIGIN);
170 : : }
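
For orientation, here is a hedged sketch of a call that follows the contract laid out in the header comment above: a plain user-level CREATE TRIGGER passes the parse tree and query text, lets the relation and function be looked up by name, and leaves every internal-use OID invalid. The call site in the utility-command dispatcher looks roughly like this; variable names are illustrative:

    ObjectAddress addr;

    addr = CreateTrigger((CreateTrigStmt *) parsetree,  /* parsed CREATE TRIGGER */
                         queryString,   /* source text, needed for WHEN */
                         InvalidOid,    /* relOid: look up stmt->relation by name */
                         InvalidOid,    /* refRelOid: no constraint relation */
                         InvalidOid,    /* constraintOid: not backing a constraint */
                         InvalidOid,    /* indexOid: no constraint index */
                         InvalidOid,    /* funcoid: look up stmt->funcname */
                         InvalidOid,    /* parentTriggerOid: not a child trigger */
                         NULL,          /* whenClause: not pre-transformed */
                         false,         /* isInternal */
                         false);        /* in_partition */
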
171 : :
172 : : /*
173 : : * Like the above; additionally the firing condition
174 : : * (always/origin/replica/disabled) can be specified.
175 : : */
176 : : ObjectAddress
177 : 8300 : CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
178 : : Oid relOid, Oid refRelOid, Oid constraintOid,
179 : : Oid indexOid, Oid funcoid, Oid parentTriggerOid,
180 : : Node *whenClause, bool isInternal, bool in_partition,
181 : : char trigger_fires_when)
182 : : {
183 : : int16 tgtype;
184 : : int ncolumns;
185 : : int16 *columns;
186 : : int2vector *tgattr;
187 : : List *whenRtable;
188 : : char *qual;
189 : : Datum values[Natts_pg_trigger];
190 : : bool nulls[Natts_pg_trigger];
191 : : Relation rel;
192 : : AclResult aclresult;
193 : : Relation tgrel;
194 : : Relation pgrel;
1757 tgl@sss.pgh.pa.us 195 : 8300 : HeapTuple tuple = NULL;
196 : : Oid funcrettype;
197 : 8300 : Oid trigoid = InvalidOid;
198 : : char internaltrigname[NAMEDATALEN];
199 : : char *trigname;
8374 200 : 8300 : Oid constrrelid = InvalidOid;
201 : : ObjectAddress myself,
202 : : referenced;
3228 kgrittn@postgresql.o 203 : 8300 : char *oldtablename = NULL;
204 : 8300 : char *newtablename = NULL;
205 : : bool partition_recurse;
1757 tgl@sss.pgh.pa.us 206 : 8300 : bool trigger_exists = false;
207 : 8300 : Oid existing_constraint_oid = InvalidOid;
208 : 8300 : bool existing_isInternal = false;
1340 alvherre@alvh.no-ip. 209 : 8300 : bool existing_isClone = false;
210 : :
4219 rhaas@postgresql.org 211 [ + + ]: 8300 : if (OidIsValid(relOid))
2420 andres@anarazel.de 212 : 6704 : rel = table_open(relOid, ShareRowExclusiveLock);
213 : : else
214 : 1596 : rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
215 : :
216 : : /*
217 : : * Triggers must be on tables or views, and there are additional
218 : : * relation-type-specific restrictions.
219 : : */
2724 alvherre@alvh.no-ip. 220 [ + + ]: 8300 : if (rel->rd_rel->relkind == RELKIND_RELATION)
221 : : {
222 : : /* Tables can't have INSTEAD OF triggers */
5445 tgl@sss.pgh.pa.us 223 [ + + ]: 6769 : if (stmt->timing != TRIGGER_TYPE_BEFORE &&
224 [ + + ]: 6113 : stmt->timing != TRIGGER_TYPE_AFTER)
225 [ + - ]: 9 : ereport(ERROR,
226 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
227 : : errmsg("\"%s\" is a table",
228 : : RelationGetRelationName(rel)),
229 : : errdetail("Tables cannot have INSTEAD OF triggers.")));
230 : : }
2724 alvherre@alvh.no-ip. 231 [ + + ]: 1531 : else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
232 : : {
233 : : /* Partitioned tables can't have INSTEAD OF triggers */
234 [ + + ]: 1374 : if (stmt->timing != TRIGGER_TYPE_BEFORE &&
235 [ + + ]: 1323 : stmt->timing != TRIGGER_TYPE_AFTER)
3195 rhaas@postgresql.org 236 [ + - ]: 3 : ereport(ERROR,
237 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
238 : : errmsg("\"%s\" is a table",
239 : : RelationGetRelationName(rel)),
240 : : errdetail("Tables cannot have INSTEAD OF triggers.")));
241 : :
242 : : /*
243 : : * FOR EACH ROW triggers have further restrictions
244 : : */
2724 alvherre@alvh.no-ip. 245 [ + + ]: 1371 : if (stmt->row)
246 : : {
247 : : /*
248 : : * Disallow use of transition tables.
249 : : *
250 : : * Note that we have another restriction about transition tables
251 : : * in partitions; search for 'has_superclass' below for an
252 : : * explanation. The check here is just to protect from the fact
253 : : * that if we allowed it here, the creation would succeed for a
254 : : * partitioned table with no partitions, but would be blocked by
255 : : * the other restriction when the first partition was created,
256 : : * which is very unfriendly behavior.
257 : : */
258 [ + + ]: 1253 : if (stmt->transitionRels != NIL)
259 [ + - ]: 3 : ereport(ERROR,
260 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
261 : : errmsg("\"%s\" is a partitioned table",
262 : : RelationGetRelationName(rel)),
263 : : errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
264 : : }
265 : : }
5445 tgl@sss.pgh.pa.us 266 [ + + ]: 157 : else if (rel->rd_rel->relkind == RELKIND_VIEW)
267 : : {
268 : : /*
269 : : * Views can have INSTEAD OF triggers (which we check below are
270 : : * row-level), or statement-level BEFORE/AFTER triggers.
271 : : */
272 [ + + + + ]: 105 : if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
273 [ + - ]: 18 : ereport(ERROR,
274 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
275 : : errmsg("\"%s\" is a view",
276 : : RelationGetRelationName(rel)),
277 : : errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
278 : : /* Disallow TRUNCATE triggers on VIEWs */
279 [ + + ]: 87 : if (TRIGGER_FOR_TRUNCATE(stmt->events))
280 [ + - ]: 6 : ereport(ERROR,
281 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
282 : : errmsg("\"%s\" is a view",
283 : : RelationGetRelationName(rel)),
284 : : errdetail("Views cannot have TRUNCATE triggers.")));
285 : : }
4185 noah@leadboat.com 286 [ + - ]: 52 : else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
287 : : {
288 [ + + ]: 52 : if (stmt->timing != TRIGGER_TYPE_BEFORE &&
289 [ - + ]: 27 : stmt->timing != TRIGGER_TYPE_AFTER)
4185 noah@leadboat.com 290 [ # # ]:UBC 0 : ereport(ERROR,
291 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
292 : : errmsg("\"%s\" is a foreign table",
293 : : RelationGetRelationName(rel)),
294 : : errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
295 : :
296 : : /*
297 : : * We disallow constraint triggers to protect the assumption that
298 : : * triggers on FKs can't be deferred. See notes with AfterTriggers
299 : : * data structures, below.
300 : : */
4185 noah@leadboat.com 301 [ + + ]:CBC 52 : if (stmt->isconstraint)
302 [ + - ]: 3 : ereport(ERROR,
303 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
304 : : errmsg("\"%s\" is a foreign table",
305 : : RelationGetRelationName(rel)),
306 : : errdetail("Foreign tables cannot have constraint triggers.")));
307 : : }
308 : : else
8084 tgl@sss.pgh.pa.us 309 [ # # ]:UBC 0 : ereport(ERROR,
310 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
311 : : errmsg("relation \"%s\" cannot have triggers",
312 : : RelationGetRelationName(rel)),
313 : : errdetail_relkind_not_supported(rel->rd_rel->relkind)));
314 : :
8548 tgl@sss.pgh.pa.us 315 [ + + + + ]:CBC 8258 : if (!allowSystemTableMods && IsSystemRelation(rel))
8084 316 [ + - ]: 1 : ereport(ERROR,
317 : : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
318 : : errmsg("permission denied: \"%s\" is a system catalog",
319 : : RelationGetRelationName(rel))));
320 : :
4219 rhaas@postgresql.org 321 [ + + ]: 8257 : if (stmt->isconstraint)
322 : : {
323 : : /*
324 : : * We must take a lock on the target relation to protect against
325 : : * concurrent drop. It's not clear that AccessShareLock is strong
326 : : * enough, but we certainly need at least that much... otherwise, we
327 : : * might end up creating a pg_constraint entry referencing a
328 : : * nonexistent table.
329 : : */
330 [ + + ]: 6375 : if (OidIsValid(refRelOid))
331 : : {
332 : 6239 : LockRelationOid(refRelOid, AccessShareLock);
333 : 6239 : constrrelid = refRelOid;
334 : : }
335 [ + + ]: 136 : else if (stmt->constrrel != NULL)
336 : 12 : constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
337 : : false);
338 : : }
339 : :
340 : : /* permission checks */
5711 tgl@sss.pgh.pa.us 341 [ + + ]: 8257 : if (!isInternal)
342 : : {
8323 bruce@momjian.us 343 : 1961 : aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
344 : : ACL_TRIGGER);
8420 peter_e@gmx.net 345 [ - + ]: 1961 : if (aclresult != ACLCHECK_OK)
2835 peter_e@gmx.net 346 :UBC 0 : aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
8072 tgl@sss.pgh.pa.us 347 : 0 : RelationGetRelationName(rel));
348 : :
6071 tgl@sss.pgh.pa.us 349 [ + + ]:CBC 1961 : if (OidIsValid(constrrelid))
350 : : {
8323 bruce@momjian.us 351 : 21 : aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
352 : : ACL_TRIGGER);
8420 peter_e@gmx.net 353 [ - + ]: 21 : if (aclresult != ACLCHECK_OK)
2835 peter_e@gmx.net 354 :UBC 0 : aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
8072 tgl@sss.pgh.pa.us 355 : 0 : get_rel_name(constrrelid));
356 : : }
357 : : }
358 : :
359 : : /*
360 : : * When called on a partitioned table to create a FOR EACH ROW trigger
361 : : * that's not internal, we create one trigger for each partition, too.
362 : : *
363 : : * For that, we'd better hold lock on all of them ahead of time.
364 : : */
2724 alvherre@alvh.no-ip. 365 [ + + + + ]:CBC 9710 : partition_recurse = !isInternal && stmt->row &&
366 [ + + ]: 1453 : rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
367 [ + + ]: 8257 : if (partition_recurse)
368 : 199 : list_free(find_all_inheritors(RelationGetRelid(rel),
369 : : ShareRowExclusiveLock, NULL));
370 : :
371 : : /* Compute tgtype */
10226 bruce@momjian.us 372 : 8257 : TRIGGER_CLEAR_TYPE(tgtype);
373 [ + + ]: 8257 : if (stmt->row)
374 : 7749 : TRIGGER_SETT_ROW(tgtype);
5445 tgl@sss.pgh.pa.us 375 : 8257 : tgtype |= stmt->timing;
5924 376 : 8257 : tgtype |= stmt->events;
377 : :
378 : : /* Disallow ROW-level TRUNCATE triggers */
379 [ + + - + ]: 8257 : if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
5924 tgl@sss.pgh.pa.us 380 [ # # ]:UBC 0 : ereport(ERROR,
381 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
382 : : errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
383 : :
384 : : /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
5445 tgl@sss.pgh.pa.us 385 [ + + ]:CBC 8257 : if (TRIGGER_FOR_INSTEAD(tgtype))
386 : : {
387 [ + + ]: 60 : if (!TRIGGER_FOR_ROW(tgtype))
388 [ + - ]: 3 : ereport(ERROR,
389 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
390 : : errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
391 [ + + ]: 57 : if (stmt->whenClause)
392 [ + - ]: 3 : ereport(ERROR,
393 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
394 : : errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
395 [ + + ]: 54 : if (stmt->columns != NIL)
396 [ + - ]: 3 : ereport(ERROR,
397 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
398 : : errmsg("INSTEAD OF triggers cannot have column lists")));
399 : : }
400 : :
401 : : /*
402 : : * We don't yet support naming ROW transition variables, but the parser
403 : : * recognizes the syntax so we can give a nicer message here.
404 : : *
405 : : * Per standard, REFERENCING TABLE names are only allowed on AFTER
406 : : * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
407 : : * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
408 : : * only allowed once. Per standard, OLD may not be specified when
409 : : * creating a trigger only for INSERT, and NEW may not be specified when
410 : : * creating a trigger only for DELETE.
411 : : *
412 : : * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
413 : : * reference both ROW and TABLE transition data.
414 : : */
3228 kgrittn@postgresql.o 415 [ + + ]: 8248 : if (stmt->transitionRels != NIL)
416 : : {
417 : 229 : List *varList = stmt->transitionRels;
418 : : ListCell *lc;
419 : :
 420 [ + - + + + + ]: 499 : foreach(lc, varList)
421 : : {
3034 bruce@momjian.us 422 : 294 : TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
423 : :
3228 kgrittn@postgresql.o 424 [ - + ]: 294 : if (!(tt->isTable))
3228 kgrittn@postgresql.o 425 [ # # ]:UBC 0 : ereport(ERROR,
426 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
427 : : errmsg("ROW variable naming in the REFERENCING clause is not supported"),
428 : : errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
429 : :
430 : : /*
431 : : * Because of the above test, we omit further ROW-related testing
432 : : * below. If we later allow naming OLD and NEW ROW variables,
433 : : * adjustments will be needed below.
434 : : */
435 : :
3042 rhaas@postgresql.org 436 [ + + ]:CBC 294 : if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
437 [ + - ]: 3 : ereport(ERROR,
438 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
439 : : errmsg("\"%s\" is a foreign table",
440 : : RelationGetRelationName(rel)),
441 : : errdetail("Triggers on foreign tables cannot have transition tables.")));
442 : :
443 [ + + ]: 291 : if (rel->rd_rel->relkind == RELKIND_VIEW)
444 [ + - ]: 3 : ereport(ERROR,
445 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
446 : : errmsg("\"%s\" is a view",
447 : : RelationGetRelationName(rel)),
448 : : errdetail("Triggers on views cannot have transition tables.")));
449 : :
450 : : /*
451 : : * We currently don't allow row-level triggers with transition
452 : : * tables on partition or inheritance children. Such triggers
453 : : * would somehow need to see tuples converted to the format of the
454 : : * table they're attached to, and it's not clear which subset of
455 : : * tuples each child should see. See also the prohibitions in
456 : : * ATExecAttachPartition() and ATExecAddInherit().
457 : : */
2992 rhodiumtoad@postgres 458 [ + + + + ]: 288 : if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
459 : : {
460 : : /* Use appropriate error message. */
461 [ + + ]: 6 : if (rel->rd_rel->relispartition)
462 [ + - ]: 3 : ereport(ERROR,
463 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
464 : : errmsg("ROW triggers with transition tables are not supported on partitions")));
465 : : else
466 [ + - ]: 3 : ereport(ERROR,
467 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
468 : : errmsg("ROW triggers with transition tables are not supported on inheritance children")));
469 : : }
470 : :
3228 kgrittn@postgresql.o 471 [ - + ]: 282 : if (stmt->timing != TRIGGER_TYPE_AFTER)
3228 kgrittn@postgresql.o 472 [ # # ]:UBC 0 : ereport(ERROR,
473 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
474 : : errmsg("transition table name can only be specified for an AFTER trigger")));
475 : :
3042 rhaas@postgresql.org 476 [ + + ]:CBC 282 : if (TRIGGER_FOR_TRUNCATE(tgtype))
477 [ + - ]: 3 : ereport(ERROR,
478 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
479 : : errmsg("TRUNCATE triggers with transition tables are not supported")));
480 : :
481 : : /*
482 : : * We currently don't allow multi-event triggers ("INSERT OR
483 : : * UPDATE") with transition tables, because it's not clear how to
484 : : * handle INSERT ... ON CONFLICT statements which can fire both
485 : : * INSERT and UPDATE triggers. We show the inserted tuples to
486 : : * INSERT triggers and the updated tuples to UPDATE triggers, but
487 : : * it's not yet clear what INSERT OR UPDATE trigger should see.
488 : : * This restriction could be lifted if we can decide on the right
489 : : * semantics in a later release.
490 : : */
2992 rhodiumtoad@postgres 491 : 279 : if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
492 : 279 : (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
493 [ + + ]: 279 : (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
494 [ + - ]: 3 : ereport(ERROR,
495 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
496 : : errmsg("transition tables cannot be specified for triggers with more than one event")));
497 : :
498 : : /*
499 : : * We currently don't allow column-specific triggers with
500 : : * transition tables. Per spec, that seems to require
501 : : * accumulating separate transition tables for each combination of
502 : : * columns, which is a lot of work for a rather marginal feature.
503 : : */
2912 tgl@sss.pgh.pa.us 504 [ + + ]: 276 : if (stmt->columns != NIL)
505 [ + - ]: 3 : ereport(ERROR,
506 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
507 : : errmsg("transition tables cannot be specified for triggers with column lists")));
508 : :
509 : : /*
510 : : * We disallow constraint triggers with transition tables, to
511 : : * protect the assumption that such triggers can't be deferred.
512 : : * See notes with AfterTriggers data structures, below.
513 : : *
514 : : * Currently this is enforced by the grammar, so just Assert here.
515 : : */
516 [ - + ]: 273 : Assert(!stmt->isconstraint);
517 : :
3228 kgrittn@postgresql.o 518 [ + + ]: 273 : if (tt->isNew)
519 : : {
520 [ + + ]: 144 : if (!(TRIGGER_FOR_INSERT(tgtype) ||
521 [ - + ]: 78 : TRIGGER_FOR_UPDATE(tgtype)))
3228 kgrittn@postgresql.o 522 [ # # ]:UBC 0 : ereport(ERROR,
523 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
524 : : errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
525 : :
3228 kgrittn@postgresql.o 526 [ - + ]:CBC 144 : if (newtablename != NULL)
3228 kgrittn@postgresql.o 527 [ # # ]:UBC 0 : ereport(ERROR,
528 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
529 : : errmsg("NEW TABLE cannot be specified multiple times")));
530 : :
3228 kgrittn@postgresql.o 531 :CBC 144 : newtablename = tt->name;
532 : : }
533 : : else
534 : : {
535 [ + + ]: 129 : if (!(TRIGGER_FOR_DELETE(tgtype) ||
536 [ + + ]: 75 : TRIGGER_FOR_UPDATE(tgtype)))
537 [ + - ]: 3 : ereport(ERROR,
538 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
539 : : errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
540 : :
541 [ - + ]: 126 : if (oldtablename != NULL)
3228 kgrittn@postgresql.o 542 [ # # ]:UBC 0 : ereport(ERROR,
543 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
544 : : errmsg("OLD TABLE cannot be specified multiple times")));
545 : :
3228 kgrittn@postgresql.o 546 :CBC 126 : oldtablename = tt->name;
547 : : }
548 : : }
549 : :
550 [ + + + + ]: 205 : if (newtablename != NULL && oldtablename != NULL &&
551 [ - + ]: 65 : strcmp(newtablename, oldtablename) == 0)
3228 kgrittn@postgresql.o 552 [ # # ]:UBC 0 : ereport(ERROR,
553 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
554 : : errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
555 : : }
556 : :
557 : : /*
 558 : : * Parse the WHEN clause, if there is one and we weren't passed an
 559 : : * already-transformed one.
560 : : *
561 : : * Note that as a side effect, we fill whenRtable when parsing. If we got
562 : : * an already parsed clause, this does not occur, which is what we want --
563 : : * no point in adding redundant dependencies below.
564 : : */
2724 alvherre@alvh.no-ip. 565 [ + + + + ]:CBC 8224 : if (!whenClause && stmt->whenClause)
5769 tgl@sss.pgh.pa.us 566 : 68 : {
567 : : ParseState *pstate;
568 : : ParseNamespaceItem *nsitem;
569 : : List *varList;
570 : : ListCell *lc;
571 : :
572 : : /* Set up a pstate to parse with */
573 : 92 : pstate = make_parsestate(NULL);
574 : 92 : pstate->p_sourcetext = queryString;
575 : :
576 : : /*
577 : : * Set up nsitems for OLD and NEW references.
578 : : *
579 : : * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
580 : : */
2074 581 : 92 : nsitem = addRangeTableEntryForRelation(pstate, rel,
582 : : AccessShareLock,
583 : : makeAlias("old", NIL),
584 : : false, false);
585 : 92 : addNSItemToQuery(pstate, nsitem, false, true, true);
586 : 92 : nsitem = addRangeTableEntryForRelation(pstate, rel,
587 : : AccessShareLock,
588 : : makeAlias("new", NIL),
589 : : false, false);
590 : 92 : addNSItemToQuery(pstate, nsitem, false, true, true);
591 : :
592 : : /* Transform expression. Copy to be sure we don't modify original */
5769 593 : 92 : whenClause = transformWhereClause(pstate,
594 : 92 : copyObject(stmt->whenClause),
595 : : EXPR_KIND_TRIGGER_WHEN,
596 : : "WHEN");
597 : : /* we have to fix its collations too */
5266 598 : 92 : assign_expr_collations(pstate, whenClause);
599 : :
600 : : /*
601 : : * Check for disallowed references to OLD/NEW.
602 : : *
603 : : * NB: pull_var_clause is okay here only because we don't allow
604 : : * subselects in WHEN clauses; it would fail to examine the contents
605 : : * of subselects.
606 : : */
3467 607 : 92 : varList = pull_var_clause(whenClause, 0);
5769 608 [ + + + + : 182 : foreach(lc, varList)
+ + ]
609 : : {
610 : 114 : Var *var = (Var *) lfirst(lc);
611 : :
612 [ + + - ]: 114 : switch (var->varno)
613 : : {
614 : 43 : case PRS2_OLD_VARNO:
615 [ + + ]: 43 : if (!TRIGGER_FOR_ROW(tgtype))
616 [ + - ]: 3 : ereport(ERROR,
617 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
618 : : errmsg("statement trigger's WHEN condition cannot reference column values"),
619 : : parser_errposition(pstate, var->location)));
620 [ + + ]: 40 : if (TRIGGER_FOR_INSERT(tgtype))
621 [ + - ]: 3 : ereport(ERROR,
622 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
623 : : errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
624 : : parser_errposition(pstate, var->location)));
625 : : /* system columns are okay here */
626 : 37 : break;
627 : 71 : case PRS2_NEW_VARNO:
628 [ - + ]: 71 : if (!TRIGGER_FOR_ROW(tgtype))
5769 tgl@sss.pgh.pa.us 629 [ # # ]:UBC 0 : ereport(ERROR,
630 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
631 : : errmsg("statement trigger's WHEN condition cannot reference column values"),
632 : : parser_errposition(pstate, var->location)));
5769 tgl@sss.pgh.pa.us 633 [ + + ]:CBC 71 : if (TRIGGER_FOR_DELETE(tgtype))
634 [ + - ]: 3 : ereport(ERROR,
635 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
636 : : errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
637 : : parser_errposition(pstate, var->location)));
638 [ + + + + ]: 68 : if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
639 [ + - ]: 3 : ereport(ERROR,
640 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
641 : : errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
642 : : parser_errposition(pstate, var->location)));
2352 peter@eisentraut.org 643 [ + + ]: 65 : if (TRIGGER_FOR_BEFORE(tgtype) &&
644 [ + + ]: 26 : var->varattno == 0 &&
645 [ + + ]: 9 : RelationGetDescr(rel)->constr &&
211 646 [ + + ]: 6 : (RelationGetDescr(rel)->constr->has_generated_stored ||
647 [ + - ]: 3 : RelationGetDescr(rel)->constr->has_generated_virtual))
2352 648 [ + - ]: 6 : ereport(ERROR,
649 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
650 : : errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
651 : : errdetail("A whole-row reference is used and the table contains generated columns."),
652 : : parser_errposition(pstate, var->location)));
653 [ + + ]: 59 : if (TRIGGER_FOR_BEFORE(tgtype) &&
654 [ + + ]: 20 : var->varattno > 0 &&
655 [ + + ]: 17 : TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
656 [ + - ]: 6 : ereport(ERROR,
657 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
658 : : errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
659 : : errdetail("Column \"%s\" is a generated column.",
660 : : NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
661 : : parser_errposition(pstate, var->location)));
5769 tgl@sss.pgh.pa.us 662 : 53 : break;
5769 tgl@sss.pgh.pa.us 663 :UBC 0 : default:
664 : : /* can't happen without add_missing_from, so just elog */
665 [ # # ]: 0 : elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
666 : : break;
667 : : }
668 : : }
669 : :
670 : : /* we'll need the rtable for recordDependencyOnExpr */
5769 tgl@sss.pgh.pa.us 671 :CBC 68 : whenRtable = pstate->p_rtable;
672 : :
673 : 68 : qual = nodeToString(whenClause);
674 : :
675 : 68 : free_parsestate(pstate);
676 : : }
2724 alvherre@alvh.no-ip. 677 [ + + ]: 8132 : else if (!whenClause)
678 : : {
5769 tgl@sss.pgh.pa.us 679 : 8111 : whenClause = NULL;
680 : 8111 : whenRtable = NIL;
681 : 8111 : qual = NULL;
682 : : }
683 : : else
684 : : {
2724 alvherre@alvh.no-ip. 685 : 21 : qual = nodeToString(whenClause);
686 : 21 : whenRtable = NIL;
687 : : }
688 : :
689 : : /*
690 : : * Find and validate the trigger function.
691 : : */
692 [ + + ]: 8200 : if (!OidIsValid(funcoid))
2125 693 : 7792 : funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
4944 tgl@sss.pgh.pa.us 694 [ + + ]: 8200 : if (!isInternal)
695 : : {
1028 peter@eisentraut.org 696 : 1904 : aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
4944 tgl@sss.pgh.pa.us 697 [ - + ]: 1904 : if (aclresult != ACLCHECK_OK)
2835 peter_e@gmx.net 698 :UBC 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
4944 tgl@sss.pgh.pa.us 699 : 0 : NameListToString(stmt->funcname));
700 : : }
6516 tgl@sss.pgh.pa.us 701 :CBC 8200 : funcrettype = get_func_rettype(funcoid);
702 [ - + ]: 8200 : if (funcrettype != TRIGGEROID)
2011 tgl@sss.pgh.pa.us 703 [ # # ]:UBC 0 : ereport(ERROR,
704 : : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
705 : : errmsg("function %s must return type %s",
706 : : NameListToString(stmt->funcname), "trigger")));
707 : :
708 : : /*
709 : : * Scan pg_trigger to see if there is already a trigger of the same name.
710 : : * Skip this for internally generated triggers, since we'll modify the
711 : : * name to be unique below.
712 : : *
713 : : * NOTE that this is cool only because we have ShareRowExclusiveLock on
714 : : * the relation, so the trigger set won't be changing underneath us.
715 : : */
1757 tgl@sss.pgh.pa.us 716 :CBC 8200 : tgrel = table_open(TriggerRelationId, RowExclusiveLock);
717 [ + + ]: 8200 : if (!isInternal)
718 : : {
719 : : ScanKeyData skeys[2];
720 : : SysScanDesc tgscan;
721 : :
722 : 1904 : ScanKeyInit(&skeys[0],
723 : : Anum_pg_trigger_tgrelid,
724 : : BTEqualStrategyNumber, F_OIDEQ,
725 : : ObjectIdGetDatum(RelationGetRelid(rel)));
726 : :
727 : 1904 : ScanKeyInit(&skeys[1],
728 : : Anum_pg_trigger_tgname,
729 : : BTEqualStrategyNumber, F_NAMEEQ,
730 : 1904 : CStringGetDatum(stmt->trigname));
731 : :
732 : 1904 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
733 : : NULL, 2, skeys);
734 : :
735 : : /* There should be at most one matching tuple */
736 [ + + ]: 1904 : if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
737 : : {
738 : 51 : Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
739 : :
740 : 51 : trigoid = oldtrigger->oid;
741 : 51 : existing_constraint_oid = oldtrigger->tgconstraint;
742 : 51 : existing_isInternal = oldtrigger->tgisinternal;
1340 alvherre@alvh.no-ip. 743 : 51 : existing_isClone = OidIsValid(oldtrigger->tgparentid);
1757 tgl@sss.pgh.pa.us 744 : 51 : trigger_exists = true;
745 : : /* copy the tuple to use in CatalogTupleUpdate() */
746 : 51 : tuple = heap_copytuple(tuple);
747 : : }
748 : 1904 : systable_endscan(tgscan);
749 : : }
750 : :
751 [ + + ]: 8200 : if (!trigger_exists)
752 : : {
753 : : /* Generate the OID for the new trigger. */
754 : 8149 : trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
755 : : Anum_pg_trigger_oid);
756 : : }
757 : : else
758 : : {
759 : : /*
760 : : * If OR REPLACE was specified, we'll replace the old trigger;
761 : : * otherwise complain about the duplicate name.
762 : : */
763 [ + + ]: 51 : if (!stmt->replace)
764 [ + - ]: 9 : ereport(ERROR,
765 : : (errcode(ERRCODE_DUPLICATE_OBJECT),
766 : : errmsg("trigger \"%s\" for relation \"%s\" already exists",
767 : : stmt->trigname, RelationGetRelationName(rel))));
768 : :
769 : : /*
770 : : * An internal trigger or a child trigger (isClone) cannot be replaced
771 : : * by a user-defined trigger. However, skip this test when
772 : : * in_partition, because then we're recursing from a partitioned table
773 : : * and the check was made at the parent level.
774 : : */
1340 alvherre@alvh.no-ip. 775 [ + - + + ]: 42 : if ((existing_isInternal || existing_isClone) &&
776 [ + - + + ]: 30 : !isInternal && !in_partition)
1757 tgl@sss.pgh.pa.us 777 [ + - ]: 3 : ereport(ERROR,
778 : : (errcode(ERRCODE_DUPLICATE_OBJECT),
779 : : errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
780 : : stmt->trigname, RelationGetRelationName(rel))));
781 : :
782 : : /*
783 : : * It is not allowed to replace with a constraint trigger; gram.y
784 : : * should have enforced this already.
785 : : */
786 [ - + ]: 39 : Assert(!stmt->isconstraint);
787 : :
788 : : /*
789 : : * It is not allowed to replace an existing constraint trigger,
790 : : * either. (The reason for these restrictions is partly that it seems
791 : : * difficult to deal with pending trigger events in such cases, and
792 : : * partly that the command might imply changing the constraint's
793 : : * properties as well, which doesn't seem nice.)
794 : : */
795 [ - + ]: 39 : if (OidIsValid(existing_constraint_oid))
1757 tgl@sss.pgh.pa.us 796 [ # # ]:UBC 0 : ereport(ERROR,
797 : : (errcode(ERRCODE_DUPLICATE_OBJECT),
798 : : errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
799 : : stmt->trigname, RelationGetRelationName(rel))));
800 : : }
801 : :
802 : : /*
803 : : * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
804 : : * corresponding pg_constraint entry.
805 : : */
5711 tgl@sss.pgh.pa.us 806 [ + + + + ]:CBC 8188 : if (stmt->isconstraint && !OidIsValid(constraintOid))
807 : : {
808 : : /* Internal callers should have made their own constraints */
809 [ - + ]: 79 : Assert(!isInternal);
810 : 79 : constraintOid = CreateConstraintEntry(stmt->trigname,
811 : 79 : RelationGetNamespace(rel),
812 : : CONSTRAINT_TRIGGER,
813 : 79 : stmt->deferrable,
814 : 79 : stmt->initdeferred,
815 : : true, /* Is Enforced */
816 : : true,
817 : : InvalidOid, /* no parent */
818 : : RelationGetRelid(rel),
819 : : NULL, /* no conkey */
820 : : 0,
821 : : 0,
822 : : InvalidOid, /* no domain */
823 : : InvalidOid, /* no index */
824 : : InvalidOid, /* no foreign key */
825 : : NULL,
826 : : NULL,
827 : : NULL,
828 : : NULL,
829 : : 0,
830 : : ' ',
831 : : ' ',
832 : : NULL,
833 : : 0,
834 : : ' ',
835 : : NULL, /* no exclusion */
836 : : NULL, /* no check constraint */
837 : : NULL,
838 : : true, /* islocal */
839 : : 0, /* inhcount */
840 : : true, /* noinherit */
841 : : false, /* conperiod */
842 : : isInternal); /* is_internal */
843 : : }
844 : :
845 : : /*
846 : : * If trigger is internally generated, modify the provided trigger name to
847 : : * ensure uniqueness by appending the trigger OID. (Callers will usually
848 : : * supply a simple constant trigger name in these cases.)
849 : : */
850 [ + + ]: 8188 : if (isInternal)
851 : : {
852 : 6296 : snprintf(internaltrigname, sizeof(internaltrigname),
853 : : "%s_%u", stmt->trigname, trigoid);
854 : 6296 : trigname = internaltrigname;
855 : : }
856 : : else
857 : : {
858 : : /* user-defined trigger; use the specified trigger name as-is */
6516 859 : 1892 : trigname = stmt->trigname;
860 : : }
861 : :
862 : : /*
863 : : * Build the new pg_trigger tuple.
864 : : */
6152 865 : 8188 : memset(nulls, false, sizeof(nulls));
866 : :
2482 andres@anarazel.de 867 : 8188 : values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
9880 bruce@momjian.us 868 : 8188 : values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
2018 alvherre@alvh.no-ip. 869 : 8188 : values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
9165 tgl@sss.pgh.pa.us 870 : 8188 : values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
871 : : CStringGetDatum(trigname));
9196 872 : 8188 : values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
10226 bruce@momjian.us 873 : 8188 : values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
29 peter@eisentraut.org 874 :GNC 8188 : values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(trigger_fires_when);
1340 alvherre@alvh.no-ip. 875 :CBC 8188 : values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
9196 tgl@sss.pgh.pa.us 876 : 8188 : values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
5884 877 : 8188 : values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
6779 878 : 8188 : values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
9196 879 : 8188 : values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
880 : 8188 : values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
881 : :
10226 bruce@momjian.us 882 [ + + ]: 8188 : if (stmt->args)
883 : : {
884 : : ListCell *le;
885 : : char *args;
7773 neilc@samurai.com 886 : 228 : int16 nargs = list_length(stmt->args);
10225 bruce@momjian.us 887 : 228 : int len = 0;
888 : :
10226 889 [ + - + + : 546 : foreach(le, stmt->args)
+ + ]
890 : : {
8413 tgl@sss.pgh.pa.us 891 : 318 : char *ar = strVal(lfirst(le));
892 : :
9196 893 : 318 : len += strlen(ar) + 4;
10175 vadim4o@yahoo.com 894 [ + + ]: 2682 : for (; *ar; ar++)
895 : : {
10201 896 [ - + ]: 2364 : if (*ar == '\\')
10201 vadim4o@yahoo.com 897 :UBC 0 : len++;
898 : : }
899 : : }
10226 bruce@momjian.us 900 :CBC 228 : args = (char *) palloc(len + 1);
9157 tgl@sss.pgh.pa.us 901 : 228 : args[0] = '\0';
10226 bruce@momjian.us 902 [ + - + + : 546 : foreach(le, stmt->args)
+ + ]
903 : : {
8413 tgl@sss.pgh.pa.us 904 : 318 : char *s = strVal(lfirst(le));
10175 vadim4o@yahoo.com 905 : 318 : char *d = args + strlen(args);
906 : :
10201 907 [ + + ]: 2682 : while (*s)
908 : : {
909 [ - + ]: 2364 : if (*s == '\\')
10201 vadim4o@yahoo.com 910 :UBC 0 : *d++ = '\\';
10201 vadim4o@yahoo.com 911 :CBC 2364 : *d++ = *s++;
912 : : }
9157 tgl@sss.pgh.pa.us 913 : 318 : strcpy(d, "\\000");
914 : : }
10226 bruce@momjian.us 915 : 228 : values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
9170 tgl@sss.pgh.pa.us 916 : 228 : values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
917 : : CStringGetDatum(args));
918 : : }
919 : : else
920 : : {
10226 bruce@momjian.us 921 : 7960 : values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
9170 tgl@sss.pgh.pa.us 922 : 7960 : values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
923 : : CStringGetDatum(""));
924 : : }
925 : :
926 : : /* build column number array if it's a column-specific trigger */
5806 927 : 8188 : ncolumns = list_length(stmt->columns);
928 [ + + ]: 8188 : if (ncolumns == 0)
929 : 8135 : columns = NULL;
930 : : else
931 : : {
932 : : ListCell *cell;
933 : 53 : int i = 0;
934 : :
4821 peter_e@gmx.net 935 : 53 : columns = (int16 *) palloc(ncolumns * sizeof(int16));
5806 tgl@sss.pgh.pa.us 936 [ + - + + + + ]: 110 : foreach(cell, stmt->columns)
937 : : {
5671 bruce@momjian.us 938 : 60 : char *name = strVal(lfirst(cell));
939 : : int16 attnum;
940 : : int j;
941 : :
942 : : /* Lookup column name. System columns are not allowed */
5806 tgl@sss.pgh.pa.us 943 : 60 : attnum = attnameAttNum(rel, name, false);
944 [ - + ]: 60 : if (attnum == InvalidAttrNumber)
5806 tgl@sss.pgh.pa.us 945 [ # # ]:UBC 0 : ereport(ERROR,
946 : : (errcode(ERRCODE_UNDEFINED_COLUMN),
947 : : errmsg("column \"%s\" of relation \"%s\" does not exist",
948 : : name, RelationGetRelationName(rel))));
949 : :
950 : : /* Check for duplicates */
5806 tgl@sss.pgh.pa.us 951 [ + + ]:CBC 64 : for (j = i - 1; j >= 0; j--)
952 : : {
953 [ + + ]: 7 : if (columns[j] == attnum)
954 [ + - ]: 3 : ereport(ERROR,
955 : : (errcode(ERRCODE_DUPLICATE_COLUMN),
956 : : errmsg("column \"%s\" specified more than once",
957 : : name)));
958 : : }
959 : :
960 : 57 : columns[i++] = attnum;
961 : : }
962 : : }
963 : 8185 : tgattr = buildint2vector(columns, ncolumns);
10226 bruce@momjian.us 964 : 8185 : values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
965 : :
966 : : /* set tgqual if trigger has WHEN clause */
5769 tgl@sss.pgh.pa.us 967 [ + + ]: 8185 : if (qual)
968 : 89 : values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
969 : : else
970 : 8096 : nulls[Anum_pg_trigger_tgqual - 1] = true;
971 : :
3228 kgrittn@postgresql.o 972 [ + + ]: 8185 : if (oldtablename)
973 : 126 : values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
974 : : CStringGetDatum(oldtablename));
975 : : else
976 : 8059 : nulls[Anum_pg_trigger_tgoldtable - 1] = true;
977 [ + + ]: 8185 : if (newtablename)
978 : 144 : values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
979 : : CStringGetDatum(newtablename));
980 : : else
981 : 8041 : nulls[Anum_pg_trigger_tgnewtable - 1] = true;
982 : :
983 : : /*
984 : : * Insert or replace tuple in pg_trigger.
985 : : */
1757 tgl@sss.pgh.pa.us 986 [ + + ]: 8185 : if (!trigger_exists)
987 : : {
988 : 8146 : tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
989 : 8146 : CatalogTupleInsert(tgrel, tuple);
990 : : }
991 : : else
992 : : {
993 : : HeapTuple newtup;
994 : :
995 : 39 : newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
996 : 39 : CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
997 : 39 : heap_freetuple(newtup);
998 : : }
999 : :
1000 : 8185 : heap_freetuple(tuple); /* free either original or new tuple */
2420 andres@anarazel.de 1001 : 8185 : table_close(tgrel, RowExclusiveLock);
1002 : :
10226 bruce@momjian.us 1003 : 8185 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1004 : 8185 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
5806 tgl@sss.pgh.pa.us 1005 : 8185 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
3228 kgrittn@postgresql.o 1006 [ + + ]: 8185 : if (oldtablename)
1007 : 126 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1008 [ + + ]: 8185 : if (newtablename)
1009 : 144 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1010 : :
1011 : : /*
1012 : : * Update relation's pg_class entry; if necessary; and if not, send an SI
1013 : : * message to make other backends (and this one) rebuild relcache entries.
1014 : : */
2420 andres@anarazel.de 1015 : 8185 : pgrel = table_open(RelationRelationId, RowExclusiveLock);
5683 rhaas@postgresql.org 1016 : 8185 : tuple = SearchSysCacheCopy1(RELOID,
1017 : : ObjectIdGetDatum(RelationGetRelid(rel)));
9880 bruce@momjian.us 1018 [ - + ]: 8185 : if (!HeapTupleIsValid(tuple))
8084 tgl@sss.pgh.pa.us 1019 [ # # ]:UBC 0 : elog(ERROR, "cache lookup failed for relation %u",
1020 : : RelationGetRelid(rel));
2724 alvherre@alvh.no-ip. 1021 [ + + ]:CBC 8185 : if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1022 : : {
1023 : 3076 : ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1024 : :
1025 : 3076 : CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1026 : :
1027 : 3076 : CommandCounterIncrement();
1028 : : }
1029 : : else
1030 : 5109 : CacheInvalidateRelcacheByTuple(tuple);
1031 : :
9396 JanWieck@Yahoo.com 1032 : 8185 : heap_freetuple(tuple);
2420 andres@anarazel.de 1033 : 8185 : table_close(pgrel, RowExclusiveLock);
1034 : :
1035 : : /*
1036 : : * If we're replacing a trigger, flush all the old dependencies before
1037 : : * recording new ones.
1038 : : */
1757 tgl@sss.pgh.pa.us 1039 [ + + ]: 8185 : if (trigger_exists)
1040 : 39 : deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1041 : :
1042 : : /*
1043 : : * Record dependencies for trigger. Always place a normal dependency on
1044 : : * the function.
1045 : : */
6779 1046 : 8185 : myself.classId = TriggerRelationId;
1047 : 8185 : myself.objectId = trigoid;
1048 : 8185 : myself.objectSubId = 0;
1049 : :
7450 1050 : 8185 : referenced.classId = ProcedureRelationId;
8457 1051 : 8185 : referenced.objectId = funcoid;
1052 : 8185 : referenced.objectSubId = 0;
1053 : 8185 : recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1054 : :
5711 1055 [ + + + - ]: 8185 : if (isInternal && OidIsValid(constraintOid))
1056 : : {
1057 : : /*
1058 : : * Internally-generated trigger for a constraint, so make it an
1059 : : * internal dependency of the constraint. We can skip depending on
1060 : : * the relation(s), as there'll be an indirect dependency via the
1061 : : * constraint.
1062 : : */
6779 1063 : 6296 : referenced.classId = ConstraintRelationId;
1064 : 6296 : referenced.objectId = constraintOid;
1065 : 6296 : referenced.objectSubId = 0;
1066 : 6296 : recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1067 : : }
1068 : : else
1069 : : {
1070 : : /*
1071 : : * User CREATE TRIGGER, so place dependencies. We make trigger be
1072 : : * auto-dropped if its relation is dropped or if the FK relation is
1073 : : * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1074 : : */
7450 1075 : 1889 : referenced.classId = RelationRelationId;
8457 1076 : 1889 : referenced.objectId = RelationGetRelid(rel);
1077 : 1889 : referenced.objectSubId = 0;
2399 1078 : 1889 : recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1079 : :
5884 1080 [ + + ]: 1889 : if (OidIsValid(constrrelid))
1081 : : {
7450 1082 : 21 : referenced.classId = RelationRelationId;
8457 1083 : 21 : referenced.objectId = constrrelid;
1084 : 21 : referenced.objectSubId = 0;
1085 : 21 : recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1086 : : }
1087 : : /* Not possible to have an index dependency in this case */
5884 1088 [ - + ]: 1889 : Assert(!OidIsValid(indexOid));
1089 : :
1090 : : /*
1091 : : * If it's a user-specified constraint trigger, make the constraint
1092 : : * internally dependent on the trigger instead of vice versa.
1093 : : */
5711 1094 [ + + ]: 1889 : if (OidIsValid(constraintOid))
1095 : : {
1096 : 79 : referenced.classId = ConstraintRelationId;
1097 : 79 : referenced.objectId = constraintOid;
1098 : 79 : referenced.objectSubId = 0;
1099 : 79 : recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1100 : : }
1101 : :
1102 : : /*
1103 : : * If it's a partition trigger, create the partition dependencies.
1104 : : */
2724 alvherre@alvh.no-ip. 1105 [ + + ]: 1889 : if (OidIsValid(parentTriggerOid))
1106 : : {
1107 : 402 : ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
2399 tgl@sss.pgh.pa.us 1108 : 402 : recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1109 : 402 : ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1110 : 402 : recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1111 : : }
1112 : : }
1113 : :
1114 : : /* If column-specific trigger, add normal dependencies on columns */
5806 1115 [ + + ]: 8185 : if (columns != NULL)
1116 : : {
1117 : : int i;
1118 : :
1119 : 50 : referenced.classId = RelationRelationId;
1120 : 50 : referenced.objectId = RelationGetRelid(rel);
1121 [ + + ]: 104 : for (i = 0; i < ncolumns; i++)
1122 : : {
1123 : 54 : referenced.objectSubId = columns[i];
1124 : 54 : recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1125 : : }
1126 : : }
1127 : :
1128 : : /*
1129 : : * If it has a WHEN clause, add dependencies on objects mentioned in the
1130 : : * expression (eg, functions, as well as any columns used).
1131 : : */
2724 alvherre@alvh.no-ip. 1132 [ + + ]: 8185 : if (whenRtable != NIL)
5769 tgl@sss.pgh.pa.us 1133 : 68 : recordDependencyOnExpr(&myself, whenClause, whenRtable,
1134 : : DEPENDENCY_NORMAL);
1135 : :
1136 : : /* Post creation hook for new trigger */
4556 rhaas@postgresql.org 1137 [ + + ]: 8185 : InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1138 : : isInternal);
1139 : :
1140 : : /*
1141 : : * Lastly, create the trigger on child relations, if needed.
1142 : : */
2724 alvherre@alvh.no-ip. 1143 [ + + ]: 8185 : if (partition_recurse)
1144 : : {
1598 1145 : 193 : PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1146 : : int i;
1147 : : MemoryContext oldcxt,
1148 : : perChildCxt;
1149 : :
2724 1150 : 193 : perChildCxt = AllocSetContextCreate(CurrentMemoryContext,
1151 : : "part trig clone",
1152 : : ALLOCSET_SMALL_SIZES);
1153 : :
1154 : : /*
1155 : : * We don't currently expect to be called with a valid indexOid. If
1156 : : * that ever changes then we'll need to write code here to find the
1157 : : * corresponding child index.
1158 : : */
1096 drowley@postgresql.o 1159 [ - + ]: 193 : Assert(!OidIsValid(indexOid));
1160 : :
2724 alvherre@alvh.no-ip. 1161 : 193 : oldcxt = MemoryContextSwitchTo(perChildCxt);
1162 : :
1163 : : /* Iterate to create the trigger on each existing partition */
1164 [ + + ]: 520 : for (i = 0; i < partdesc->nparts; i++)
1165 : : {
1166 : : CreateTrigStmt *childStmt;
1167 : : Relation childTbl;
1168 : : Node *qual;
1169 : :
2420 andres@anarazel.de 1170 : 330 : childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1171 : :
1172 : : /*
1173 : : * Initialize our fabricated parse node by copying the original
1174 : : * one, then resetting fields that we pass separately.
1175 : : */
324 peter@eisentraut.org 1176 : 330 : childStmt = copyObject(stmt);
2724 alvherre@alvh.no-ip. 1177 : 330 : childStmt->funcname = NIL;
1178 : 330 : childStmt->whenClause = NULL;
1179 : :
1180 : : /* If there is a WHEN clause, create a modified copy of it */
1181 : 330 : qual = copyObject(whenClause);
1182 : : qual = (Node *)
1183 : 330 : map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1184 : : childTbl, rel);
1185 : : qual = (Node *)
1186 : 330 : map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1187 : : childTbl, rel);
1188 : :
1513 1189 : 330 : CreateTriggerFiringOn(childStmt, queryString,
1190 : 330 : partdesc->oids[i], refRelOid,
1191 : : InvalidOid, InvalidOid,
1192 : : funcoid, trigoid, qual,
1193 : : isInternal, true, trigger_fires_when);
1194 : :
2420 andres@anarazel.de 1195 : 327 : table_close(childTbl, NoLock);
1196 : :
2724 alvherre@alvh.no-ip. 1197 : 327 : MemoryContextReset(perChildCxt);
1198 : : }
1199 : :
1200 : 190 : MemoryContextSwitchTo(oldcxt);
1201 : 190 : MemoryContextDelete(perChildCxt);
1202 : : }
1203 : :
1204 : : /* Keep lock on target rel until end of xact */
2420 andres@anarazel.de 1205 : 8182 : table_close(rel, NoLock);
1206 : :
3840 alvherre@alvh.no-ip. 1207 : 8182 : return myself;
1208 : : }
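
A detail of the function above that is easy to miss: the argument-packing loop doubles backslashes and appends a literal "\000" after each argument, and byteain then converts those escapes into real NUL bytes, so the stored pg_trigger.tgargs is simply a sequence of NUL-terminated strings. A hedged sketch of unpacking such a value (the relcache's trigger-descriptor code does essentially this; variable names here are illustrative):

    bytea  *val = DatumGetByteaPP(tgargs_datum);    /* pg_trigger.tgargs */
    char   *p = VARDATA_ANY(val);
    char  **args = (char **) palloc(tgnargs * sizeof(char *));

    for (int i = 0; i < tgnargs; i++)
    {
        args[i] = pstrdup(p);   /* each argument is NUL-terminated */
        p += strlen(p) + 1;     /* step past the terminator */
    }
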
1209 : :
1210 : : /*
1211 : : * TriggerSetParentTrigger
1212 : : * Set a partition's trigger as child of its parent trigger,
1213 : : * or remove the linkage if parentTrigId is InvalidOid.
1214 : : *
 1215 : : * This updates the child trigger's pg_trigger row to show it as inherited, and
1216 : : * adds PARTITION dependencies to prevent the trigger from being deleted
1217 : : * on its own. Alternatively, reverse that.
1218 : : */
1219 : : void
1340 1220 : 252 : TriggerSetParentTrigger(Relation trigRel,
1221 : : Oid childTrigId,
1222 : : Oid parentTrigId,
1223 : : Oid childTableId)
1224 : : {
1225 : : SysScanDesc tgscan;
1226 : : ScanKeyData skey[1];
1227 : : Form_pg_trigger trigForm;
1228 : : HeapTuple tuple,
1229 : : newtup;
1230 : : ObjectAddress depender;
1231 : : ObjectAddress referenced;
1232 : :
1233 : : /*
 1234 : : * Find the trigger to update.
1235 : : */
1236 : 252 : ScanKeyInit(&skey[0],
1237 : : Anum_pg_trigger_oid,
1238 : : BTEqualStrategyNumber, F_OIDEQ,
1239 : : ObjectIdGetDatum(childTrigId));
1240 : :
1241 : 252 : tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1242 : : NULL, 1, skey);
1243 : :
1244 : 252 : tuple = systable_getnext(tgscan);
1245 [ - + ]: 252 : if (!HeapTupleIsValid(tuple))
1340 alvherre@alvh.no-ip. 1246 [ # # ]:UBC 0 : elog(ERROR, "could not find tuple for trigger %u", childTrigId);
1340 alvherre@alvh.no-ip. 1247 :CBC 252 : newtup = heap_copytuple(tuple);
1248 : 252 : trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1249 [ + + ]: 252 : if (OidIsValid(parentTrigId))
1250 : : {
1251 : : /* don't allow setting parent for a constraint that already has one */
1252 [ - + ]: 150 : if (OidIsValid(trigForm->tgparentid))
1340 alvherre@alvh.no-ip. 1253 [ # # ]:UBC 0 : elog(ERROR, "trigger %u already has a parent trigger",
1254 : : childTrigId);
1255 : :
1340 alvherre@alvh.no-ip. 1256 :CBC 150 : trigForm->tgparentid = parentTrigId;
1257 : :
1258 : 150 : CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1259 : :
1260 : 150 : ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1261 : :
1262 : 150 : ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1263 : 150 : recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1264 : :
1265 : 150 : ObjectAddressSet(referenced, RelationRelationId, childTableId);
1266 : 150 : recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1267 : : }
1268 : : else
1269 : : {
1270 : 102 : trigForm->tgparentid = InvalidOid;
1271 : :
1272 : 102 : CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1273 : :
1274 : 102 : deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1275 : : TriggerRelationId,
1276 : : DEPENDENCY_PARTITION_PRI);
1277 : 102 : deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1278 : : RelationRelationId,
1279 : : DEPENDENCY_PARTITION_SEC);
1280 : : }
1281 : :
1282 : 252 : heap_freetuple(newtup);
1283 : 252 : systable_endscan(tgscan);
1284 : 252 : }
1285 : :
1286 : :
1287 : : /*
1288 : : * Guts of trigger deletion.
1289 : : */
1290 : : void
8457 tgl@sss.pgh.pa.us 1291 : 7104 : RemoveTriggerById(Oid trigOid)
1292 : : {
1293 : : Relation tgrel;
1294 : : SysScanDesc tgscan;
1295 : : ScanKeyData skey[1];
1296 : : HeapTuple tup;
1297 : : Oid relid;
1298 : : Relation rel;
1299 : :
2420 andres@anarazel.de 1300 : 7104 : tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1301 : :
1302 : : /*
1303 : : * Find the trigger to delete.
1304 : : */
7969 tgl@sss.pgh.pa.us 1305 : 7104 : ScanKeyInit(&skey[0],
1306 : : Anum_pg_trigger_oid,
1307 : : BTEqualStrategyNumber, F_OIDEQ,
1308 : : ObjectIdGetDatum(trigOid));
1309 : :
7450 1310 : 7104 : tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1311 : : NULL, 1, skey);
1312 : :
8457 1313 : 7104 : tup = systable_getnext(tgscan);
1314 [ - + ]: 7104 : if (!HeapTupleIsValid(tup))
8084 tgl@sss.pgh.pa.us 1315 [ # # ]:UBC 0 : elog(ERROR, "could not find tuple for trigger %u", trigOid);
1316 : :
1317 : : /*
1318 : : * Open and exclusive-lock the relation the trigger belongs to.
1319 : : */
8457 tgl@sss.pgh.pa.us 1320 :CBC 7104 : relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1321 : :
2420 andres@anarazel.de 1322 : 7104 : rel = table_open(relid, AccessExclusiveLock);
1323 : :
5445 tgl@sss.pgh.pa.us 1324 [ + + ]: 7104 : if (rel->rd_rel->relkind != RELKIND_RELATION &&
4185 noah@leadboat.com 1325 [ + + ]: 1324 : rel->rd_rel->relkind != RELKIND_VIEW &&
3195 rhaas@postgresql.org 1326 [ + + ]: 1256 : rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1327 [ - + ]: 1210 : rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
8084 tgl@sss.pgh.pa.us 1328 [ # # ]:UBC 0 : ereport(ERROR,
1329 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1330 : : errmsg("relation \"%s\" cannot have triggers",
1331 : : RelationGetRelationName(rel)),
1332 : : errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1333 : :
8548 tgl@sss.pgh.pa.us 1334 [ + + - + ]:CBC 7104 : if (!allowSystemTableMods && IsSystemRelation(rel))
8084 tgl@sss.pgh.pa.us 1335 [ # # ]:UBC 0 : ereport(ERROR,
1336 : : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1337 : : errmsg("permission denied: \"%s\" is a system catalog",
1338 : : RelationGetRelationName(rel))));
1339 : :
1340 : : /*
1341 : : * Delete the pg_trigger tuple.
1342 : : */
3139 tgl@sss.pgh.pa.us 1343 :CBC 7104 : CatalogTupleDelete(tgrel, &tup->t_self);
1344 : :
8600 1345 : 7104 : systable_endscan(tgscan);
2420 andres@anarazel.de 1346 : 7104 : table_close(tgrel, RowExclusiveLock);
1347 : :
1348 : : /*
1349 : : * We do not bother to try to determine whether any other triggers remain,
1350 : : * which would be needed in order to decide whether it's safe to clear the
1351 : : * relation's relhastriggers. (In any case, there might be a concurrent
1352 : : * process adding new triggers.) Instead, just force a relcache inval to
1353 : : * make other backends (and this one too!) rebuild their relcache entries.
1354 : : * There's no great harm in leaving relhastriggers true even if there are
1355 : : * no triggers left.
1356 : : */
6145 tgl@sss.pgh.pa.us 1357 : 7104 : CacheInvalidateRelcache(rel);
1358 : :
1359 : : /* Keep lock on trigger's rel until end of xact */
2420 andres@anarazel.de 1360 : 7104 : table_close(rel, NoLock);
10233 vadim4o@yahoo.com 1361 : 7104 : }
1362 : :
1363 : : /*
1364 : : * get_trigger_oid - Look up a trigger by name to find its OID.
1365 : : *
1366 : : * If missing_ok is false, throw an error if trigger not found. If
1367 : : * true, just return InvalidOid.
1368 : : */
1369 : : Oid
5511 rhaas@postgresql.org 1370 : 394 : get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1371 : : {
1372 : : Relation tgrel;
1373 : : ScanKeyData skey[2];
1374 : : SysScanDesc tgscan;
1375 : : HeapTuple tup;
1376 : : Oid oid;
1377 : :
1378 : : /*
1379 : : * Find the trigger, verify permissions, set up object address
1380 : : */
2420 andres@anarazel.de 1381 : 394 : tgrel = table_open(TriggerRelationId, AccessShareLock);
1382 : :
5511 rhaas@postgresql.org 1383 : 394 : ScanKeyInit(&skey[0],
1384 : : Anum_pg_trigger_tgrelid,
1385 : : BTEqualStrategyNumber, F_OIDEQ,
1386 : : ObjectIdGetDatum(relid));
1387 : 394 : ScanKeyInit(&skey[1],
1388 : : Anum_pg_trigger_tgname,
1389 : : BTEqualStrategyNumber, F_NAMEEQ,
1390 : : CStringGetDatum(trigname));
1391 : :
1392 : 394 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1393 : : NULL, 2, skey);
1394 : :
1395 : 394 : tup = systable_getnext(tgscan);
1396 : :
1397 [ + + ]: 394 : if (!HeapTupleIsValid(tup))
1398 : : {
1399 [ + + ]: 15 : if (!missing_ok)
1400 [ + - ]: 12 : ereport(ERROR,
1401 : : (errcode(ERRCODE_UNDEFINED_OBJECT),
1402 : : errmsg("trigger \"%s\" for table \"%s\" does not exist",
1403 : : trigname, get_rel_name(relid))));
1404 : 3 : oid = InvalidOid;
1405 : : }
1406 : : else
1407 : : {
2482 andres@anarazel.de 1408 : 379 : oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1409 : : }
1410 : :
5511 rhaas@postgresql.org 1411 : 382 : systable_endscan(tgscan);
2420 andres@anarazel.de 1412 : 382 : table_close(tgrel, AccessShareLock);
5511 rhaas@postgresql.org 1413 : 382 : return oid;
1414 : : }
1415 : :
1416 : : /*
1417 : : * Perform permissions and integrity checks before acquiring a relation lock.
1418 : : */
1419 : : static void
5014 1420 : 20 : RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1421 : : void *arg)
1422 : : {
1423 : : HeapTuple tuple;
1424 : : Form_pg_class form;
1425 : :
1426 : 20 : tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1427 [ - + ]: 20 : if (!HeapTupleIsValid(tuple))
4836 bruce@momjian.us 1428 :UBC 0 : return; /* concurrently dropped */
5014 rhaas@postgresql.org 1429 :CBC 20 : form = (Form_pg_class) GETSTRUCT(tuple);
1430 : :
1431 : : /* only tables and views can have triggers */
4185 noah@leadboat.com 1432 [ + + + - ]: 20 : if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
3195 rhaas@postgresql.org 1433 [ + - ]: 12 : form->relkind != RELKIND_FOREIGN_TABLE &&
1434 [ - + ]: 12 : form->relkind != RELKIND_PARTITIONED_TABLE)
4836 bruce@momjian.us 1435 [ # # ]:UBC 0 : ereport(ERROR,
1436 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1437 : : errmsg("relation \"%s\" cannot have triggers",
1438 : : rv->relname),
1439 : : errdetail_relkind_not_supported(form->relkind)));
1440 : :
1441 : : /* you must own the table to rename one of its triggers */
1028 peter@eisentraut.org 1442 [ - + ]:CBC 20 : if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
2835 peter_e@gmx.net 1443 :UBC 0 : aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
4300 rhaas@postgresql.org 1444 [ + + + + ]:CBC 20 : if (!allowSystemTableMods && IsSystemClass(relid, form))
4836 bruce@momjian.us 1445 [ + - ]: 1 : ereport(ERROR,
1446 : : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1447 : : errmsg("permission denied: \"%s\" is a system catalog",
1448 : : rv->relname)));
1449 : :
5014 rhaas@postgresql.org 1450 : 19 : ReleaseSysCache(tuple);
1451 : : }
1452 : :
1453 : : /*
1454 : : * renametrig - changes the name of a trigger on a relation
1455 : : *
1456 : : * The trigger's name is changed in the pg_trigger catalog.
1457 : : * No record of the previous name is kept.
1458 : : *
1459 : : * Outline:
1460 : : * look up and lock the target relation
1461 : : * scan pg_trigger for a name conflict within that relation
1462 : : * and for the trigger being renamed
1463 : : * modify tgname in the trigger tuple
1464 : : * update the row in the catalog
1465 : : */
1466 : : ObjectAddress
1467 : 20 : renametrig(RenameStmt *stmt)
1468 : : {
1469 : : Oid tgoid;
1470 : : Relation targetrel;
1471 : : Relation tgrel;
1472 : : HeapTuple tuple;
1473 : : SysScanDesc tgscan;
1474 : : ScanKeyData key[2];
1475 : : Oid relid;
1476 : : ObjectAddress address;
1477 : :
1478 : : /*
1479 : : * Look up name, check permissions, and acquire lock (which we will NOT
1480 : : * release until end of transaction).
1481 : : */
1482 : 20 : relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1483 : : 0,
1484 : : RangeVarCallbackForRenameTrigger,
1485 : : NULL);
1486 : :
1487 : : /* Have lock already, so just need to build relcache entry. */
1488 : 19 : targetrel = relation_open(relid, NoLock);
1489 : :
1490 : : /*
1491 : : * On partitioned tables, this operation recurses to partitions. Lock all
1492 : : * tables upfront.
1493 : : */
1507 alvherre@alvh.no-ip. 1494 [ + + ]: 19 : if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1495 : 12 : (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1496 : :
1497 : 19 : tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1498 : :
1499 : : /*
1500 : : * Search for the trigger to modify.
1501 : : */
7969 tgl@sss.pgh.pa.us 1502 : 19 : ScanKeyInit(&key[0],
1503 : : Anum_pg_trigger_tgrelid,
1504 : : BTEqualStrategyNumber, F_OIDEQ,
1505 : : ObjectIdGetDatum(relid));
1506 : 19 : ScanKeyInit(&key[1],
1507 : : Anum_pg_trigger_tgname,
1508 : : BTEqualStrategyNumber, F_NAMEEQ,
5014 rhaas@postgresql.org 1509 : 19 : PointerGetDatum(stmt->subname));
7450 tgl@sss.pgh.pa.us 1510 : 19 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1511 : : NULL, 2, key);
8530 1512 [ + - ]: 19 : if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1513 : : {
1514 : : Form_pg_trigger trigform;
1515 : :
2420 andres@anarazel.de 1516 : 19 : trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1517 : 19 : tgoid = trigform->oid;
1518 : :
1519 : : /*
1520 : : * If the trigger descends from a trigger on a parent partitioned
1521 : : * table, reject the rename. We don't allow a trigger in a partition
1522 : : * to differ in name from that of its parent: that would lead to an
1523 : : * inconsistency that pg_dump would not reproduce.
1524 : : */
1507 alvherre@alvh.no-ip. 1525 [ + + ]: 19 : if (OidIsValid(trigform->tgparentid))
1526 [ + - ]: 3 : ereport(ERROR,
1527 : : errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1528 : : errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1529 : : stmt->subname, RelationGetRelationName(targetrel)),
1530 : : errhint("Rename the trigger on the partitioned table \"%s\" instead.",
1531 : : get_rel_name(get_partition_parent(relid, false))));
1532 : :
1533 : :
1534 : : /* Rename the trigger on this relation ... */
1535 : 16 : renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1536 : 16 : stmt->subname);
1537 : :
1538 : : /* ... and if it is partitioned, recurse to its partitions */
1539 [ + + ]: 16 : if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1540 : : {
1541 : 9 : PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1542 : :
1543 [ + + ]: 15 : for (int i = 0; i < partdesc->nparts; i++)
1544 : : {
1545 : 9 : Oid partitionId = partdesc->oids[i];
1546 : :
1547 : 9 : renametrig_partition(tgrel, partitionId, trigform->oid,
1548 : 9 : stmt->newname, stmt->subname);
1549 : : }
1550 : : }
1551 : : }
1552 : : else
1553 : : {
8084 tgl@sss.pgh.pa.us 1554 [ # # ]:UBC 0 : ereport(ERROR,
1555 : : (errcode(ERRCODE_UNDEFINED_OBJECT),
1556 : : errmsg("trigger \"%s\" for table \"%s\" does not exist",
1557 : : stmt->subname, RelationGetRelationName(targetrel))));
1558 : : }
1559 : :
3840 alvherre@alvh.no-ip. 1560 :CBC 13 : ObjectAddressSet(address, TriggerRelationId, tgoid);
1561 : :
8530 tgl@sss.pgh.pa.us 1562 : 13 : systable_endscan(tgscan);
1563 : :
2420 andres@anarazel.de 1564 : 13 : table_close(tgrel, RowExclusiveLock);
1565 : :
1566 : : /*
1567 : : * Close rel, but keep exclusive lock!
1568 : : */
5014 rhaas@postgresql.org 1569 : 13 : relation_close(targetrel, NoLock);
1570 : :
3840 alvherre@alvh.no-ip. 1571 : 13 : return address;
1572 : : }
1573 : :
1574 : : /*
1575 : : * Subroutine for renametrig -- perform the actual work of renaming one
1576 : : * trigger on one table.
1577 : : *
1578 : : * If the trigger has a name different from the expected one, raise a
1579 : : * NOTICE about it.
1580 : : */
1581 : : static void
1507 1582 : 28 : renametrig_internal(Relation tgrel, Relation targetrel, HeapTuple trigtup,
1583 : : const char *newname, const char *expected_name)
1584 : : {
1585 : : HeapTuple tuple;
1586 : : Form_pg_trigger tgform;
1587 : : ScanKeyData key[2];
1588 : : SysScanDesc tgscan;
1589 : :
1590 : : /* If the trigger already has the new name, nothing to do. */
1591 : 28 : tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1592 [ - + ]: 28 : if (strcmp(NameStr(tgform->tgname), newname) == 0)
1507 alvherre@alvh.no-ip. 1593 :UBC 0 : return;
1594 : :
1595 : : /*
1596 : : * Before actually trying the rename, search for triggers with the same
1597 : : * name. The update would fail with an ugly message in that case, and it
1598 : : * is better to throw a nicer error.
1599 : : */
1507 alvherre@alvh.no-ip. 1600 :CBC 28 : ScanKeyInit(&key[0],
1601 : : Anum_pg_trigger_tgrelid,
1602 : : BTEqualStrategyNumber, F_OIDEQ,
1603 : : ObjectIdGetDatum(RelationGetRelid(targetrel)));
1604 : 28 : ScanKeyInit(&key[1],
1605 : : Anum_pg_trigger_tgname,
1606 : : BTEqualStrategyNumber, F_NAMEEQ,
1607 : : PointerGetDatum(newname));
1608 : 28 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1609 : : NULL, 2, key);
1610 [ + + ]: 28 : if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1611 [ + - ]: 3 : ereport(ERROR,
1612 : : (errcode(ERRCODE_DUPLICATE_OBJECT),
1613 : : errmsg("trigger \"%s\" for relation \"%s\" already exists",
1614 : : newname, RelationGetRelationName(targetrel))));
1615 : 25 : systable_endscan(tgscan);
1616 : :
1617 : : /*
1618 : : * The target name is free; update the existing pg_trigger tuple with it.
1619 : : */
1620 : 25 : tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1621 : 25 : tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1622 : :
1623 : : /*
1624 : : * If the trigger has a name different from what we expected, let the user
1625 : : * know. (We can proceed anyway, since we must have reached here following
1626 : : * a tgparentid link.)
1627 : : */
1628 [ - + ]: 25 : if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1507 alvherre@alvh.no-ip. 1629 [ # # ]:UBC 0 : ereport(NOTICE,
1630 : : errmsg("renamed trigger \"%s\" on relation \"%s\"",
1631 : : NameStr(tgform->tgname),
1632 : : RelationGetRelationName(targetrel)));
1633 : :
1507 alvherre@alvh.no-ip. 1634 :CBC 25 : namestrcpy(&tgform->tgname, newname);
1635 : :
1636 : 25 : CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1637 : :
1638 [ - + ]: 25 : InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1639 : :
1640 : : /*
1641 : : * Invalidate relation's relcache entry so that other backends (and this
1642 : : * one too!) are sent SI message to make them rebuild relcache entries.
1643 : : * (Ideally this should happen automatically...)
1644 : : */
1645 : 25 : CacheInvalidateRelcache(targetrel);
1646 : : }
1647 : :
1648 : : /*
1649 : : * Subroutine for renametrig -- Helper for recursing to partitions when
1650 : : * renaming triggers on a partitioned table.
1651 : : */
1652 : : static void
1653 : 15 : renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1654 : : const char *newname, const char *expected_name)
1655 : : {
1656 : : SysScanDesc tgscan;
1657 : : ScanKeyData key;
1658 : : HeapTuple tuple;
1659 : :
1660 : : /*
1661 : : * Given a relation and the OID of a trigger on the parent relation, find the
1662 : : * corresponding trigger in the child and rename that trigger to the given
1663 : : * name.
1664 : : */
1665 : 15 : ScanKeyInit(&key,
1666 : : Anum_pg_trigger_tgrelid,
1667 : : BTEqualStrategyNumber, F_OIDEQ,
1668 : : ObjectIdGetDatum(partitionId));
1669 : 15 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1670 : : NULL, 1, &key);
1671 [ + + ]: 24 : while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1672 : : {
1673 : 21 : Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1674 : : Relation partitionRel;
1675 : :
1676 [ + + ]: 21 : if (tgform->tgparentid != parentTriggerOid)
1677 : 9 : continue; /* not our trigger */
1678 : :
1679 : 12 : partitionRel = table_open(partitionId, NoLock);
1680 : :
1681 : : /* Rename the trigger on this partition */
1682 : 12 : renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1683 : :
1684 : : /* And if this relation is partitioned, recurse to its partitions */
1685 [ + + ]: 9 : if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1686 : : {
1687 : 3 : PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1688 : : true);
1689 : :
1690 [ + + ]: 9 : for (int i = 0; i < partdesc->nparts; i++)
1691 : : {
1067 drowley@postgresql.o 1692 : 6 : Oid partoid = partdesc->oids[i];
1693 : :
1694 : 6 : renametrig_partition(tgrel, partoid, tgform->oid, newname,
1507 alvherre@alvh.no-ip. 1695 : 6 : NameStr(tgform->tgname));
1696 : : }
1697 : : }
1698 : 9 : table_close(partitionRel, NoLock);
1699 : :
1700 : : /* There should be at most one matching tuple */
1503 1701 : 9 : break;
1702 : : }
1507 1703 : 12 : systable_endscan(tgscan);
1704 : 12 : }
1705 : :
1706 : : /*
1707 : : * EnableDisableTrigger()
1708 : : *
1709 : : * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1710 : : * to change the 'tgenabled' field for the specified trigger(s)
1711 : : *
1712 : : * rel: relation to process (caller must hold suitable lock on it)
1713 : : * tgname: name of trigger to process, or NULL to scan all triggers
1714 : : * tgparent: if not zero, process only triggers with this tgparentid
1715 : : * fires_when: new value for tgenabled field. In addition to generic
1716 : : * enablement/disablement, this also defines when the trigger
1717 : : * should be fired in session replication roles.
1718 : : * skip_system: if true, skip "system" triggers (constraint triggers)
1719 : : * recurse: if true, recurse to partitions
1720 : : *
1721 : : * Caller should have checked permissions for the table; here we also
1722 : : * enforce that superuser privilege is required to alter the state of
1723 : : * system triggers.
1724 : : */
1725 : : void
917 tgl@sss.pgh.pa.us 1726 : 226 : EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
1727 : : char fires_when, bool skip_system, bool recurse,
1728 : : LOCKMODE lockmode)
1729 : : {
1730 : : Relation tgrel;
1731 : : int nkeys;
1732 : : ScanKeyData keys[2];
1733 : : SysScanDesc tgscan;
1734 : : HeapTuple tuple;
1735 : : bool found;
1736 : : bool changed;
1737 : :
1738 : : /* Scan the relevant entries in pg_trigger */
2420 andres@anarazel.de 1739 : 226 : tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1740 : :
7319 tgl@sss.pgh.pa.us 1741 : 226 : ScanKeyInit(&keys[0],
1742 : : Anum_pg_trigger_tgrelid,
1743 : : BTEqualStrategyNumber, F_OIDEQ,
1744 : : ObjectIdGetDatum(RelationGetRelid(rel)));
1745 [ + + ]: 226 : if (tgname)
1746 : : {
1747 : 159 : ScanKeyInit(&keys[1],
1748 : : Anum_pg_trigger_tgname,
1749 : : BTEqualStrategyNumber, F_NAMEEQ,
1750 : : CStringGetDatum(tgname));
1751 : 159 : nkeys = 2;
1752 : : }
1753 : : else
1754 : 67 : nkeys = 1;
1755 : :
1756 : 226 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1757 : : NULL, nkeys, keys);
1758 : :
1759 : 226 : found = changed = false;
1760 : :
1761 [ + + ]: 590 : while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1762 : : {
1763 : 364 : Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1764 : :
917 1765 [ + + + + ]: 364 : if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid)
1766 : 96 : continue;
1767 : :
5711 1768 [ + + ]: 268 : if (oldtrig->tgisinternal)
1769 : : {
1770 : : /* system trigger ... ok to process? */
7319 1771 [ + + ]: 36 : if (skip_system)
1772 : 6 : continue;
1773 [ - + ]: 30 : if (!superuser())
7319 tgl@sss.pgh.pa.us 1774 [ # # ]:UBC 0 : ereport(ERROR,
1775 : : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1776 : : errmsg("permission denied: \"%s\" is a system trigger",
1777 : : NameStr(oldtrig->tgname))));
1778 : : }
1779 : :
7319 tgl@sss.pgh.pa.us 1780 :CBC 262 : found = true;
1781 : :
6746 JanWieck@Yahoo.com 1782 [ + + ]: 262 : if (oldtrig->tgenabled != fires_when)
1783 : : {
1784 : : /* need to change this one ... make a copy to scribble on */
7266 bruce@momjian.us 1785 : 247 : HeapTuple newtup = heap_copytuple(tuple);
7319 tgl@sss.pgh.pa.us 1786 : 247 : Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1787 : :
6746 JanWieck@Yahoo.com 1788 : 247 : newtrig->tgenabled = fires_when;
1789 : :
3140 alvherre@alvh.no-ip. 1790 : 247 : CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1791 : :
7319 tgl@sss.pgh.pa.us 1792 : 247 : heap_freetuple(newtup);
1793 : :
1794 : 247 : changed = true;
1795 : : }
1796 : :
1797 : : /*
1798 : : * When altering FOR EACH ROW triggers on a partitioned table, do the
1799 : : * same on the partitions as well, unless ONLY is specified.
1800 : : *
1801 : : * Note that we recurse even if we didn't change the trigger above,
1802 : : * because the partitions' copy of the trigger may have a different
1803 : : * value of tgenabled than the parent's trigger and thus might need to
1804 : : * be changed.
1805 : : */
1129 alvherre@alvh.no-ip. 1806 [ + + ]: 262 : if (recurse &&
1807 [ + + ]: 248 : rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1808 [ + + ]: 43 : (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1809 : : {
1810 : 37 : PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1811 : : int i;
1812 : :
1813 [ + + ]: 92 : for (i = 0; i < partdesc->nparts; i++)
1814 : : {
1815 : : Relation part;
1816 : :
1817 : 55 : part = relation_open(partdesc->oids[i], lockmode);
1818 : : /* Match on child triggers' tgparentid, not their name */
917 tgl@sss.pgh.pa.us 1819 : 55 : EnableDisableTrigger(part, NULL, oldtrig->oid,
1820 : : fires_when, skip_system, recurse,
1821 : : lockmode);
1129 alvherre@alvh.no-ip. 1822 : 55 : table_close(part, NoLock); /* keep lock till commit */
1823 : : }
1824 : : }
1825 : :
4556 rhaas@postgresql.org 1826 [ + + ]: 262 : InvokeObjectPostAlterHook(TriggerRelationId,
1827 : : oldtrig->oid, 0);
1828 : : }
1829 : :
7319 tgl@sss.pgh.pa.us 1830 : 226 : systable_endscan(tgscan);
1831 : :
2420 andres@anarazel.de 1832 : 226 : table_close(tgrel, RowExclusiveLock);
1833 : :
7319 tgl@sss.pgh.pa.us 1834 [ + + - + ]: 226 : if (tgname && !found)
7319 tgl@sss.pgh.pa.us 1835 [ # # ]:UBC 0 : ereport(ERROR,
1836 : : (errcode(ERRCODE_UNDEFINED_OBJECT),
1837 : : errmsg("trigger \"%s\" for table \"%s\" does not exist",
1838 : : tgname, RelationGetRelationName(rel))));
1839 : :
1840 : : /*
1841 : : * If we changed anything, broadcast a SI inval message to force each
1842 : : * backend (including our own!) to rebuild relation's relcache entry.
1843 : : * Otherwise they will fail to apply the change promptly.
1844 : : */
7319 tgl@sss.pgh.pa.us 1845 [ + + ]:CBC 226 : if (changed)
1846 : 223 : CacheInvalidateRelcache(rel);
1847 : 226 : }
1848 : :
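The recursion above matches cloned triggers on partitions by their tgparentid, never by name, and descends even when the parent's own copy already had the requested tgenabled value. The following self-contained sketch illustrates that matching-by-parent-id recursion; it is not part of trigger.c, all types and names are hypothetical, and the relkind and row-level checks of the real function are omitted.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* hypothetical stand-ins for pg_trigger rows and relations */
typedef struct ToyTrigger
{
    unsigned    id;
    unsigned    parent_id;      /* 0 means "not a clone" (no tgparentid) */
    char        name[32];
    char        enabled;        /* 'O', 'D', 'R', 'A' as in tgenabled */
} ToyTrigger;

typedef struct ToyRel
{
    ToyTrigger  triggers[4];
    int         ntriggers;
    struct ToyRel *parts[4];
    int         nparts;
} ToyRel;

static void
toy_enable_disable(ToyRel *rel, const char *tgname, unsigned tgparent,
                   char fires_when, bool recurse)
{
    for (int i = 0; i < rel->ntriggers; i++)
    {
        ToyTrigger *trig = &rel->triggers[i];

        /* when recursing, select child triggers by parent id, not name */
        if (tgparent != 0 && trig->parent_id != tgparent)
            continue;
        if (tgname != NULL && strcmp(trig->name, tgname) != 0)
            continue;

        trig->enabled = fires_when;

        /* recurse even if this copy was already in the requested state */
        if (recurse)
        {
            for (int p = 0; p < rel->nparts; p++)
                toy_enable_disable(rel->parts[p], NULL, trig->id,
                                   fires_when, recurse);
        }
    }
}

int
main(void)
{
    ToyRel      part = {.triggers = {{2, 1, "trg", 'O'}}, .ntriggers = 1};
    ToyRel      parent = {.triggers = {{1, 0, "trg", 'O'}}, .ntriggers = 1,
                          .parts = {&part}, .nparts = 1};

    toy_enable_disable(&parent, "trg", 0, 'D', true);
    printf("parent=%c partition=%c\n",
           parent.triggers[0].enabled, part.triggers[0].enabled);
    return 0;
}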
1849 : :
1850 : : /*
1851 : : * Build trigger data to attach to the given relcache entry.
1852 : : *
1853 : : * Note that trigger data attached to a relcache entry must be stored in
1854 : : * CacheMemoryContext to ensure it survives as long as the relcache entry.
1855 : : * But we should be running in a less long-lived working context. To avoid
1856 : : * leaking cache memory if this routine fails partway through, we build a
1857 : : * temporary TriggerDesc in working memory and then copy the completed
1858 : : * structure into cache memory.
1859 : : */
1860 : : void
10226 bruce@momjian.us 1861 : 31130 : RelationBuildTriggers(Relation relation)
1862 : : {
1863 : : TriggerDesc *trigdesc;
1864 : : int numtrigs;
1865 : : int maxtrigs;
1866 : : Trigger *triggers;
1867 : : Relation tgrel;
1868 : : ScanKeyData skey;
1869 : : SysScanDesc tgscan;
1870 : : HeapTuple htup;
1871 : : MemoryContext oldContext;
1872 : : int i;
1873 : :
1874 : : /*
1875 : : * Allocate a working array to hold the triggers (the array is extended if
1876 : : * necessary)
1877 : : */
6145 tgl@sss.pgh.pa.us 1878 : 31130 : maxtrigs = 16;
1879 : 31130 : triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1880 : 31130 : numtrigs = 0;
1881 : :
1882 : : /*
1883 : : * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1884 : : * be reading the triggers in name order, except possibly during
1885 : : * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1886 : : * ensures that triggers will be fired in name order.
1887 : : */
7969 1888 : 31130 : ScanKeyInit(&skey,
1889 : : Anum_pg_trigger_tgrelid,
1890 : : BTEqualStrategyNumber, F_OIDEQ,
1891 : : ObjectIdGetDatum(RelationGetRelid(relation)));
1892 : :
2420 andres@anarazel.de 1893 : 31130 : tgrel = table_open(TriggerRelationId, AccessShareLock);
7450 tgl@sss.pgh.pa.us 1894 : 31130 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1895 : : NULL, 1, &skey);
1896 : :
8600 1897 [ + + ]: 88107 : while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1898 : : {
1899 : 56977 : Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1900 : : Trigger *build;
1901 : : Datum datum;
1902 : : bool isnull;
1903 : :
6145 1904 [ + + ]: 56977 : if (numtrigs >= maxtrigs)
1905 : : {
1906 : 24 : maxtrigs *= 2;
1907 : 24 : triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1908 : : }
1909 : 56977 : build = &(triggers[numtrigs]);
1910 : :
2482 andres@anarazel.de 1911 : 56977 : build->tgoid = pg_trigger->oid;
8363 tgl@sss.pgh.pa.us 1912 : 56977 : build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1913 : : NameGetDatum(&pg_trigger->tgname)));
10226 bruce@momjian.us 1914 : 56977 : build->tgfoid = pg_trigger->tgfoid;
1915 : 56977 : build->tgtype = pg_trigger->tgtype;
9474 JanWieck@Yahoo.com 1916 : 56977 : build->tgenabled = pg_trigger->tgenabled;
5711 tgl@sss.pgh.pa.us 1917 : 56977 : build->tgisinternal = pg_trigger->tgisinternal;
1998 alvherre@alvh.no-ip. 1918 : 56977 : build->tgisclone = OidIsValid(pg_trigger->tgparentid);
8559 tgl@sss.pgh.pa.us 1919 : 56977 : build->tgconstrrelid = pg_trigger->tgconstrrelid;
5884 1920 : 56977 : build->tgconstrindid = pg_trigger->tgconstrindid;
6779 1921 : 56977 : build->tgconstraint = pg_trigger->tgconstraint;
9474 JanWieck@Yahoo.com 1922 : 56977 : build->tgdeferrable = pg_trigger->tgdeferrable;
1923 : 56977 : build->tginitdeferred = pg_trigger->tginitdeferred;
10226 bruce@momjian.us 1924 : 56977 : build->tgnargs = pg_trigger->tgnargs;
1925 : : /* tgattr is first var-width field, so OK to access directly */
7466 tgl@sss.pgh.pa.us 1926 : 56977 : build->tgnattr = pg_trigger->tgattr.dim1;
1927 [ + + ]: 56977 : if (build->tgnattr > 0)
1928 : : {
4821 peter_e@gmx.net 1929 : 269 : build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
7466 tgl@sss.pgh.pa.us 1930 : 269 : memcpy(build->tgattr, &(pg_trigger->tgattr.values),
4821 peter_e@gmx.net 1931 : 269 : build->tgnattr * sizeof(int16));
1932 : : }
1933 : : else
7466 tgl@sss.pgh.pa.us 1934 : 56708 : build->tgattr = NULL;
10226 bruce@momjian.us 1935 [ + + ]: 56977 : if (build->tgnargs > 0)
1936 : : {
1937 : : bytea *val;
1938 : : char *p;
1939 : :
3100 noah@leadboat.com 1940 : 1430 : val = DatumGetByteaPP(fastgetattr(htup,
1941 : : Anum_pg_trigger_tgargs,
1942 : : tgrel->rd_att, &isnull));
10226 bruce@momjian.us 1943 [ - + ]: 1430 : if (isnull)
8084 tgl@sss.pgh.pa.us 1944 [ # # ]:UBC 0 : elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1945 : : RelationGetRelationName(relation));
3100 noah@leadboat.com 1946 [ + - ]:CBC 1430 : p = (char *) VARDATA_ANY(val);
8363 tgl@sss.pgh.pa.us 1947 : 1430 : build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
10226 bruce@momjian.us 1948 [ + + ]: 3096 : for (i = 0; i < build->tgnargs; i++)
1949 : : {
8363 tgl@sss.pgh.pa.us 1950 : 1666 : build->tgargs[i] = pstrdup(p);
10226 bruce@momjian.us 1951 : 1666 : p += strlen(p) + 1;
1952 : : }
1953 : : }
1954 : : else
1955 : 55547 : build->tgargs = NULL;
1956 : :
3228 kgrittn@postgresql.o 1957 : 56977 : datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1958 : : tgrel->rd_att, &isnull);
1959 [ + + ]: 56977 : if (!isnull)
1960 : 513 : build->tgoldtable =
1961 : 513 : DatumGetCString(DirectFunctionCall1(nameout, datum));
1962 : : else
1963 : 56464 : build->tgoldtable = NULL;
1964 : :
1965 : 56977 : datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1966 : : tgrel->rd_att, &isnull);
1967 [ + + ]: 56977 : if (!isnull)
1968 : 673 : build->tgnewtable =
1969 : 673 : DatumGetCString(DirectFunctionCall1(nameout, datum));
1970 : : else
1971 : 56304 : build->tgnewtable = NULL;
1972 : :
5769 tgl@sss.pgh.pa.us 1973 : 56977 : datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1974 : : tgrel->rd_att, &isnull);
1975 [ + + ]: 56977 : if (!isnull)
1976 : 466 : build->tgqual = TextDatumGetCString(datum);
1977 : : else
1978 : 56511 : build->tgqual = NULL;
1979 : :
6145 1980 : 56977 : numtrigs++;
1981 : : }
1982 : :
8600 1983 : 31130 : systable_endscan(tgscan);
2420 andres@anarazel.de 1984 : 31130 : table_close(tgrel, AccessShareLock);
1985 : :
1986 : : /* There might not be any triggers */
6145 tgl@sss.pgh.pa.us 1987 [ + + ]: 31130 : if (numtrigs == 0)
1988 : : {
1989 : 7020 : pfree(triggers);
1990 : 7020 : return;
1991 : : }
1992 : :
1993 : : /* Build trigdesc */
8333 bruce@momjian.us 1994 : 24110 : trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
10226 1995 : 24110 : trigdesc->triggers = triggers;
6145 tgl@sss.pgh.pa.us 1996 : 24110 : trigdesc->numtriggers = numtrigs;
1997 [ + + ]: 81087 : for (i = 0; i < numtrigs; i++)
5445 1998 : 56977 : SetTriggerFlags(trigdesc, &(triggers[i]));
1999 : :
2000 : : /* Copy completed trigdesc into cache storage */
8363 2001 : 24110 : oldContext = MemoryContextSwitchTo(CacheMemoryContext);
2002 : 24110 : relation->trigdesc = CopyTriggerDesc(trigdesc);
2003 : 24110 : MemoryContextSwitchTo(oldContext);
2004 : :
2005 : : /* Release working memory */
2006 : 24110 : FreeTriggerDesc(trigdesc);
2007 : : }
2008 : :
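RelationBuildTriggers() grows its working array by doubling and copies the completed TriggerDesc into long-lived cache memory only once it is fully built, so a failure partway through leaks nothing in CacheMemoryContext. Below is a minimal sketch of that build-then-copy pattern; it is not PostgreSQL code, and malloc/realloc/strdup stand in for palloc, repalloc, and memory-context management.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical, much-reduced analogue of TriggerDesc */
typedef struct ToyDesc
{
    int         ntriggers;
    char      **names;
} ToyDesc;

/* deep-copy a finished descriptor into freshly allocated "cache" storage */
static ToyDesc *
toy_copy_desc(const ToyDesc *src)
{
    ToyDesc    *dst = malloc(sizeof(ToyDesc));

    dst->ntriggers = src->ntriggers;
    dst->names = malloc(src->ntriggers * sizeof(char *));
    for (int i = 0; i < src->ntriggers; i++)
        dst->names[i] = strdup(src->names[i]);
    return dst;
}

static ToyDesc *
toy_build_desc(const char **catalog_rows, int nrows)
{
    int         maxtrigs = 2;   /* start small; double whenever full */
    int         numtrigs = 0;
    char      **names = malloc(maxtrigs * sizeof(char *));
    ToyDesc     scratch;
    ToyDesc    *result;

    for (int i = 0; i < nrows; i++)
    {
        if (numtrigs >= maxtrigs)
        {
            maxtrigs *= 2;
            names = realloc(names, maxtrigs * sizeof(char *));
        }
        names[numtrigs++] = strdup(catalog_rows[i]);
    }

    /* only the completed structure is copied into long-lived storage */
    scratch.ntriggers = numtrigs;
    scratch.names = names;
    result = toy_copy_desc(&scratch);

    /* release the scratch copy */
    for (int i = 0; i < numtrigs; i++)
        free(names[i]);
    free(names);
    return result;
}

int
main(void)
{
    const char *rows[] = {"trg_a", "trg_b", "trg_c"};
    ToyDesc    *desc = toy_build_desc(rows, 3);

    printf("built %d triggers, first is %s\n",
           desc->ntriggers, desc->names[0]);
    return 0;
}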
2009 : : /*
2010 : : * Update the TriggerDesc's hint flags to include the specified trigger
2011 : : */
2012 : : static void
5445 2013 : 56977 : SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
2014 : : {
2015 : 56977 : int16 tgtype = trigger->tgtype;
2016 : :
2017 : 56977 : trigdesc->trig_insert_before_row |=
2018 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2019 : : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2020 : 56977 : trigdesc->trig_insert_after_row |=
2021 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2022 : : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2023 : 56977 : trigdesc->trig_insert_instead_row |=
2024 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2025 : : TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2026 : 56977 : trigdesc->trig_insert_before_statement |=
2027 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2028 : : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2029 : 56977 : trigdesc->trig_insert_after_statement |=
2030 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2031 : : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2032 : 56977 : trigdesc->trig_update_before_row |=
2033 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2034 : : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2035 : 56977 : trigdesc->trig_update_after_row |=
2036 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2037 : : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2038 : 56977 : trigdesc->trig_update_instead_row |=
2039 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2040 : : TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2041 : 56977 : trigdesc->trig_update_before_statement |=
2042 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2043 : : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2044 : 56977 : trigdesc->trig_update_after_statement |=
2045 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2046 : : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2047 : 56977 : trigdesc->trig_delete_before_row |=
2048 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2049 : : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2050 : 56977 : trigdesc->trig_delete_after_row |=
2051 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2052 : : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2053 : 56977 : trigdesc->trig_delete_instead_row |=
2054 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2055 : : TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2056 : 56977 : trigdesc->trig_delete_before_statement |=
2057 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2058 : : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2059 : 56977 : trigdesc->trig_delete_after_statement |=
2060 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2061 : : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2062 : : /* there are no row-level truncate triggers */
2063 : 56977 : trigdesc->trig_truncate_before_statement |=
2064 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2065 : : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2066 : 56977 : trigdesc->trig_truncate_after_statement |=
2067 : 56977 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2068 : : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2069 : :
3228 kgrittn@postgresql.o 2070 : 113954 : trigdesc->trig_insert_new_table |=
2071 [ + + ]: 75584 : (TRIGGER_FOR_INSERT(tgtype) &&
2072 [ + + ]: 18607 : TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2073 : 113954 : trigdesc->trig_update_old_table |=
2074 [ + + ]: 83000 : (TRIGGER_FOR_UPDATE(tgtype) &&
2075 [ + + ]: 26023 : TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2076 : 113954 : trigdesc->trig_update_new_table |=
2077 [ + + ]: 83000 : (TRIGGER_FOR_UPDATE(tgtype) &&
2078 [ + + ]: 26023 : TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2079 : 113954 : trigdesc->trig_delete_old_table |=
2080 [ + + ]: 72847 : (TRIGGER_FOR_DELETE(tgtype) &&
2081 [ + + ]: 15870 : TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
10232 vadim4o@yahoo.com 2082 : 56977 : }
2083 : :
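SetTriggerFlags() ORs a precomputed boolean into the TriggerDesc for each (level, timing, event) combination, so executor hot paths can test one flag instead of decoding tgtype for every tuple. The sketch below illustrates the bit-packed matching idea with a simplified, hypothetical bit layout; the real bit assignments live in catalog/pg_trigger.h and are not reproduced here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical bit layout; the real one is in catalog/pg_trigger.h */
#define TOY_TYPE_ROW        (1 << 0)    /* absent bit means statement level */
#define TOY_TYPE_BEFORE     (1 << 1)
#define TOY_TYPE_AFTER      (1 << 2)
#define TOY_TYPE_INSERT     (1 << 3)
#define TOY_TYPE_UPDATE     (1 << 4)

/* does tgtype carry exactly this level and timing, and include this event? */
#define TOY_TYPE_MATCHES(tgtype, level, timing, event) \
    (((tgtype) & (TOY_TYPE_ROW | TOY_TYPE_BEFORE | TOY_TYPE_AFTER | (event))) == \
     ((level) | (timing) | (event)))

typedef struct ToyTrigDesc
{
    bool        trig_insert_before_row;
    bool        trig_update_before_row;
} ToyTrigDesc;

static void
toy_set_flags(ToyTrigDesc *desc, int16_t tgtype)
{
    /* OR, not assign: any one matching trigger turns the hint flag on */
    desc->trig_insert_before_row |=
        TOY_TYPE_MATCHES(tgtype, TOY_TYPE_ROW, TOY_TYPE_BEFORE, TOY_TYPE_INSERT);
    desc->trig_update_before_row |=
        TOY_TYPE_MATCHES(tgtype, TOY_TYPE_ROW, TOY_TYPE_BEFORE, TOY_TYPE_UPDATE);
}

int
main(void)
{
    ToyTrigDesc desc = {0};

    /* one BEFORE INSERT OR UPDATE ... FOR EACH ROW trigger */
    toy_set_flags(&desc, TOY_TYPE_ROW | TOY_TYPE_BEFORE |
                  TOY_TYPE_INSERT | TOY_TYPE_UPDATE);
    printf("insert_before_row=%d update_before_row=%d\n",
           desc.trig_insert_before_row, desc.trig_update_before_row);
    return 0;
}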
2084 : : /*
2085 : : * Copy a TriggerDesc data structure.
2086 : : *
2087 : : * The copy is allocated in the current memory context.
2088 : : */
2089 : : TriggerDesc *
8363 tgl@sss.pgh.pa.us 2090 : 237357 : CopyTriggerDesc(TriggerDesc *trigdesc)
2091 : : {
2092 : : TriggerDesc *newdesc;
2093 : : Trigger *trigger;
2094 : : int i;
2095 : :
2096 [ + + - + ]: 237357 : if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2097 : 204644 : return NULL;
2098 : :
2099 : 32713 : newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2100 : 32713 : memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2101 : :
2102 : 32713 : trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2103 : 32713 : memcpy(trigger, trigdesc->triggers,
2104 : 32713 : trigdesc->numtriggers * sizeof(Trigger));
2105 : 32713 : newdesc->triggers = trigger;
2106 : :
2107 [ + + ]: 113727 : for (i = 0; i < trigdesc->numtriggers; i++)
2108 : : {
2109 : 81014 : trigger->tgname = pstrdup(trigger->tgname);
7466 2110 [ + + ]: 81014 : if (trigger->tgnattr > 0)
2111 : : {
2112 : : int16 *newattr;
2113 : :
4821 peter_e@gmx.net 2114 : 512 : newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
7466 tgl@sss.pgh.pa.us 2115 : 512 : memcpy(newattr, trigger->tgattr,
4821 peter_e@gmx.net 2116 : 512 : trigger->tgnattr * sizeof(int16));
7466 tgl@sss.pgh.pa.us 2117 : 512 : trigger->tgattr = newattr;
2118 : : }
8363 2119 [ + + ]: 81014 : if (trigger->tgnargs > 0)
2120 : : {
2121 : : char **newargs;
2122 : : int16 j;
2123 : :
2124 : 4511 : newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2125 [ + + ]: 9605 : for (j = 0; j < trigger->tgnargs; j++)
2126 : 5094 : newargs[j] = pstrdup(trigger->tgargs[j]);
2127 : 4511 : trigger->tgargs = newargs;
2128 : : }
5769 2129 [ + + ]: 81014 : if (trigger->tgqual)
2130 : 751 : trigger->tgqual = pstrdup(trigger->tgqual);
3228 kgrittn@postgresql.o 2131 [ + + ]: 81014 : if (trigger->tgoldtable)
2132 : 1127 : trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2133 [ + + ]: 81014 : if (trigger->tgnewtable)
2134 : 1306 : trigger->tgnewtable = pstrdup(trigger->tgnewtable);
8363 tgl@sss.pgh.pa.us 2135 : 81014 : trigger++;
2136 : : }
2137 : :
2138 : 32713 : return newdesc;
2139 : : }
2140 : :
2141 : : /*
2142 : : * Free a TriggerDesc data structure.
2143 : : */
2144 : : void
9350 2145 : 596522 : FreeTriggerDesc(TriggerDesc *trigdesc)
2146 : : {
2147 : : Trigger *trigger;
2148 : : int i;
2149 : :
2150 [ + + ]: 596522 : if (trigdesc == NULL)
2151 : 550360 : return;
2152 : :
2153 : 46162 : trigger = trigdesc->triggers;
2154 [ + + ]: 153975 : for (i = 0; i < trigdesc->numtriggers; i++)
2155 : : {
2156 : 107813 : pfree(trigger->tgname);
7466 2157 [ + + ]: 107813 : if (trigger->tgnattr > 0)
2158 : 505 : pfree(trigger->tgattr);
9350 2159 [ + + ]: 107813 : if (trigger->tgnargs > 0)
2160 : : {
2161 [ + + ]: 5788 : while (--(trigger->tgnargs) >= 0)
2162 : 3115 : pfree(trigger->tgargs[trigger->tgnargs]);
2163 : 2673 : pfree(trigger->tgargs);
2164 : : }
5769 2165 [ + + ]: 107813 : if (trigger->tgqual)
2166 : 859 : pfree(trigger->tgqual);
3228 kgrittn@postgresql.o 2167 [ + + ]: 107813 : if (trigger->tgoldtable)
2168 : 982 : pfree(trigger->tgoldtable);
2169 [ + + ]: 107813 : if (trigger->tgnewtable)
2170 : 1292 : pfree(trigger->tgnewtable);
9350 tgl@sss.pgh.pa.us 2171 : 107813 : trigger++;
2172 : : }
2173 : 46162 : pfree(trigdesc->triggers);
2174 : 46162 : pfree(trigdesc);
2175 : : }
2176 : :
2177 : : /*
2178 : : * Compare two TriggerDesc structures for logical equality.
2179 : : */
2180 : : #ifdef NOT_USED
2181 : : bool
2182 : : equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
2183 : : {
2184 : : int i,
2185 : : j;
2186 : :
2187 : : /*
2188 : : * We need not examine the hint flags, just the trigger array itself; if
2189 : : * we have the same triggers with the same types, the flags should match.
2190 : : *
2191 : : * As of 7.3 we assume trigger set ordering is significant in the
2192 : : * comparison; so we just compare corresponding slots of the two sets.
2193 : : *
2194 : : * Note: comparing the stringToNode forms of the WHEN clauses means that
2195 : : * parse column locations will affect the result. This is okay as long as
2196 : : * this function is only used for detecting exact equality, as for example
2197 : : * in checking for staleness of a cache entry.
2198 : : */
2199 : : if (trigdesc1 != NULL)
2200 : : {
2201 : : if (trigdesc2 == NULL)
2202 : : return false;
2203 : : if (trigdesc1->numtriggers != trigdesc2->numtriggers)
2204 : : return false;
2205 : : for (i = 0; i < trigdesc1->numtriggers; i++)
2206 : : {
2207 : : Trigger *trig1 = trigdesc1->triggers + i;
2208 : : Trigger *trig2 = trigdesc2->triggers + i;
2209 : :
2210 : : if (trig1->tgoid != trig2->tgoid)
2211 : : return false;
2212 : : if (strcmp(trig1->tgname, trig2->tgname) != 0)
2213 : : return false;
2214 : : if (trig1->tgfoid != trig2->tgfoid)
2215 : : return false;
2216 : : if (trig1->tgtype != trig2->tgtype)
2217 : : return false;
2218 : : if (trig1->tgenabled != trig2->tgenabled)
2219 : : return false;
2220 : : if (trig1->tgisinternal != trig2->tgisinternal)
2221 : : return false;
2222 : : if (trig1->tgisclone != trig2->tgisclone)
2223 : : return false;
2224 : : if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2225 : : return false;
2226 : : if (trig1->tgconstrindid != trig2->tgconstrindid)
2227 : : return false;
2228 : : if (trig1->tgconstraint != trig2->tgconstraint)
2229 : : return false;
2230 : : if (trig1->tgdeferrable != trig2->tgdeferrable)
2231 : : return false;
2232 : : if (trig1->tginitdeferred != trig2->tginitdeferred)
2233 : : return false;
2234 : : if (trig1->tgnargs != trig2->tgnargs)
2235 : : return false;
2236 : : if (trig1->tgnattr != trig2->tgnattr)
2237 : : return false;
2238 : : if (trig1->tgnattr > 0 &&
2239 : : memcmp(trig1->tgattr, trig2->tgattr,
2240 : : trig1->tgnattr * sizeof(int16)) != 0)
2241 : : return false;
2242 : : for (j = 0; j < trig1->tgnargs; j++)
2243 : : if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2244 : : return false;
2245 : : if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2246 : : /* ok */ ;
2247 : : else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2248 : : return false;
2249 : : else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2250 : : return false;
2251 : : if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2252 : : /* ok */ ;
2253 : : else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2254 : : return false;
2255 : : else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2256 : : return false;
2257 : : if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2258 : : /* ok */ ;
2259 : : else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2260 : : return false;
2261 : : else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2262 : : return false;
2263 : : }
2264 : : }
2265 : : else if (trigdesc2 != NULL)
2266 : : return false;
2267 : : return true;
2268 : : }
2269 : : #endif /* NOT_USED */
2270 : :
2271 : : /*
2272 : : * Check if there is a row-level trigger with transition tables that prevents
2273 : : * a table from becoming an inheritance child or partition. Return the name
2274 : : * of the first such incompatible trigger, or NULL if there is none.
2275 : : */
2276 : : const char *
2992 rhodiumtoad@postgres 2277 : 1349 : FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2278 : : {
2279 [ + + ]: 1349 : if (trigdesc != NULL)
2280 : : {
2281 : : int i;
2282 : :
2283 [ + + ]: 306 : for (i = 0; i < trigdesc->numtriggers; ++i)
2284 : : {
2945 tgl@sss.pgh.pa.us 2285 : 219 : Trigger *trigger = &trigdesc->triggers[i];
2286 : :
29 efujita@postgresql.o 2287 [ + + ]: 219 : if (!TRIGGER_FOR_ROW(trigger->tgtype))
2288 : 18 : continue;
2992 rhodiumtoad@postgres 2289 [ + - + + ]: 201 : if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2290 : 6 : return trigger->tgname;
2291 : : }
2292 : : }
2293 : :
2294 : 1343 : return NULL;
2295 : : }
2296 : :
2297 : : /*
2298 : : * Call a trigger function.
2299 : : *
2300 : : * trigdata: trigger descriptor.
2301 : : * tgindx: trigger's index in finfo and instr arrays.
2302 : : * finfo: array of cached trigger function call information.
2303 : : * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2304 : : * per_tuple_context: memory context to execute the function in.
2305 : : *
2306 : : * Returns the tuple (or NULL) as returned by the function.
2307 : : */
2308 : : static HeapTuple
8863 tgl@sss.pgh.pa.us 2309 : 10918 : ExecCallTriggerFunc(TriggerData *trigdata,
2310 : : int tgindx,
2311 : : FmgrInfo *finfo,
2312 : : Instrumentation *instr,
2313 : : MemoryContext per_tuple_context)
2314 : : {
2415 andres@anarazel.de 2315 : 10918 : LOCAL_FCINFO(fcinfo, 0);
2316 : : PgStat_FunctionCallUsage fcusage;
2317 : : Datum result;
2318 : : MemoryContext oldContext;
2319 : :
2320 : : /*
2321 : : * Protect against code paths that may fail to initialize transition table
2322 : : * info.
2323 : : */
3228 kgrittn@postgresql.o 2324 [ + + + + : 10918 : Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
+ + + + +
- - + + -
- + ]
2325 : : TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2326 : : TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2327 : : TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2328 : : !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2329 : : !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2330 : : (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2331 : :
7470 tgl@sss.pgh.pa.us 2332 : 10918 : finfo += tgindx;
2333 : :
2334 : : /*
2335 : : * We cache fmgr lookup info, to avoid making the lookup again on each
2336 : : * call.
2337 : : */
8863 2338 [ + + ]: 10918 : if (finfo->fn_oid == InvalidOid)
2339 : 9296 : fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2340 : :
2341 [ - + ]: 10918 : Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2342 : :
2343 : : /*
2344 : : * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2345 : : */
7470 2346 [ - + ]: 10918 : if (instr)
7470 tgl@sss.pgh.pa.us 2347 :UBC 0 : InstrStartNode(instr + tgindx);
2348 : :
2349 : : /*
2350 : : * Do the function evaluation in the per-tuple memory context, so that
2351 : : * leaked memory will be reclaimed once per tuple. Note in particular that
2352 : : * any new tuple created by the trigger function will live till the end of
2353 : : * the tuple cycle.
2354 : : */
8993 tgl@sss.pgh.pa.us 2355 :CBC 10918 : oldContext = MemoryContextSwitchTo(per_tuple_context);
2356 : :
2357 : : /*
2358 : : * Call the function, passing no arguments but setting a context.
2359 : : */
2415 andres@anarazel.de 2360 : 10918 : InitFunctionCallInfoData(*fcinfo, finfo, 0,
2361 : : InvalidOid, (Node *) trigdata, NULL);
2362 : :
2363 : 10918 : pgstat_init_function_usage(fcinfo, &fcusage);
2364 : :
4973 alvherre@alvh.no-ip. 2365 : 10918 : MyTriggerDepth++;
2366 [ + + ]: 10918 : PG_TRY();
2367 : : {
2415 andres@anarazel.de 2368 : 10918 : result = FunctionCallInvoke(fcinfo);
2369 : : }
2136 peter@eisentraut.org 2370 : 691 : PG_FINALLY();
2371 : : {
4973 alvherre@alvh.no-ip. 2372 : 10918 : MyTriggerDepth--;
2373 : : }
2374 [ + + ]: 10918 : PG_END_TRY();
2375 : :
6323 tgl@sss.pgh.pa.us 2376 : 10227 : pgstat_end_function_usage(&fcusage, true);
2377 : :
8993 2378 : 10227 : MemoryContextSwitchTo(oldContext);
2379 : :
2380 : : /*
2381 : : * Trigger protocol allows function to return a null pointer, but NOT to
2382 : : * set the isnull result flag.
2383 : : */
2415 andres@anarazel.de 2384 [ - + ]: 10227 : if (fcinfo->isnull)
8084 tgl@sss.pgh.pa.us 2385 [ # # ]:UBC 0 : ereport(ERROR,
2386 : : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2387 : : errmsg("trigger function %u returned null value",
2388 : : fcinfo->flinfo->fn_oid)));
2389 : :
2390 : : /*
2391 : : * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2392 : : * one "tuple returned" (really the number of firings).
2393 : : */
7470 tgl@sss.pgh.pa.us 2394 [ - + ]:CBC 10227 : if (instr)
7039 bruce@momjian.us 2395 :UBC 0 : InstrStopNode(instr + tgindx, 1);
2396 : :
9231 tgl@sss.pgh.pa.us 2397 :CBC 10227 : return (HeapTuple) DatumGetPointer(result);
2398 : : }
2399 : :
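ExecCallTriggerFunc() caches the fmgr lookup in the caller-supplied finfo array, indexed by trigger number, so the lookup happens only on a trigger's first firing in a query. The standalone sketch below shows that lookup-once-per-index caching pattern with an ordinary function pointer standing in for FmgrInfo; all names are hypothetical.

#include <stdio.h>

typedef int (*toy_trigger_fn) (int arg);

/* hypothetical stand-in for FmgrInfo */
typedef struct ToyFmgrInfo
{
    toy_trigger_fn fn_addr;     /* NULL until first use */
    unsigned    fn_oid;         /* 0 means "not looked up yet" */
} ToyFmgrInfo;

static int
toy_double(int arg)
{
    return arg * 2;
}

/* stand-in for the comparatively expensive fmgr_info() lookup */
static void
toy_fmgr_info(unsigned foid, ToyFmgrInfo *finfo)
{
    printf("resolving function %u\n", foid);
    finfo->fn_oid = foid;
    finfo->fn_addr = toy_double;
}

static int
toy_call_trigger_func(unsigned tgfoid, int tgindx, ToyFmgrInfo *finfo, int arg)
{
    finfo += tgindx;            /* each trigger index caches its own lookup */

    if (finfo->fn_oid == 0)
        toy_fmgr_info(tgfoid, finfo);

    return finfo->fn_addr(arg);
}

int
main(void)
{
    ToyFmgrInfo cache[4] = {{0}};
    int         sum = 0;

    /* three "rows": the resolving message is printed only once */
    for (int row = 0; row < 3; row++)
        sum += toy_call_trigger_func(4242, 0, cache, row);
    printf("sum=%d\n", sum);
    return 0;
}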
2400 : : void
8323 bruce@momjian.us 2401 : 42594 : ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2402 : : {
2403 : : TriggerDesc *trigdesc;
2404 : : int i;
2021 peter@eisentraut.org 2405 : 42594 : TriggerData LocTriggerData = {0};
2406 : :
8323 bruce@momjian.us 2407 : 42594 : trigdesc = relinfo->ri_TrigDesc;
2408 : :
2409 [ + + ]: 42594 : if (trigdesc == NULL)
2410 : 42488 : return;
5445 tgl@sss.pgh.pa.us 2411 [ + + ]: 3555 : if (!trigdesc->trig_insert_before_statement)
8323 bruce@momjian.us 2412 : 3449 : return;
2413 : :
2414 : : /* no-op if we already fired BS triggers in this context */
2911 tgl@sss.pgh.pa.us 2415 [ - + ]: 106 : if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2416 : : CMD_INSERT))
2911 tgl@sss.pgh.pa.us 2417 :UBC 0 : return;
2418 : :
8323 bruce@momjian.us 2419 :CBC 106 : LocTriggerData.type = T_TriggerData;
2420 : 106 : LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2421 : : TRIGGER_EVENT_BEFORE;
8069 2422 : 106 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
5445 tgl@sss.pgh.pa.us 2423 [ + + ]: 916 : for (i = 0; i < trigdesc->numtriggers; i++)
2424 : : {
2425 : 816 : Trigger *trigger = &trigdesc->triggers[i];
2426 : : HeapTuple newtuple;
2427 : :
2428 [ + + ]: 816 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2429 : : TRIGGER_TYPE_STATEMENT,
2430 : : TRIGGER_TYPE_BEFORE,
2431 : : TRIGGER_TYPE_INSERT))
2432 : 704 : continue;
5769 2433 [ + + ]: 112 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2434 : : NULL, NULL, NULL))
5806 2435 : 15 : continue;
2436 : :
8323 bruce@momjian.us 2437 : 97 : LocTriggerData.tg_trigger = trigger;
2438 : 97 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2439 : : i,
2440 : : relinfo->ri_TrigFunctions,
2441 : : relinfo->ri_TrigInstrument,
2442 [ + + ]: 97 : GetPerTupleMemoryContext(estate));
2443 : :
2444 [ - + ]: 91 : if (newtuple)
8084 tgl@sss.pgh.pa.us 2445 [ # # ]:UBC 0 : ereport(ERROR,
2446 : : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2447 : : errmsg("BEFORE STATEMENT trigger cannot return a value")));
2448 : : }
2449 : : }
2450 : :
2451 : : void
2992 rhodiumtoad@postgres 2452 :CBC 41302 : ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2453 : : TransitionCaptureState *transition_capture)
2454 : : {
8323 bruce@momjian.us 2455 : 41302 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2456 : :
5445 tgl@sss.pgh.pa.us 2457 [ + + + + ]: 41302 : if (trigdesc && trigdesc->trig_insert_after_statement)
1266 alvherre@alvh.no-ip. 2458 : 221 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2459 : : TRIGGER_EVENT_INSERT,
2460 : : false, NULL, NULL, NIL, NULL, transition_capture,
2461 : : false);
8323 bruce@momjian.us 2462 : 41302 : }
2463 : :
2464 : : bool
8863 tgl@sss.pgh.pa.us 2465 : 1171 : ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2466 : : TupleTableSlot *slot)
2467 : : {
2468 : 1171 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2124 2469 : 1171 : HeapTuple newtuple = NULL;
2470 : : bool should_free;
2021 peter@eisentraut.org 2471 : 1171 : TriggerData LocTriggerData = {0};
2472 : : int i;
2473 : :
9231 tgl@sss.pgh.pa.us 2474 : 1171 : LocTriggerData.type = T_TriggerData;
8323 bruce@momjian.us 2475 : 1171 : LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2476 : : TRIGGER_EVENT_ROW |
2477 : : TRIGGER_EVENT_BEFORE;
8863 tgl@sss.pgh.pa.us 2478 : 1171 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
5445 2479 [ + + ]: 5451 : for (i = 0; i < trigdesc->numtriggers; i++)
2480 : : {
2481 : 4438 : Trigger *trigger = &trigdesc->triggers[i];
2482 : : HeapTuple oldtuple;
2483 : :
2484 [ + + ]: 4438 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2485 : : TRIGGER_TYPE_ROW,
2486 : : TRIGGER_TYPE_BEFORE,
2487 : : TRIGGER_TYPE_INSERT))
2488 : 2122 : continue;
5769 2489 [ + + ]: 2316 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2490 : : NULL, NULL, slot))
5806 2491 : 31 : continue;
2492 : :
2384 andres@anarazel.de 2493 [ + + ]: 2285 : if (!newtuple)
2494 : 1154 : newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2495 : :
2496 : 2285 : LocTriggerData.tg_trigslot = slot;
9231 tgl@sss.pgh.pa.us 2497 : 2285 : LocTriggerData.tg_trigtuple = oldtuple = newtuple;
8863 2498 : 2285 : LocTriggerData.tg_trigger = trigger;
2499 : 2285 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2500 : : i,
2501 : : relinfo->ri_TrigFunctions,
2502 : : relinfo->ri_TrigInstrument,
8993 2503 [ + + ]: 2285 : GetPerTupleMemoryContext(estate));
10226 bruce@momjian.us 2504 [ + + ]: 2248 : if (newtuple == NULL)
2505 : : {
2487 andres@anarazel.de 2506 [ + + ]: 109 : if (should_free)
2384 2507 : 10 : heap_freetuple(oldtuple);
2508 : 109 : return false; /* "do nothing" */
2509 : : }
2510 [ + + ]: 2139 : else if (newtuple != oldtuple)
2511 : : {
211 peter@eisentraut.org 2512 : 372 : newtuple = check_modified_virtual_generated(RelationGetDescr(relinfo->ri_RelationDesc), newtuple);
2513 : :
2332 andres@anarazel.de 2514 : 372 : ExecForceStoreHeapTuple(newtuple, slot, false);
2515 : :
2516 : : /*
2517 : : * After a tuple in a partition goes through a trigger, the user
2518 : : * could have changed the partition key enough that the tuple no
2519 : : * longer fits the partition. Verify that.
2520 : : */
1998 alvherre@alvh.no-ip. 2521 [ + + ]: 372 : if (trigger->tgisclone &&
2522 [ + + ]: 33 : !ExecPartitionCheck(relinfo, slot, estate, false))
2523 [ + - ]: 12 : ereport(ERROR,
2524 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2525 : : errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2526 : : errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2527 : : trigger->tgname,
2528 : : get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2529 : : RelationGetRelationName(relinfo->ri_RelationDesc))));
2530 : :
2384 andres@anarazel.de 2531 [ + + ]: 360 : if (should_free)
2532 : 20 : heap_freetuple(oldtuple);
2533 : :
2534 : : /* signal tuple should be re-fetched if used */
2535 : 360 : newtuple = NULL;
2536 : : }
2537 : : }
2538 : :
2539 : 1013 : return true;
2540 : : }
2541 : :
2542 : : void
8863 tgl@sss.pgh.pa.us 2543 : 6450898 : ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2544 : : TupleTableSlot *slot, List *recheckIndexes,
2545 : : TransitionCaptureState *transition_capture)
2546 : : {
2547 : 6450898 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2548 : :
29 efujita@postgresql.o 2549 [ + + + + ]: 6450898 : if (relinfo->ri_FdwRoutine && transition_capture &&
2550 [ + - ]: 4 : transition_capture->tcs_insert_new_table)
2551 : : {
2552 [ - + ]: 4 : Assert(relinfo->ri_RootResultRelInfo);
2553 [ + - ]: 4 : ereport(ERROR,
2554 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2555 : : errmsg("cannot collect transition tuples from child foreign tables")));
2556 : : }
2557 : :
2992 rhodiumtoad@postgres 2558 [ + + + + : 6450894 : if ((trigdesc && trigdesc->trig_insert_after_row) ||
+ + ]
2559 [ + + ]: 30162 : (transition_capture && transition_capture->tcs_insert_new_table))
1266 alvherre@alvh.no-ip. 2560 : 32823 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2561 : : TRIGGER_EVENT_INSERT,
2562 : : true, NULL, slot,
2563 : : recheckIndexes, NULL,
2564 : : transition_capture,
2565 : : false);
8323 bruce@momjian.us 2566 : 6450894 : }
2567 : :
2568 : : bool
5445 tgl@sss.pgh.pa.us 2569 : 90 : ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2570 : : TupleTableSlot *slot)
2571 : : {
2572 : 90 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2384 andres@anarazel.de 2573 : 90 : HeapTuple newtuple = NULL;
2574 : : bool should_free;
2021 peter@eisentraut.org 2575 : 90 : TriggerData LocTriggerData = {0};
2576 : : int i;
2577 : :
5445 tgl@sss.pgh.pa.us 2578 : 90 : LocTriggerData.type = T_TriggerData;
2579 : 90 : LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2580 : : TRIGGER_EVENT_ROW |
2581 : : TRIGGER_EVENT_INSTEAD;
2582 : 90 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2583 [ + + ]: 273 : for (i = 0; i < trigdesc->numtriggers; i++)
2584 : : {
2585 : 192 : Trigger *trigger = &trigdesc->triggers[i];
2586 : : HeapTuple oldtuple;
2587 : :
2588 [ + + ]: 192 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2589 : : TRIGGER_TYPE_ROW,
2590 : : TRIGGER_TYPE_INSTEAD,
2591 : : TRIGGER_TYPE_INSERT))
2592 : 102 : continue;
2593 [ - + ]: 90 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2594 : : NULL, NULL, slot))
5445 tgl@sss.pgh.pa.us 2595 :UBC 0 : continue;
2596 : :
2384 andres@anarazel.de 2597 [ + - ]:CBC 90 : if (!newtuple)
2598 : 90 : newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2599 : :
2600 : 90 : LocTriggerData.tg_trigslot = slot;
5445 tgl@sss.pgh.pa.us 2601 : 90 : LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2602 : 90 : LocTriggerData.tg_trigger = trigger;
2603 : 90 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2604 : : i,
2605 : : relinfo->ri_TrigFunctions,
2606 : : relinfo->ri_TrigInstrument,
2607 [ + + ]: 90 : GetPerTupleMemoryContext(estate));
2608 [ + + ]: 90 : if (newtuple == NULL)
2609 : : {
2487 andres@anarazel.de 2610 [ + - ]: 9 : if (should_free)
2384 2611 : 9 : heap_freetuple(oldtuple);
2612 : 9 : return false; /* "do nothing" */
2613 : : }
2614 [ + + ]: 81 : else if (newtuple != oldtuple)
2615 : : {
2332 2616 : 27 : ExecForceStoreHeapTuple(newtuple, slot, false);
2617 : :
2384 2618 [ + - ]: 27 : if (should_free)
2619 : 27 : heap_freetuple(oldtuple);
2620 : :
2621 : : /* signal tuple should be re-fetched if used */
2622 : 27 : newtuple = NULL;
2623 : : }
2624 : : }
2625 : :
2626 : 81 : return true;
2627 : : }
2628 : :
2629 : : void
8323 bruce@momjian.us 2630 : 6239 : ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2631 : : {
2632 : : TriggerDesc *trigdesc;
2633 : : int i;
2021 peter@eisentraut.org 2634 : 6239 : TriggerData LocTriggerData = {0};
2635 : :
8323 bruce@momjian.us 2636 : 6239 : trigdesc = relinfo->ri_TrigDesc;
2637 : :
2638 [ + + ]: 6239 : if (trigdesc == NULL)
2639 : 6200 : return;
5445 tgl@sss.pgh.pa.us 2640 [ + + ]: 762 : if (!trigdesc->trig_delete_before_statement)
8323 bruce@momjian.us 2641 : 702 : return;
2642 : :
2643 : : /* no-op if we already fired BS triggers in this context */
2911 tgl@sss.pgh.pa.us 2644 [ + + ]: 60 : if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2645 : : CMD_DELETE))
2646 : 21 : return;
2647 : :
8323 bruce@momjian.us 2648 : 39 : LocTriggerData.type = T_TriggerData;
2649 : 39 : LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2650 : : TRIGGER_EVENT_BEFORE;
8069 2651 : 39 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
5445 tgl@sss.pgh.pa.us 2652 [ + + ]: 354 : for (i = 0; i < trigdesc->numtriggers; i++)
2653 : : {
2654 : 315 : Trigger *trigger = &trigdesc->triggers[i];
2655 : : HeapTuple newtuple;
2656 : :
2657 [ + + ]: 315 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2658 : : TRIGGER_TYPE_STATEMENT,
2659 : : TRIGGER_TYPE_BEFORE,
2660 : : TRIGGER_TYPE_DELETE))
2661 : 276 : continue;
5769 2662 [ + + ]: 39 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2663 : : NULL, NULL, NULL))
5806 2664 : 6 : continue;
2665 : :
8323 bruce@momjian.us 2666 : 33 : LocTriggerData.tg_trigger = trigger;
2667 : 33 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2668 : : i,
2669 : : relinfo->ri_TrigFunctions,
2670 : : relinfo->ri_TrigInstrument,
2671 [ + + ]: 33 : GetPerTupleMemoryContext(estate));
2672 : :
2673 [ - + ]: 33 : if (newtuple)
8084 tgl@sss.pgh.pa.us 2674 [ # # ]:UBC 0 : ereport(ERROR,
2675 : : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2676 : : errmsg("BEFORE STATEMENT trigger cannot return a value")));
2677 : : }
2678 : : }
2679 : :
2680 : : void
2992 rhodiumtoad@postgres 2681 :CBC 6166 : ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2682 : : TransitionCaptureState *transition_capture)
2683 : : {
8323 bruce@momjian.us 2684 : 6166 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2685 : :
5445 tgl@sss.pgh.pa.us 2686 [ + + + + ]: 6166 : if (trigdesc && trigdesc->trig_delete_after_statement)
1266 alvherre@alvh.no-ip. 2687 : 118 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2688 : : TRIGGER_EVENT_DELETE,
2689 : : false, NULL, NULL, NIL, NULL, transition_capture,
2690 : : false);
10232 vadim4o@yahoo.com 2691 : 6166 : }
2692 : :
2693 : : /*
2694 : : * Execute BEFORE ROW DELETE triggers.
2695 : : *
2696 : : * True indicates the caller can proceed with the delete. False indicates the
2697 : : * caller needs to suppress the delete; additionally, if requested, we pass
2698 : : * back the concurrently updated tuple, if any.
2699 : : */
2700 : : bool
5794 tgl@sss.pgh.pa.us 2701 : 173 : ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2702 : : ResultRelInfo *relinfo,
2703 : : ItemPointer tupleid,
2704 : : HeapTuple fdw_trigtuple,
2705 : : TupleTableSlot **epqslot,
2706 : : TM_Result *tmresult,
2707 : : TM_FailureData *tmfd,
2708 : : bool is_merge_delete)
2709 : : {
2384 andres@anarazel.de 2710 : 173 : TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
8863 tgl@sss.pgh.pa.us 2711 : 173 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
7318 2712 : 173 : bool result = true;
2021 peter@eisentraut.org 2713 : 173 : TriggerData LocTriggerData = {0};
2714 : : HeapTuple trigtuple;
2384 andres@anarazel.de 2715 : 173 : bool should_free = false;
2716 : : int i;
2717 : :
4185 noah@leadboat.com 2718 [ - + ]: 173 : Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2719 [ + + ]: 173 : if (fdw_trigtuple == NULL)
2720 : : {
2164 andres@anarazel.de 2721 : 165 : TupleTableSlot *epqslot_candidate = NULL;
2722 : :
2723 : : /*
2724 : : * Get a copy of the on-disk tuple we are planning to delete. In
2725 : : * general, if the tuple has been concurrently updated, we should
2726 : : * recheck it using EPQ. However, if this is a MERGE DELETE action,
2727 : : * we skip this EPQ recheck and leave it to the caller (it must do
2728 : : * additional rechecking, and might end up executing a different
2729 : : * action entirely).
2730 : : */
2384 2731 [ + + ]: 162 : if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
50 dean.a.rasheed@gmail 2732 : 165 : LockTupleExclusive, slot, !is_merge_delete,
2733 : 165 : &epqslot_candidate, tmresult, tmfd))
4185 noah@leadboat.com 2734 : 6 : return false;
2735 : :
2736 : : /*
2737 : : * If the tuple was concurrently updated and the caller of this
2738 : : * function requested the updated tuple, skip the trigger
2739 : : * execution.
2740 : : */
2164 andres@anarazel.de 2741 [ + + + - ]: 157 : if (epqslot_candidate != NULL && epqslot != NULL)
2742 : : {
2743 : 1 : *epqslot = epqslot_candidate;
2613 akapila@postgresql.o 2744 : 1 : return false;
2745 : : }
2746 : :
2384 andres@anarazel.de 2747 : 156 : trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2748 : : }
2749 : : else
2750 : : {
4185 noah@leadboat.com 2751 : 8 : trigtuple = fdw_trigtuple;
2332 andres@anarazel.de 2752 : 8 : ExecForceStoreHeapTuple(trigtuple, slot, false);
2753 : : }
2754 : :
9231 tgl@sss.pgh.pa.us 2755 : 164 : LocTriggerData.type = T_TriggerData;
8323 bruce@momjian.us 2756 : 164 : LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2757 : : TRIGGER_EVENT_ROW |
2758 : : TRIGGER_EVENT_BEFORE;
8863 tgl@sss.pgh.pa.us 2759 : 164 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
5445 2760 [ + + ]: 614 : for (i = 0; i < trigdesc->numtriggers; i++)
2761 : : {
2762 : : HeapTuple newtuple;
2763 : 481 : Trigger *trigger = &trigdesc->triggers[i];
2764 : :
2765 [ + + ]: 481 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2766 : : TRIGGER_TYPE_ROW,
2767 : : TRIGGER_TYPE_BEFORE,
2768 : : TRIGGER_TYPE_DELETE))
2769 : 314 : continue;
5769 2770 [ + + ]: 167 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2771 : : NULL, slot, NULL))
5806 2772 : 7 : continue;
2773 : :
2384 andres@anarazel.de 2774 : 160 : LocTriggerData.tg_trigslot = slot;
9231 tgl@sss.pgh.pa.us 2775 : 160 : LocTriggerData.tg_trigtuple = trigtuple;
8863 2776 : 160 : LocTriggerData.tg_trigger = trigger;
2777 : 160 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2778 : : i,
2779 : : relinfo->ri_TrigFunctions,
2780 : : relinfo->ri_TrigInstrument,
8993 2781 [ + + ]: 160 : GetPerTupleMemoryContext(estate));
10222 vadim4o@yahoo.com 2782 [ + + ]: 155 : if (newtuple == NULL)
2783 : : {
7318 tgl@sss.pgh.pa.us 2784 : 26 : result = false; /* tell caller to suppress delete */
10222 vadim4o@yahoo.com 2785 : 26 : break;
2786 : : }
9714 JanWieck@Yahoo.com 2787 [ + + ]: 129 : if (newtuple != trigtuple)
9396 2788 : 28 : heap_freetuple(newtuple);
2789 : : }
2384 andres@anarazel.de 2790 [ - + ]: 159 : if (should_free)
4185 noah@leadboat.com 2791 :UBC 0 : heap_freetuple(trigtuple);
2792 : :
7318 tgl@sss.pgh.pa.us 2793 :CBC 159 : return result;
2794 : : }
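: : /*
: :  * A minimal caller sketch (simplified; epqreturnslot, result and tmfd are
: :  * hypothetical local variables, and the surrounding executor logic is
: :  * elided).  A row-deleting caller is expected to consume the return value
: :  * roughly like this:
: :  *
: :  *     if (resultRelInfo->ri_TrigDesc &&
: :  *         resultRelInfo->ri_TrigDesc->trig_delete_before_row)
: :  *     {
: :  *         if (!ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
: :  *                                   tupleid, NULL, &epqreturnslot,
: :  *                                   &result, &tmfd, false))
: :  *             return NULL;
: :  *     }
: :  *
: :  * i.e. a false return means the DELETE must not be performed here; the
: :  * output arguments report whether it was suppressed by a trigger or
: :  * overtaken by a concurrent update.
: :  */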
2795 : :
2796 : : /*
2797 : : * Note: is_crosspart_update must be true if the DELETE is being performed
2798 : : * as part of a cross-partition update.
2799 : : */
2800 : : void
1266 alvherre@alvh.no-ip. 2801 : 809406 : ExecARDeleteTriggers(EState *estate,
2802 : : ResultRelInfo *relinfo,
2803 : : ItemPointer tupleid,
2804 : : HeapTuple fdw_trigtuple,
2805 : : TransitionCaptureState *transition_capture,
2806 : : bool is_crosspart_update)
2807 : : {
8863 tgl@sss.pgh.pa.us 2808 : 809406 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2809 : :
29 efujita@postgresql.o 2810 [ + + + + ]: 809406 : if (relinfo->ri_FdwRoutine && transition_capture &&
2811 [ + - ]: 2 : transition_capture->tcs_delete_old_table)
2812 : : {
2813 [ - + ]: 2 : Assert(relinfo->ri_RootResultRelInfo);
2814 [ + - ]: 2 : ereport(ERROR,
2815 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2816 : : errmsg("cannot collect transition tuples from child foreign tables")));
2817 : : }
2818 : :
2992 rhodiumtoad@postgres 2819 [ + + + + : 809404 : if ((trigdesc && trigdesc->trig_delete_after_row) ||
+ + ]
2820 [ + + ]: 2508 : (transition_capture && transition_capture->tcs_delete_old_table))
2821 : : {
513 akorotkov@postgresql 2822 : 3090 : TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2823 : :
2824 [ - + ]: 3090 : Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2825 [ + + ]: 3090 : if (fdw_trigtuple == NULL)
2826 : 3082 : GetTupleForTrigger(estate,
2827 : : NULL,
2828 : : relinfo,
2829 : : tupleid,
2830 : : LockTupleExclusive,
2831 : : slot,
2832 : : false,
2833 : : NULL,
2834 : : NULL,
2835 : : NULL);
2836 : : else
2332 andres@anarazel.de 2837 : 8 : ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2838 : :
1266 alvherre@alvh.no-ip. 2839 : 3090 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2840 : : TRIGGER_EVENT_DELETE,
2841 : : true, slot, NULL, NIL, NULL,
2842 : : transition_capture,
2843 : : is_crosspart_update);
2844 : : }
10232 vadim4o@yahoo.com 2845 : 809404 : }
2846 : :
2847 : : bool
5445 tgl@sss.pgh.pa.us 2848 : 30 : ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2849 : : HeapTuple trigtuple)
2850 : : {
2851 : 30 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2384 andres@anarazel.de 2852 : 30 : TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2021 peter@eisentraut.org 2853 : 30 : TriggerData LocTriggerData = {0};
2854 : : int i;
2855 : :
5445 tgl@sss.pgh.pa.us 2856 : 30 : LocTriggerData.type = T_TriggerData;
2857 : 30 : LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2858 : : TRIGGER_EVENT_ROW |
2859 : : TRIGGER_EVENT_INSTEAD;
2860 : 30 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2861 : :
2332 andres@anarazel.de 2862 : 30 : ExecForceStoreHeapTuple(trigtuple, slot, false);
2863 : :
5445 tgl@sss.pgh.pa.us 2864 [ + + ]: 177 : for (i = 0; i < trigdesc->numtriggers; i++)
2865 : : {
2866 : : HeapTuple rettuple;
2867 : 150 : Trigger *trigger = &trigdesc->triggers[i];
2868 : :
2869 [ + + ]: 150 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2870 : : TRIGGER_TYPE_ROW,
2871 : : TRIGGER_TYPE_INSTEAD,
2872 : : TRIGGER_TYPE_DELETE))
2873 : 120 : continue;
2874 [ - + ]: 30 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2875 : : NULL, slot, NULL))
5445 tgl@sss.pgh.pa.us 2876 :UBC 0 : continue;
2877 : :
2384 andres@anarazel.de 2878 :CBC 30 : LocTriggerData.tg_trigslot = slot;
5445 tgl@sss.pgh.pa.us 2879 : 30 : LocTriggerData.tg_trigtuple = trigtuple;
2880 : 30 : LocTriggerData.tg_trigger = trigger;
2881 : 30 : rettuple = ExecCallTriggerFunc(&LocTriggerData,
2882 : : i,
2883 : : relinfo->ri_TrigFunctions,
2884 : : relinfo->ri_TrigInstrument,
2885 [ + + ]: 30 : GetPerTupleMemoryContext(estate));
2886 [ + + ]: 30 : if (rettuple == NULL)
2887 : 3 : return false; /* Delete was suppressed */
2888 [ - + ]: 27 : if (rettuple != trigtuple)
5445 tgl@sss.pgh.pa.us 2889 :UBC 0 : heap_freetuple(rettuple);
2890 : : }
5445 tgl@sss.pgh.pa.us 2891 :CBC 27 : return true;
2892 : : }
2893 : :
2894 : : void
8323 bruce@momjian.us 2895 : 7697 : ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2896 : : {
2897 : : TriggerDesc *trigdesc;
2898 : : int i;
2021 peter@eisentraut.org 2899 : 7697 : TriggerData LocTriggerData = {0};
2900 : : Bitmapset *updatedCols;
2901 : :
8323 bruce@momjian.us 2902 : 7697 : trigdesc = relinfo->ri_TrigDesc;
2903 : :
2904 [ + + ]: 7697 : if (trigdesc == NULL)
2905 : 7608 : return;
5445 tgl@sss.pgh.pa.us 2906 [ + + ]: 2063 : if (!trigdesc->trig_update_before_statement)
8323 bruce@momjian.us 2907 : 1974 : return;
2908 : :
2909 : : /* no-op if we already fired BS triggers in this context */
2911 tgl@sss.pgh.pa.us 2910 [ - + ]: 89 : if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2911 : : CMD_UPDATE))
2911 tgl@sss.pgh.pa.us 2912 :UBC 0 : return;
2913 : :
2914 : : /* statement-level triggers operate on the parent table */
1671 heikki.linnakangas@i 2915 [ - + ]:CBC 89 : Assert(relinfo->ri_RootResultRelInfo == NULL);
2916 : :
2917 : 89 : updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2918 : :
8323 bruce@momjian.us 2919 : 89 : LocTriggerData.type = T_TriggerData;
2920 : 89 : LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2921 : : TRIGGER_EVENT_BEFORE;
8069 2922 : 89 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2007 peter@eisentraut.org 2923 : 89 : LocTriggerData.tg_updatedcols = updatedCols;
5445 tgl@sss.pgh.pa.us 2924 [ + + ]: 800 : for (i = 0; i < trigdesc->numtriggers; i++)
2925 : : {
2926 : 711 : Trigger *trigger = &trigdesc->triggers[i];
2927 : : HeapTuple newtuple;
2928 : :
2929 [ + + ]: 711 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2930 : : TRIGGER_TYPE_STATEMENT,
2931 : : TRIGGER_TYPE_BEFORE,
2932 : : TRIGGER_TYPE_UPDATE))
2933 : 622 : continue;
5769 2934 [ + + ]: 89 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2935 : : updatedCols, NULL, NULL))
5806 2936 : 3 : continue;
2937 : :
8323 bruce@momjian.us 2938 : 86 : LocTriggerData.tg_trigger = trigger;
2939 : 86 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2940 : : i,
2941 : : relinfo->ri_TrigFunctions,
2942 : : relinfo->ri_TrigInstrument,
2943 [ + - ]: 86 : GetPerTupleMemoryContext(estate));
2944 : :
2945 [ - + ]: 86 : if (newtuple)
8084 tgl@sss.pgh.pa.us 2946 [ # # ]:UBC 0 : ereport(ERROR,
2947 : : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2948 : : errmsg("BEFORE STATEMENT trigger cannot return a value")));
2949 : : }
2950 : : }
2951 : :
2952 : : void
2992 rhodiumtoad@postgres 2953 :CBC 7237 : ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2954 : : TransitionCaptureState *transition_capture)
2955 : : {
8323 bruce@momjian.us 2956 : 7237 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2957 : :
2958 : : /* statement-level triggers operate on the parent table */
1671 heikki.linnakangas@i 2959 [ - + ]: 7237 : Assert(relinfo->ri_RootResultRelInfo == NULL);
2960 : :
5445 tgl@sss.pgh.pa.us 2961 [ + + + + ]: 7237 : if (trigdesc && trigdesc->trig_update_after_statement)
1266 alvherre@alvh.no-ip. 2962 : 204 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2963 : : TRIGGER_EVENT_UPDATE,
2964 : : false, NULL, NULL, NIL,
2965 : : ExecGetAllUpdatedCols(relinfo, estate),
2966 : : transition_capture,
2967 : : false);
8323 bruce@momjian.us 2968 : 7237 : }
2969 : :
2970 : : bool
5794 tgl@sss.pgh.pa.us 2971 : 1287 : ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2972 : : ResultRelInfo *relinfo,
2973 : : ItemPointer tupleid,
2974 : : HeapTuple fdw_trigtuple,
2975 : : TupleTableSlot *newslot,
2976 : : TM_Result *tmresult,
2977 : : TM_FailureData *tmfd,
2978 : : bool is_merge_update)
2979 : : {
8863 2980 : 1287 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2384 andres@anarazel.de 2981 : 1287 : TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2982 : 1287 : HeapTuple newtuple = NULL;
2983 : : HeapTuple trigtuple;
2984 : 1287 : bool should_free_trig = false;
2985 : 1287 : bool should_free_new = false;
2021 peter@eisentraut.org 2986 : 1287 : TriggerData LocTriggerData = {0};
2987 : : int i;
2988 : : Bitmapset *updatedCols;
2989 : : LockTupleMode lockmode;
2990 : :
2991 : : /* Determine lock mode to use */
3774 andres@anarazel.de 2992 : 1287 : lockmode = ExecUpdateLockMode(estate, relinfo);
2993 : :
4185 noah@leadboat.com 2994 [ - + ]: 1287 : Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2995 [ + + ]: 1287 : if (fdw_trigtuple == NULL)
2996 : : {
2164 andres@anarazel.de 2997 : 1268 : TupleTableSlot *epqslot_candidate = NULL;
2998 : :
2999 : : /*
3000 : : * Get a copy of the on-disk tuple we are planning to update. In
3001 : : * general, if the tuple has been concurrently updated, we should
3002 : : * recheck it using EPQ. However, if this is a MERGE UPDATE action,
3003 : : * we skip this EPQ recheck and leave it to the caller (it must do
3004 : : * additional rechecking, and might end up executing a different
3005 : : * action entirely).
3006 : : */
2384 3007 [ + + ]: 1264 : if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
50 dean.a.rasheed@gmail 3008 : 1268 : lockmode, oldslot, !is_merge_update,
3009 : 1268 : &epqslot_candidate, tmresult, tmfd))
2384 andres@anarazel.de 3010 : 12 : return false; /* cancel the update action */
3011 : :
3012 : : /*
3013 : : * In READ COMMITTED isolation level it's possible that the target tuple
3014 : : * was changed by a concurrent update. In that case we have a raw
3015 : : * subplan output tuple in epqslot_candidate, and need to form a new
3016 : : * insertable tuple using ExecGetUpdateNewTuple to replace the one we
3017 : : * received in newslot. Neither we nor our callers have any further
3018 : : * interest in the passed-in tuple, so it's okay to overwrite newslot
3019 : : * with the newer data.
3020 : : */
2164 3021 [ + + ]: 1252 : if (epqslot_candidate != NULL)
3022 : : {
3023 : : TupleTableSlot *epqslot_clean;
3024 : :
1620 tgl@sss.pgh.pa.us 3025 : 3 : epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
3026 : : oldslot);
3027 : :
3028 : : /*
3029 : : * Typically, the caller's newslot was also generated by
3030 : : * ExecGetUpdateNewTuple, so that epqslot_clean will be the same
3031 : : * slot and copying is not needed. But do the right thing if it
3032 : : * isn't.
3033 : : */
601 3034 [ - + ]: 3 : if (unlikely(newslot != epqslot_clean))
2164 andres@anarazel.de 3035 :UBC 0 : ExecCopySlot(newslot, epqslot_clean);
3036 : :
3037 : : /*
3038 : : * At this point newslot contains a virtual tuple that may
3039 : : * reference some fields of oldslot's tuple in some disk buffer.
3040 : : * If that tuple is in a different page than the original target
3041 : : * tuple, then our only pin on that buffer is oldslot's, and we're
3042 : : * about to release it. Hence we'd better materialize newslot to
3043 : : * ensure it doesn't contain references into an unpinned buffer.
3044 : : * (We'd materialize it below anyway, but too late for safety.)
3045 : : */
601 tgl@sss.pgh.pa.us 3046 :CBC 3 : ExecMaterializeSlot(newslot);
3047 : : }
3048 : :
3049 : : /*
3050 : : * Here we convert oldslot to a materialized slot holding trigtuple.
3051 : : * Neither slot passed to the triggers will hold any buffer pin.
3052 : : */
1776 3053 : 1252 : trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
3054 : : }
3055 : : else
3056 : : {
3057 : : /* Put the FDW-supplied tuple into oldslot to unify the cases */
2332 andres@anarazel.de 3058 : 19 : ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
4185 noah@leadboat.com 3059 : 19 : trigtuple = fdw_trigtuple;
3060 : : }
3061 : :
9231 tgl@sss.pgh.pa.us 3062 : 1271 : LocTriggerData.type = T_TriggerData;
8277 bruce@momjian.us 3063 : 1271 : LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3064 : : TRIGGER_EVENT_ROW |
3065 : : TRIGGER_EVENT_BEFORE;
8863 tgl@sss.pgh.pa.us 3066 : 1271 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
1671 heikki.linnakangas@i 3067 : 1271 : updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2007 peter@eisentraut.org 3068 : 1271 : LocTriggerData.tg_updatedcols = updatedCols;
5445 tgl@sss.pgh.pa.us 3069 [ + + ]: 5729 : for (i = 0; i < trigdesc->numtriggers; i++)
3070 : : {
3071 : 4532 : Trigger *trigger = &trigdesc->triggers[i];
3072 : : HeapTuple oldtuple;
3073 : :
3074 [ + + ]: 4532 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3075 : : TRIGGER_TYPE_ROW,
3076 : : TRIGGER_TYPE_BEFORE,
3077 : : TRIGGER_TYPE_UPDATE))
3078 : 2231 : continue;
5769 3079 [ + + ]: 2301 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3080 : : updatedCols, oldslot, newslot))
5806 3081 : 49 : continue;
3082 : :
2384 andres@anarazel.de 3083 [ + + ]: 2252 : if (!newtuple)
3084 : 1265 : newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3085 : :
3086 : 2252 : LocTriggerData.tg_trigslot = oldslot;
9231 tgl@sss.pgh.pa.us 3087 : 2252 : LocTriggerData.tg_trigtuple = trigtuple;
3088 : 2252 : LocTriggerData.tg_newtuple = oldtuple = newtuple;
2384 andres@anarazel.de 3089 : 2252 : LocTriggerData.tg_newslot = newslot;
8863 tgl@sss.pgh.pa.us 3090 : 2252 : LocTriggerData.tg_trigger = trigger;
3091 : 2252 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
3092 : : i,
3093 : : relinfo->ri_TrigFunctions,
3094 : : relinfo->ri_TrigInstrument,
8993 3095 [ + - ]: 2252 : GetPerTupleMemoryContext(estate));
3096 : :
10222 vadim4o@yahoo.com 3097 [ + + ]: 2244 : if (newtuple == NULL)
3098 : : {
2384 andres@anarazel.de 3099 [ - + ]: 66 : if (should_free_trig)
4185 noah@leadboat.com 3100 :UBC 0 : heap_freetuple(trigtuple);
2384 andres@anarazel.de 3101 [ + + ]:CBC 66 : if (should_free_new)
3102 : 2 : heap_freetuple(oldtuple);
3103 : 66 : return false; /* "do nothing" */
3104 : : }
3105 [ + + ]: 2178 : else if (newtuple != oldtuple)
3106 : : {
211 peter@eisentraut.org 3107 : 652 : newtuple = check_modified_virtual_generated(RelationGetDescr(relinfo->ri_RelationDesc), newtuple);
3108 : :
2332 andres@anarazel.de 3109 : 652 : ExecForceStoreHeapTuple(newtuple, newslot, false);
3110 : :
3111 : : /*
3113 : : * If the tuple returned by the trigger (and now being stored) is the
3114 : : * old row version, and the heap tuple passed to the trigger was
3115 : : * allocated locally, materialize the slot. Otherwise we might free
3116 : : * the tuple while the slot still references it.
3116 : : */
2333 3117 [ - + - - ]: 652 : if (should_free_trig && newtuple == trigtuple)
2333 andres@anarazel.de 3118 :UBC 0 : ExecMaterializeSlot(newslot);
3119 : :
2384 andres@anarazel.de 3120 [ + + ]:CBC 652 : if (should_free_new)
3121 : 1 : heap_freetuple(oldtuple);
3122 : :
3123 : : /* signal tuple should be re-fetched if used */
3124 : 652 : newtuple = NULL;
3125 : : }
3126 : : }
3127 [ - + ]: 1197 : if (should_free_trig)
2384 andres@anarazel.de 3128 :UBC 0 : heap_freetuple(trigtuple);
3129 : :
2384 andres@anarazel.de 3130 :CBC 1197 : return true;
3131 : : }
3132 : :
3133 : : /*
3134 : : * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3135 : : * and destination partitions, respectively, of a cross-partition update of
3136 : : * the root partitioned table mentioned in the query, given by 'relinfo'.
3137 : : * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3138 : : * partition, and 'newslot' contains the "new" tuple in the destination
3139 : : * partition. This interface makes it possible to support the requirements of
3140 : : * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3141 : : * that case.
3142 : : */
3143 : : void
8863 tgl@sss.pgh.pa.us 3144 : 193654 : ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3145 : : ResultRelInfo *src_partinfo,
3146 : : ResultRelInfo *dst_partinfo,
3147 : : ItemPointer tupleid,
3148 : : HeapTuple fdw_trigtuple,
3149 : : TupleTableSlot *newslot,
3150 : : List *recheckIndexes,
3151 : : TransitionCaptureState *transition_capture,
3152 : : bool is_crosspart_update)
3153 : : {
3154 : 193654 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3155 : :
29 efujita@postgresql.o 3156 [ + + + + ]: 193654 : if (relinfo->ri_FdwRoutine && transition_capture &&
3157 [ - + ]: 2 : (transition_capture->tcs_update_old_table ||
29 efujita@postgresql.o 3158 [ # # ]:UBC 0 : transition_capture->tcs_update_new_table))
3159 : : {
29 efujita@postgresql.o 3160 [ - + ]:CBC 2 : Assert(relinfo->ri_RootResultRelInfo);
3161 [ + - ]: 2 : ereport(ERROR,
3162 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3163 : : errmsg("cannot collect transition tuples from child foreign tables")));
3164 : : }
3165 : :
2992 rhodiumtoad@postgres 3166 [ + + + + : 193652 : if ((trigdesc && trigdesc->trig_update_after_row) ||
+ + ]
3167 : 186 : (transition_capture &&
3168 [ + + ]: 186 : (transition_capture->tcs_update_old_table ||
3169 [ + - ]: 9 : transition_capture->tcs_update_new_table)))
3170 : : {
3171 : : /*
3172 : : * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3173 : : * update-partition-key operation, then this function is also called
3174 : : * separately for DELETE and INSERT to capture transition table rows.
3175 : : * In such case, either old tuple or new tuple can be NULL.
3176 : : */
3177 : : TupleTableSlot *oldslot;
3178 : : ResultRelInfo *tupsrc;
3179 : :
1266 alvherre@alvh.no-ip. 3180 [ + + - + : 1861 : Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
- + ]
3181 : : !is_crosspart_update);
3182 : :
513 akorotkov@postgresql 3183 [ + + ]: 1861 : tupsrc = src_partinfo ? src_partinfo : relinfo;
3184 : 1861 : oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3185 : :
3186 [ + + + + ]: 1861 : if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3187 : 1827 : GetTupleForTrigger(estate,
3188 : : NULL,
3189 : : tupsrc,
3190 : : tupleid,
3191 : : LockTupleExclusive,
3192 : : oldslot,
3193 : : false,
3194 : : NULL,
3195 : : NULL,
3196 : : NULL);
3197 [ + + ]: 34 : else if (fdw_trigtuple != NULL)
2332 andres@anarazel.de 3198 : 10 : ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3199 : : else
513 akorotkov@postgresql 3200 : 24 : ExecClearTuple(oldslot);
3201 : :
1266 alvherre@alvh.no-ip. 3202 : 1861 : AfterTriggerSaveEvent(estate, relinfo,
3203 : : src_partinfo, dst_partinfo,
3204 : : TRIGGER_EVENT_UPDATE,
3205 : : true,
3206 : : oldslot, newslot, recheckIndexes,
3207 : : ExecGetAllUpdatedCols(relinfo, estate),
3208 : : transition_capture,
3209 : : is_crosspart_update);
3210 : : }
10232 vadim4o@yahoo.com 3211 : 193652 : }
3212 : :
3213 : : bool
5445 tgl@sss.pgh.pa.us 3214 : 102 : ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3215 : : HeapTuple trigtuple, TupleTableSlot *newslot)
3216 : : {
3217 : 102 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2384 andres@anarazel.de 3218 : 102 : TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2124 tgl@sss.pgh.pa.us 3219 : 102 : HeapTuple newtuple = NULL;
3220 : : bool should_free;
2021 peter@eisentraut.org 3221 : 102 : TriggerData LocTriggerData = {0};
3222 : : int i;
3223 : :
5445 tgl@sss.pgh.pa.us 3224 : 102 : LocTriggerData.type = T_TriggerData;
3225 : 102 : LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3226 : : TRIGGER_EVENT_ROW |
3227 : : TRIGGER_EVENT_INSTEAD;
3228 : 102 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3229 : :
2332 andres@anarazel.de 3230 : 102 : ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3231 : :
5445 tgl@sss.pgh.pa.us 3232 [ + + ]: 378 : for (i = 0; i < trigdesc->numtriggers; i++)
3233 : : {
3234 : 291 : Trigger *trigger = &trigdesc->triggers[i];
3235 : : HeapTuple oldtuple;
3236 : :
3237 [ + + ]: 291 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3238 : : TRIGGER_TYPE_ROW,
3239 : : TRIGGER_TYPE_INSTEAD,
3240 : : TRIGGER_TYPE_UPDATE))
3241 : 189 : continue;
3242 [ - + ]: 102 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3243 : : NULL, oldslot, newslot))
5445 tgl@sss.pgh.pa.us 3244 :UBC 0 : continue;
3245 : :
2384 andres@anarazel.de 3246 [ + - ]:CBC 102 : if (!newtuple)
3247 : 102 : newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3248 : :
3249 : 102 : LocTriggerData.tg_trigslot = oldslot;
5311 tgl@sss.pgh.pa.us 3250 : 102 : LocTriggerData.tg_trigtuple = trigtuple;
2384 andres@anarazel.de 3251 : 102 : LocTriggerData.tg_newslot = newslot;
5311 tgl@sss.pgh.pa.us 3252 : 102 : LocTriggerData.tg_newtuple = oldtuple = newtuple;
3253 : :
5445 3254 : 102 : LocTriggerData.tg_trigger = trigger;
5311 3255 : 102 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
3256 : : i,
3257 : : relinfo->ri_TrigFunctions,
3258 : : relinfo->ri_TrigInstrument,
5445 3259 [ + + ]: 102 : GetPerTupleMemoryContext(estate));
3260 [ + + ]: 96 : if (newtuple == NULL)
3261 : : {
2384 andres@anarazel.de 3262 : 9 : return false; /* "do nothing" */
3263 : : }
3264 [ + + ]: 87 : else if (newtuple != oldtuple)
3265 : : {
2332 3266 : 69 : ExecForceStoreHeapTuple(newtuple, newslot, false);
3267 : :
2384 3268 [ + - ]: 69 : if (should_free)
3269 : 69 : heap_freetuple(oldtuple);
3270 : :
3271 : : /* signal tuple should be re-fetched if used */
3272 : 69 : newtuple = NULL;
3273 : : }
3274 : : }
3275 : :
3276 : 87 : return true;
3277 : : }
3278 : :
3279 : : void
6371 tgl@sss.pgh.pa.us 3280 : 1833 : ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3281 : : {
3282 : : TriggerDesc *trigdesc;
3283 : : int i;
2021 peter@eisentraut.org 3284 : 1833 : TriggerData LocTriggerData = {0};
3285 : :
6371 tgl@sss.pgh.pa.us 3286 : 1833 : trigdesc = relinfo->ri_TrigDesc;
3287 : :
3288 [ + + ]: 1833 : if (trigdesc == NULL)
3289 : 1827 : return;
5445 3290 [ + + ]: 367 : if (!trigdesc->trig_truncate_before_statement)
6371 3291 : 361 : return;
3292 : :
3293 : 6 : LocTriggerData.type = T_TriggerData;
3294 : 6 : LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3295 : : TRIGGER_EVENT_BEFORE;
3296 : 6 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3297 : :
5445 3298 [ + + ]: 18 : for (i = 0; i < trigdesc->numtriggers; i++)
3299 : : {
3300 : 12 : Trigger *trigger = &trigdesc->triggers[i];
3301 : : HeapTuple newtuple;
3302 : :
3303 [ + + ]: 12 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3304 : : TRIGGER_TYPE_STATEMENT,
3305 : : TRIGGER_TYPE_BEFORE,
3306 : : TRIGGER_TYPE_TRUNCATE))
3307 : 6 : continue;
5769 3308 [ - + ]: 6 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3309 : : NULL, NULL, NULL))
5806 tgl@sss.pgh.pa.us 3310 :UBC 0 : continue;
3311 : :
6371 tgl@sss.pgh.pa.us 3312 :CBC 6 : LocTriggerData.tg_trigger = trigger;
3313 : 6 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
3314 : : i,
3315 : : relinfo->ri_TrigFunctions,
3316 : : relinfo->ri_TrigInstrument,
3317 [ - + ]: 6 : GetPerTupleMemoryContext(estate));
3318 : :
3319 [ - + ]: 6 : if (newtuple)
6371 tgl@sss.pgh.pa.us 3320 [ # # ]:UBC 0 : ereport(ERROR,
3321 : : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3322 : : errmsg("BEFORE STATEMENT trigger cannot return a value")));
3323 : : }
3324 : : }
3325 : :
3326 : : void
6371 tgl@sss.pgh.pa.us 3327 :CBC 1829 : ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3328 : : {
3329 : 1829 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3330 : :
5445 3331 [ + + + + ]: 1829 : if (trigdesc && trigdesc->trig_truncate_after_statement)
1266 alvherre@alvh.no-ip. 3332 : 4 : AfterTriggerSaveEvent(estate, relinfo,
3333 : : NULL, NULL,
3334 : : TRIGGER_EVENT_TRUNCATE,
3335 : : false, NULL, NULL, NIL, NULL, NULL,
3336 : : false);
6371 tgl@sss.pgh.pa.us 3337 : 1829 : }
3338 : :
3339 : :
3340 : : /*
3341 : : * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3342 : : */
3343 : : static bool
5810 3344 : 6342 : GetTupleForTrigger(EState *estate,
3345 : : EPQState *epqstate,
3346 : : ResultRelInfo *relinfo,
3347 : : ItemPointer tid,
3348 : : LockTupleMode lockmode,
3349 : : TupleTableSlot *oldslot,
3350 : : bool do_epq_recheck,
3351 : : TupleTableSlot **epqslot,
3352 : : TM_Result *tmresultp,
3353 : : TM_FailureData *tmfdp)
3354 : : {
8863 3355 : 6342 : Relation relation = relinfo->ri_RelationDesc;
3356 : :
2164 andres@anarazel.de 3357 [ + + ]: 6342 : if (epqslot != NULL)
3358 : : {
3359 : : TM_Result test;
3360 : : TM_FailureData tmfd;
2359 3361 : 1433 : int lockflags = 0;
3362 : :
2164 3363 : 1433 : *epqslot = NULL;
3364 : :
3365 : : /* caller must pass an epqstate if EvalPlanQual is possible */
5794 tgl@sss.pgh.pa.us 3366 [ - + ]: 1433 : Assert(epqstate != NULL);
3367 : :
3368 : : /*
3369 : : * lock tuple for update
3370 : : */
2359 andres@anarazel.de 3371 [ + + ]: 1433 : if (!IsolationUsesXactSnapshot())
3372 : 1001 : lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
2298 3373 : 1433 : test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3374 : : estate->es_output_cid,
3375 : : lockmode, LockWaitBlock,
3376 : : lockflags,
3377 : : &tmfd);
3378 : :
3379 : : /* Let the caller know about the status of this operation */
908 dean.a.rasheed@gmail 3380 [ + + ]: 1431 : if (tmresultp)
3381 : 110 : *tmresultp = test;
1258 alvherre@alvh.no-ip. 3382 [ + + ]: 1431 : if (tmfdp)
3383 : 1428 : *tmfdp = tmfd;
3384 : :
9762 vadim4o@yahoo.com 3385 [ + + + + : 1431 : switch (test)
- - ]
3386 : : {
2359 andres@anarazel.de 3387 : 3 : case TM_SelfModified:
3388 : :
3389 : : /*
3390 : : * The target tuple was already updated or deleted by the
3391 : : * current command, or by a later command in the current
3392 : : * transaction. We ignore the tuple in the former case, and
3393 : : * throw an error in the latter case, for the same reasons
3394 : : * enumerated in ExecUpdate and ExecDelete in
3395 : : * nodeModifyTable.c.
3396 : : */
3397 [ + - ]: 3 : if (tmfd.cmax != estate->es_output_cid)
4698 kgrittn@postgresql.o 3398 [ + - ]: 3 : ereport(ERROR,
3399 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3400 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3401 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3402 : :
3403 : : /* treat it as deleted; do not process */
2384 andres@anarazel.de 3404 : 17 : return false;
3405 : :
2359 3406 : 1419 : case TM_Ok:
3407 [ + + ]: 1419 : if (tmfd.traversed)
3408 : : {
3409 : : /*
3410 : : * Recheck the tuple using EPQ, if requested. Otherwise,
3411 : : * just return that it was concurrently updated.
3412 : : */
50 dean.a.rasheed@gmail 3413 [ + + ]: 14 : if (do_epq_recheck)
3414 : : {
3415 : 6 : *epqslot = EvalPlanQual(epqstate,
3416 : : relation,
3417 : : relinfo->ri_RangeTableIndex,
3418 : : oldslot);
3419 : :
3420 : : /*
3421 : : * If EvalPlanQual failed for the updated tuple, we must
3422 : : * not process this tuple!
3423 : : */
3424 [ + - + + ]: 6 : if (TupIsNull(*epqslot))
3425 : : {
3426 : 2 : *epqslot = NULL;
3427 : 2 : return false;
3428 : : }
3429 : : }
3430 : : else
3431 : : {
3432 [ + - ]: 8 : if (tmresultp)
3433 : 8 : *tmresultp = TM_Updated;
2359 andres@anarazel.de 3434 : 8 : return false;
3435 : : }
3436 : : }
3437 : 1409 : break;
3438 : :
3439 : 1 : case TM_Updated:
3440 [ + - ]: 1 : if (IsolationUsesXactSnapshot())
3441 [ + - ]: 1 : ereport(ERROR,
3442 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3443 : : errmsg("could not serialize access due to concurrent update")));
2298 andres@anarazel.de 3444 [ # # ]:UBC 0 : elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3445 : : break;
3446 : :
2359 andres@anarazel.de 3447 :CBC 8 : case TM_Deleted:
3448 [ + + ]: 8 : if (IsolationUsesXactSnapshot())
3449 [ + - ]: 1 : ereport(ERROR,
3450 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3451 : : errmsg("could not serialize access due to concurrent delete")));
3452 : : /* tuple was deleted */
2384 3453 : 7 : return false;
3454 : :
2359 andres@anarazel.de 3455 :UBC 0 : case TM_Invisible:
3774 3456 [ # # ]: 0 : elog(ERROR, "attempted to lock invisible tuple");
3457 : : break;
3458 : :
9762 vadim4o@yahoo.com 3459 : 0 : default:
2298 andres@anarazel.de 3460 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3461 : : return false; /* keep compiler quiet */
3462 : : }
3463 : : }
3464 : : else
3465 : : {
3466 : : /*
3467 : : * We expect the tuple to be present, thus very simple error handling
3468 : : * suffices.
3469 : : */
2298 andres@anarazel.de 3470 [ - + ]:CBC 4909 : if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3471 : : oldslot))
2357 andres@anarazel.de 3472 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple for trigger");
3473 : : }
3474 : :
2384 andres@anarazel.de 3475 :CBC 6318 : return true;
3476 : : }
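: : /*
: :  * Usage summary (a sketch derived from the callers above in this file):
: :  *
: :  * BEFORE ROW triggers pass a non-NULL "epqslot", so the tuple is locked
: :  * with table_tuple_lock() and, if it turns out to have been concurrently
: :  * updated, is either rechecked via EvalPlanQual() or reported back to the
: :  * caller:
: :  *
: :  *     GetTupleForTrigger(estate, epqstate, relinfo, tupleid, lockmode,
: :  *                        oldslot, !is_merge_update, &epqslot_candidate,
: :  *                        tmresult, tmfd);
: :  *
: :  * AFTER ROW triggers pass epqslot == NULL, so the row is simply re-read
: :  * with table_tuple_fetch_row_version() under SnapshotAny:
: :  *
: :  *     GetTupleForTrigger(estate, NULL, relinfo, tupleid, LockTupleExclusive,
: :  *                        oldslot, false, NULL, NULL, NULL);
: :  */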
3477 : :
3478 : : /*
3480 : : * Is the trigger enabled to fire?
3480 : : */
3481 : : static bool
5769 tgl@sss.pgh.pa.us 3482 : 12231 : TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3483 : : Trigger *trigger, TriggerEvent event,
3484 : : Bitmapset *modifiedCols,
3485 : : TupleTableSlot *oldslot, TupleTableSlot *newslot)
3486 : : {
3487 : : /* Check replication-role-dependent enable state */
5806 3488 [ + + ]: 12231 : if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3489 : : {
3490 [ + + ]: 64 : if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3491 [ + + ]: 40 : trigger->tgenabled == TRIGGER_DISABLED)
3492 : 42 : return false;
3493 : : }
3494 : : else /* ORIGIN or LOCAL role */
3495 : : {
3496 [ + + ]: 12167 : if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3497 [ + + ]: 12166 : trigger->tgenabled == TRIGGER_DISABLED)
3498 : 79 : return false;
3499 : : }
3500 : :
3501 : : /*
3502 : : * Check for column-specific trigger (only possible for UPDATE, and in
3503 : : * fact we *must* ignore tgattr for other event types)
3504 : : */
3505 [ + + + + ]: 12110 : if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3506 : : {
3507 : : int i;
3508 : : bool modified;
3509 : :
3510 : 215 : modified = false;
3511 [ + + ]: 281 : for (i = 0; i < trigger->tgnattr; i++)
3512 : : {
3513 [ + + ]: 239 : if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3514 : : modifiedCols))
3515 : : {
3516 : 173 : modified = true;
3517 : 173 : break;
3518 : : }
3519 : : }
3520 [ + + ]: 215 : if (!modified)
3521 : 42 : return false;
3522 : : }
3523 : :
3524 : : /* Check for WHEN clause */
5769 3525 [ + + ]: 12068 : if (trigger->tgqual)
3526 : : {
3527 : : ExprState **predicate;
3528 : : ExprContext *econtext;
3529 : : MemoryContext oldContext;
3530 : : int i;
3531 : :
3532 [ - + ]: 285 : Assert(estate != NULL);
3533 : :
3534 : : /*
3535 : : * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3536 : : * matching element of relinfo->ri_TrigWhenExprs[]
3537 : : */
3538 : 285 : i = trigger - relinfo->ri_TrigDesc->triggers;
3539 : 285 : predicate = &relinfo->ri_TrigWhenExprs[i];
3540 : :
3541 : : /*
3542 : : * If this is the first time through for this WHEN expression, build
3543 : : * expression node trees for it. Keep them in the per-query memory context so
3544 : : * they'll survive throughout the query.
3545 : : */
3098 andres@anarazel.de 3546 [ + + ]: 285 : if (*predicate == NULL)
3547 : : {
3548 : : Node *tgqual;
3549 : :
5769 tgl@sss.pgh.pa.us 3550 : 151 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3551 : 151 : tgqual = stringToNode(trigger->tgqual);
211 peter@eisentraut.org 3552 : 151 : tgqual = expand_generated_columns_in_expr(tgqual, relinfo->ri_RelationDesc, PRS2_OLD_VARNO);
3553 : 151 : tgqual = expand_generated_columns_in_expr(tgqual, relinfo->ri_RelationDesc, PRS2_NEW_VARNO);
3554 : : /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
5079 tgl@sss.pgh.pa.us 3555 : 151 : ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3556 : 151 : ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3557 : : /* ExecPrepareQual wants implicit-AND form */
5769 3558 : 151 : tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3098 andres@anarazel.de 3559 : 151 : *predicate = ExecPrepareQual((List *) tgqual, estate);
5769 tgl@sss.pgh.pa.us 3560 : 151 : MemoryContextSwitchTo(oldContext);
3561 : : }
3562 : :
3563 : : /*
3564 : : * We will use the EState's per-tuple context for evaluating WHEN
3565 : : * expressions (creating it if it's not already there).
3566 : : */
3567 [ + + ]: 285 : econtext = GetPerTupleExprContext(estate);
3568 : :
3569 : : /*
3570 : : * Finally evaluate the expression, making the old and/or new tuples
3571 : : * available as INNER_VAR/OUTER_VAR respectively.
3572 : : */
3573 : 285 : econtext->ecxt_innertuple = oldslot;
3574 : 285 : econtext->ecxt_outertuple = newslot;
3098 andres@anarazel.de 3575 [ + + ]: 285 : if (!ExecQual(*predicate, econtext))
5769 tgl@sss.pgh.pa.us 3576 : 159 : return false;
3577 : : }
3578 : :
5806 3579 : 11909 : return true;
3580 : : }
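: : /*
: :  * In short, a trigger fires only if all three checks above pass: its
: :  * enable state is compatible with the current session_replication_role,
: :  * at least one column listed in its UPDATE OF clause was actually
: :  * modified (checked only for UPDATE events), and its WHEN clause, if
: :  * any, evaluates to true with the OLD row bound as INNER_VAR and the
: :  * NEW row bound as OUTER_VAR.
: :  */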
3581 : :
3582 : :
3583 : : /* ----------
3584 : : * After-trigger stuff
3585 : : *
3586 : : * The AfterTriggersData struct holds data about pending AFTER trigger events
3587 : : * during the current transaction tree. (BEFORE triggers are fired
3588 : : * immediately so we don't need any persistent state about them.) The struct
3589 : : * and most of its subsidiary data are kept in TopTransactionContext; however
3590 : : * some data that can be discarded sooner appears in the CurTransactionContext
3591 : : * of the relevant subtransaction. Also, the individual event records are
3592 : : * kept in a separate sub-context of TopTransactionContext. This is done
3593 : : * mainly so that it's easy to tell from a memory context dump how much space
3594 : : * is being eaten by trigger events.
3595 : : *
3596 : : * Because the list of pending events can grow large, we go to some
3597 : : * considerable effort to minimize per-event memory consumption. The event
3598 : : * records are grouped into chunks and common data for similar events in the
3599 : : * same chunk is only stored once.
3600 : : *
3601 : : * XXX We need to be able to save the per-event data in a file if it grows too
3602 : : * large.
3603 : : * ----------
3604 : : */
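: : /*
: :  * Rough lifecycle sketch (simplified; the entry points appear further
: :  * below and are declared in commands/trigger.h):
: :  *
: :  *     AfterTriggerBeginXact();
: :  *       AfterTriggerBeginQuery();
: :  *         ... executor queues events via the ExecAR* routines,
: :  *             which call AfterTriggerSaveEvent() ...
: :  *       AfterTriggerEndQuery(estate);    -- fires immediate-mode triggers
: :  *     AfterTriggerFireDeferred();        -- at pre-commit, fires deferred events
: :  *     AfterTriggerEndXact(true);         -- or AfterTriggerAbortXact() on abort
: :  */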
3605 : :
3606 : : /* Per-trigger SET CONSTRAINT status */
3607 : : typedef struct SetConstraintTriggerData
3608 : : {
3609 : : Oid sct_tgoid;
3610 : : bool sct_tgisdeferred;
3611 : : } SetConstraintTriggerData;
3612 : :
3613 : : typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3614 : :
3615 : : /*
3616 : : * SET CONSTRAINT intra-transaction status.
3617 : : *
3618 : : * We make this a single palloc'd object so it can be copied and freed easily.
3619 : : *
3620 : : * all_isset and all_isdeferred are used to keep track
3621 : : * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3622 : : *
3623 : : * trigstates[] stores per-trigger tgisdeferred settings.
3624 : : */
3625 : : typedef struct SetConstraintStateData
3626 : : {
3627 : : bool all_isset;
3628 : : bool all_isdeferred;
3629 : : int numstates; /* number of trigstates[] entries in use */
3630 : : int numalloc; /* allocated size of trigstates[] */
3631 : : SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3632 : : } SetConstraintStateData;
3633 : :
3634 : : typedef SetConstraintStateData *SetConstraintState;
3635 : :
3636 : :
3637 : : /*
3638 : : * Per-trigger-event data
3639 : : *
3640 : : * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3641 : : * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3642 : : * Each event record also has an associated AfterTriggerSharedData that is
3643 : : * shared across all instances of similar events within a "chunk".
3644 : : *
3645 : : * For row-level triggers, we arrange not to waste storage on unneeded ctid
3646 : : * fields. Updates of regular tables use two; inserts and deletes of regular
3647 : : * tables use one; foreign tables always use zero and save the tuple(s) to a
3648 : : * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3649 : : * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3650 : : * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3651 : : * tuple(s). This permits storing tuples once regardless of the number of
3652 : : * row-level triggers on a foreign table.
3653 : : *
3654 : : * When updates on partitioned tables cause rows to move between partitions,
3655 : : * the OIDs of both partitions are stored too, so that the tuples can be
3656 : : * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3657 : : * partition update").
3658 : : *
3659 : : * Note that we need triggers on foreign tables to be fired in exactly the
3660 : : * order they were queued, so that the tuples come out of the tuplestore in
3661 : : * the right order. To ensure that, we forbid deferrable (constraint)
3662 : : * triggers on foreign tables. This also ensures that such triggers do not
3663 : : * get deferred into outer trigger query levels, meaning that it's okay to
3664 : : * destroy the tuplestore at the end of the query level.
3665 : : *
3666 : : * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3667 : : * require no ctid field. We lack the flag bit space to neatly represent that
3668 : : * distinct case, and it seems unlikely to be worth much trouble.
3669 : : *
3670 : : * Note: ats_firing_id is initially zero and is set to something else when
3671 : : * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3672 : : * cycle the trigger will be fired in (or was fired in, if DONE is set).
3673 : : * Although this is mutable state, we can keep it in AfterTriggerSharedData
3674 : : * because all instances of the same type of event in a given event list will
3675 : : * be fired at the same time, if they were queued between the same firing
3676 : : * cycles. So we need only ensure that ats_firing_id is zero when attaching
3677 : : * a new event to an existing AfterTriggerSharedData record.
3678 : : */
3679 : : typedef uint32 TriggerFlags;
3680 : :
3681 : : #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3682 : : #define AFTER_TRIGGER_DONE 0x80000000
3683 : : #define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3684 : : /* bits describing the size and tuple sources of this event */
3685 : : #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3686 : : #define AFTER_TRIGGER_FDW_FETCH 0x20000000
3687 : : #define AFTER_TRIGGER_1CTID 0x10000000
3688 : : #define AFTER_TRIGGER_2CTID 0x30000000
3689 : : #define AFTER_TRIGGER_CP_UPDATE 0x08000000
3690 : : #define AFTER_TRIGGER_TUP_BITS 0x38000000
3691 : : typedef struct AfterTriggerSharedData *AfterTriggerShared;
3692 : :
3693 : : typedef struct AfterTriggerSharedData
3694 : : {
3695 : : TriggerEvent ats_event; /* event type indicator, see trigger.h */
3696 : : Oid ats_tgoid; /* the trigger's ID */
3697 : : Oid ats_relid; /* the relation it's on */
3698 : : Oid ats_rolid; /* role to execute the trigger */
3699 : : CommandId ats_firing_id; /* ID for firing cycle */
3700 : : struct AfterTriggersTableData *ats_table; /* transition table access */
3701 : : Bitmapset *ats_modifiedcols; /* modified columns */
3702 : : } AfterTriggerSharedData;
3703 : :
3704 : : typedef struct AfterTriggerEventData *AfterTriggerEvent;
3705 : :
3706 : : typedef struct AfterTriggerEventData
3707 : : {
3708 : : TriggerFlags ate_flags; /* status bits and offset to shared data */
3709 : : ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3710 : : ItemPointerData ate_ctid2; /* new updated tuple */
3711 : :
3712 : : /*
3713 : : * During a cross-partition update of a partitioned table, we also store
3714 : : * the OIDs of source and destination partitions that are needed to fetch
3715 : : * the old (ctid1) and the new tuple (ctid2) from, respectively.
3716 : : */
3717 : : Oid ate_src_part;
3718 : : Oid ate_dst_part;
3719 : : } AfterTriggerEventData;
3720 : :
3721 : : /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3722 : : typedef struct AfterTriggerEventDataNoOids
3723 : : {
3724 : : TriggerFlags ate_flags;
3725 : : ItemPointerData ate_ctid1;
3726 : : ItemPointerData ate_ctid2;
3727 : : } AfterTriggerEventDataNoOids;
3728 : :
3729 : : /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3730 : : typedef struct AfterTriggerEventDataOneCtid
3731 : : {
3732 : : TriggerFlags ate_flags; /* status bits and offset to shared data */
3733 : : ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3734 : : } AfterTriggerEventDataOneCtid;
3735 : :
3736 : : /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3737 : : typedef struct AfterTriggerEventDataZeroCtids
3738 : : {
3739 : : TriggerFlags ate_flags; /* status bits and offset to shared data */
3740 : : } AfterTriggerEventDataZeroCtids;
3741 : :
3742 : : #define SizeofTriggerEvent(evt) \
3743 : : (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3744 : : sizeof(AfterTriggerEventData) : \
3745 : : (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3746 : : sizeof(AfterTriggerEventDataNoOids) : \
3747 : : (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3748 : : sizeof(AfterTriggerEventDataOneCtid) : \
3749 : : sizeof(AfterTriggerEventDataZeroCtids))))
3750 : :
3751 : : #define GetTriggerSharedData(evt) \
3752 : : ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
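: : /*
: :  * Example (a sketch): ate_flags packs both the event's layout and the
: :  * offset of its AfterTriggerSharedData within the chunk, so given an
: :  * event pointer "evt":
: :  *
: :  *     AfterTriggerShared evtshared = GetTriggerSharedData(evt);
: :  *     Size        eventsize = SizeofTriggerEvent(evt);
: :  *
: :  * where eventsize is the size of AfterTriggerEventDataZeroCtids,
: :  * ...OneCtid, ...NoOids or the full AfterTriggerEventData, depending on
: :  * the AFTER_TRIGGER_TUP_BITS setting in ate_flags.
: :  */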
3753 : :
3754 : : /*
3755 : : * To avoid palloc overhead, we keep trigger events in arrays in successively-
3756 : : * larger chunks (a slightly more sophisticated version of an expansible
3757 : : * array). The space between CHUNK_DATA_START and freeptr is occupied by
3758 : : * AfterTriggerEventData records; the space between endfree and endptr is
3759 : : * occupied by AfterTriggerSharedData records.
3760 : : */
3761 : : typedef struct AfterTriggerEventChunk
3762 : : {
3763 : : struct AfterTriggerEventChunk *next; /* list link */
3764 : : char *freeptr; /* start of free space in chunk */
3765 : : char *endfree; /* end of free space in chunk */
3766 : : char *endptr; /* end of chunk */
3767 : : /* event data follows here */
3768 : : } AfterTriggerEventChunk;
3769 : :
3770 : : #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3771 : :
3772 : : /* A list of events */
3773 : : typedef struct AfterTriggerEventList
3774 : : {
3775 : : AfterTriggerEventChunk *head;
3776 : : AfterTriggerEventChunk *tail;
3777 : : char *tailfree; /* freeptr of tail chunk */
3778 : : } AfterTriggerEventList;
3779 : :
3780 : : /* Macros to help in iterating over a list of events */
3781 : : #define for_each_chunk(cptr, evtlist) \
3782 : : for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3783 : : #define for_each_event(eptr, cptr) \
3784 : : for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3785 : : (char *) eptr < (cptr)->freeptr; \
3786 : : eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3787 : : /* Use this if no special per-chunk processing is needed */
3788 : : #define for_each_event_chunk(eptr, cptr, evtlist) \
3789 : : for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3790 : :
3791 : : /* Macros for iterating from a start point that might not be list start */
3792 : : #define for_each_chunk_from(cptr) \
3793 : : for (; cptr != NULL; cptr = cptr->next)
3794 : : #define for_each_event_from(eptr, cptr) \
3795 : : for (; \
3796 : : (char *) eptr < (cptr)->freeptr; \
3797 : : eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
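: : /*
: :  * Typical iteration pattern (a sketch; afterTriggers.events is the
: :  * transaction-lifespan deferred-event list declared further below):
: :  *
: :  *     AfterTriggerEvent event;
: :  *     AfterTriggerEventChunk *chunk;
: :  *
: :  *     for_each_event_chunk(event, chunk, afterTriggers.events)
: :  *     {
: :  *         AfterTriggerShared evtshared = GetTriggerSharedData(event);
: :  *
: :  *         ... examine event->ate_flags and evtshared->ats_event ...
: :  *     }
: :  */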
3798 : :
3799 : :
3800 : : /*
3801 : : * All per-transaction data for the AFTER TRIGGERS module.
3802 : : *
3803 : : * AfterTriggersData has the following fields:
3804 : : *
3805 : : * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3806 : : * We mark firable events with the current firing cycle's ID so that we can
3807 : : * tell which ones to work on. This ensures sane behavior if a trigger
3808 : : * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3809 : : * only fire those events that weren't already scheduled for firing.
3810 : : *
3811 : : * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3812 : : * This is saved and restored across failed subtransactions.
3813 : : *
3814 : : * events is the current list of deferred events. This is global across
3815 : : * all subtransactions of the current transaction. In a subtransaction
3816 : : * abort, we know that the events added by the subtransaction are at the
3817 : : * end of the list, so it is relatively easy to discard them. The event
3818 : : * list chunks themselves are stored in event_cxt.
3819 : : *
3820 : : * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3821 : : * (-1 when the stack is empty).
3822 : : *
3823 : : * query_stack[query_depth] is the per-query-level data, including these fields:
3824 : : *
3825 : : * events is a list of AFTER trigger events queued by the current query.
3826 : : * None of these are valid until the matching AfterTriggerEndQuery call
3827 : : * occurs. At that point we fire immediate-mode triggers, and append any
3828 : : * deferred events to the main events list.
3829 : : *
3830 : : * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3831 : : * needed by events queued by the current query. (Note: we use just one
3832 : : * tuplestore even though more than one foreign table might be involved.
3833 : : * This is okay because tuplestores don't really care what's in the tuples
3834 : : * they store; but it's possible that someday it'd break.)
3835 : : *
3836 : : * tables is a List of AfterTriggersTableData structs for target tables
3837 : : * of the current query (see below).
3838 : : *
3839 : : * maxquerydepth is just the allocated length of query_stack.
3840 : : *
3841 : : * trans_stack holds per-subtransaction data, including these fields:
3842 : : *
3843 : : * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3844 : : * state data. Each subtransaction level that modifies that state first
3845 : : * saves a copy, which we use to restore the state if we abort.
3846 : : *
3847 : : * events is a copy of the events head/tail pointers,
3848 : : * which we use to restore those values during subtransaction abort.
3849 : : *
3850 : : * query_depth is the subtransaction-start-time value of query_depth,
3851 : : * which we similarly use to clean up at subtransaction abort.
3852 : : *
3853 : : * firing_counter is the subtransaction-start-time value of firing_counter.
3854 : : * We use this to recognize which deferred triggers were fired (or marked
3855 : : * for firing) within an aborted subtransaction.
3856 : : *
3857 : : * We use GetCurrentTransactionNestLevel() to determine the correct array
3858 : : * index in trans_stack. maxtransdepth is the number of allocated entries in
3859 : : * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3860 : : * in cases where errors during subxact abort cause multiple invocations
3861 : : * of AfterTriggerEndSubXact() at the same nesting depth.)
3862 : : *
3863 : : * We create an AfterTriggersTableData struct for each target table of the
3864 : : * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3865 : : * either transition tables or statement-level triggers. This is used to
3866 : : * hold the relevant transition tables, as well as info tracking whether
3867 : : * we already queued the statement triggers. (We use that info to prevent
3868 : : * firing the same statement triggers more than once per statement, or really
3869 : : * once per transition table set.) These structs, along with the transition
3870 : : * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3871 : : * That's sufficient lifespan because we don't allow transition tables to be
3872 : : * used by deferrable triggers, so they only need to survive until
3873 : : * AfterTriggerEndQuery.
3874 : : */
3875 : : typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3876 : : typedef struct AfterTriggersTransData AfterTriggersTransData;
3877 : : typedef struct AfterTriggersTableData AfterTriggersTableData;
3878 : :
3879 : : typedef struct AfterTriggersData
3880 : : {
3881 : : CommandId firing_counter; /* next firing ID to assign */
3882 : : SetConstraintState state; /* the active S C state */
3883 : : AfterTriggerEventList events; /* deferred-event list */
3884 : : MemoryContext event_cxt; /* memory context for events, if any */
3885 : :
3886 : : /* per-query-level data: */
3887 : : AfterTriggersQueryData *query_stack; /* array of structs shown below */
3888 : : int query_depth; /* current index in above array */
3889 : : int maxquerydepth; /* allocated len of above array */
3890 : :
3891 : : /* per-subtransaction-level data: */
3892 : : AfterTriggersTransData *trans_stack; /* array of structs shown below */
3893 : : int maxtransdepth; /* allocated len of above array */
3894 : : } AfterTriggersData;
3895 : :
3896 : : struct AfterTriggersQueryData
3897 : : {
3898 : : AfterTriggerEventList events; /* events pending from this query */
3899 : : Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3900 : : List *tables; /* list of AfterTriggersTableData, see below */
3901 : : };
3902 : :
3903 : : struct AfterTriggersTransData
3904 : : {
3905 : : /* these fields are just for resetting at subtrans abort: */
3906 : : SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3907 : : AfterTriggerEventList events; /* saved list pointer */
3908 : : int query_depth; /* saved query_depth */
3909 : : CommandId firing_counter; /* saved firing_counter */
3910 : : };
3911 : :
3912 : : struct AfterTriggersTableData
3913 : : {
3914 : : /* relid + cmdType form the lookup key for these structs: */
3915 : : Oid relid; /* target table's OID */
3916 : : CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3917 : : bool closed; /* true when no longer OK to add tuples */
3918 : : bool before_trig_done; /* did we already queue BS triggers? */
3919 : : bool after_trig_done; /* did we already queue AS triggers? */
3920 : : AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3921 : :
3922 : : /*
3923 : : * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3924 : : * MERGE can run all three actions in a single statement. Note that UPDATE
3925 : : * needs both old and new transition tables whereas INSERT needs only new,
3926 : : * and DELETE needs only old.
3927 : : */
3928 : :
3929 : : /* "old" transition table for UPDATE, if any */
3930 : : Tuplestorestate *old_upd_tuplestore;
3931 : : /* "new" transition table for UPDATE, if any */
3932 : : Tuplestorestate *new_upd_tuplestore;
3933 : : /* "old" transition table for DELETE, if any */
3934 : : Tuplestorestate *old_del_tuplestore;
3935 : : /* "new" transition table for INSERT, if any */
3936 : : Tuplestorestate *new_ins_tuplestore;
3937 : :
3938 : : TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3939 : : };
3940 : :
3941 : : static AfterTriggersData afterTriggers;
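: : /*
: :  * For example (sketch of the data flow described above): while a query
: :  * runs, new events land in
: :  * afterTriggers.query_stack[afterTriggers.query_depth].events; at
: :  * AfterTriggerEndQuery() the immediate-mode ones are fired and any
: :  * deferred ones are appended to afterTriggers.events, whose chunks live
: :  * in afterTriggers.event_cxt.  On subtransaction abort,
: :  * trans_stack[GetCurrentTransactionNestLevel()] supplies the saved state
: :  * needed to roll those lists back.
: :  */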
3942 : :
3943 : : static void AfterTriggerExecute(EState *estate,
3944 : : AfterTriggerEvent event,
3945 : : ResultRelInfo *relInfo,
3946 : : ResultRelInfo *src_relInfo,
3947 : : ResultRelInfo *dst_relInfo,
3948 : : TriggerDesc *trigdesc,
3949 : : FmgrInfo *finfo,
3950 : : Instrumentation *instr,
3951 : : MemoryContext per_tuple_context,
3952 : : TupleTableSlot *trig_tuple_slot1,
3953 : : TupleTableSlot *trig_tuple_slot2);
3954 : : static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
3955 : : CmdType cmdType);
3956 : : static TupleTableSlot *GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
3957 : : TupleDesc tupdesc);
3958 : : static Tuplestorestate *GetAfterTriggersTransitionTable(int event,
3959 : : TupleTableSlot *oldslot,
3960 : : TupleTableSlot *newslot,
3961 : : TransitionCaptureState *transition_capture);
3962 : : static void TransitionTableAddTuple(EState *estate,
3963 : : TransitionCaptureState *transition_capture,
3964 : : ResultRelInfo *relinfo,
3965 : : TupleTableSlot *slot,
3966 : : TupleTableSlot *original_insert_tuple,
3967 : : Tuplestorestate *tuplestore);
3968 : : static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
3969 : : static SetConstraintState SetConstraintStateCreate(int numalloc);
3970 : : static SetConstraintState SetConstraintStateCopy(SetConstraintState origstate);
3971 : : static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3972 : : Oid tgoid, bool tgisdeferred);
3973 : : static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3974 : :
3975 : :
3976 : : /*
3977 : : * Get the FDW tuplestore for the current trigger query level, creating it
3978 : : * if necessary.
3979 : : */
3980 : : static Tuplestorestate *
2912 3981 : 50 : GetCurrentFDWTuplestore(void)
3982 : : {
3983 : : Tuplestorestate *ret;
3984 : :
3985 : 50 : ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
4185 noah@leadboat.com 3986 [ + + ]: 50 : if (ret == NULL)
3987 : : {
3988 : : MemoryContext oldcxt;
3989 : : ResourceOwner saveResourceOwner;
3990 : :
3991 : : /*
3992 : : * Make the tuplestore valid until end of subtransaction. We really
3993 : : * only need it until AfterTriggerEndQuery().
3994 : : */
2912 tgl@sss.pgh.pa.us 3995 : 18 : oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4185 noah@leadboat.com 3996 : 18 : saveResourceOwner = CurrentResourceOwner;
2887 tgl@sss.pgh.pa.us 3997 : 18 : CurrentResourceOwner = CurTransactionResourceOwner;
3998 : :
3999 : 18 : ret = tuplestore_begin_heap(false, false, work_mem);
4000 : :
4185 noah@leadboat.com 4001 : 18 : CurrentResourceOwner = saveResourceOwner;
4002 : 18 : MemoryContextSwitchTo(oldcxt);
4003 : :
2912 tgl@sss.pgh.pa.us 4004 : 18 : afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
4005 : : }
4006 : :
4185 noah@leadboat.com 4007 : 50 : return ret;
4008 : : }
4009 : :
4010 : : /* ----------
4011 : : * afterTriggerCheckState()
4012 : : *
4013 : : * Returns true if the trigger event is actually in state DEFERRED.
4014 : : * ----------
4015 : : */
4016 : : static bool
6161 tgl@sss.pgh.pa.us 4017 : 5923 : afterTriggerCheckState(AfterTriggerShared evtshared)
4018 : : {
4019 : 5923 : Oid tgoid = evtshared->ats_tgoid;
3971 rhaas@postgresql.org 4020 : 5923 : SetConstraintState state = afterTriggers.state;
4021 : : int i;
4022 : :
4023 : : /*
4024 : : * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
4025 : : * constraints declared NOT DEFERRABLE), the state is always false.
4026 : : */
6161 tgl@sss.pgh.pa.us 4027 [ + + ]: 5923 : if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
9474 JanWieck@Yahoo.com 4028 : 5554 : return false;
4029 : :
4030 : : /*
4031 : : * If constraint state exists, SET CONSTRAINTS might have been executed
4032 : : * either for this trigger or for all triggers.
4033 : : */
3971 rhaas@postgresql.org 4034 [ + + ]: 369 : if (state != NULL)
4035 : : {
4036 : : /* Check for SET CONSTRAINTS for this specific trigger. */
4037 [ + + ]: 158 : for (i = 0; i < state->numstates; i++)
4038 : : {
4039 [ + + ]: 125 : if (state->trigstates[i].sct_tgoid == tgoid)
4040 : 30 : return state->trigstates[i].sct_tgisdeferred;
4041 : : }
4042 : :
4043 : : /* Check for SET CONSTRAINTS ALL. */
4044 [ + + ]: 33 : if (state->all_isset)
4045 : 27 : return state->all_isdeferred;
4046 : : }
4047 : :
4048 : : /*
4049 : : * Otherwise return the default state for the trigger.
4050 : : */
6161 tgl@sss.pgh.pa.us 4051 : 312 : return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
4052 : : }
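/*
 * Illustrative sketch, not part of trigger.c: the precedence that
 * afterTriggerCheckState() implements.  A per-trigger SET CONSTRAINTS
 * entry wins over SET CONSTRAINTS ALL, which wins over the trigger's
 * INITIALLY DEFERRED/IMMEDIATE default.  All names below are invented
 * for this standalone toy.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct ToyTrigState
{
	unsigned	tgoid;
	bool		deferred;
} ToyTrigState;

typedef struct ToyConstraintState
{
	bool		all_isset;
	bool		all_isdeferred;
	int			numstates;
	ToyTrigState trigstates[8];
} ToyConstraintState;

static bool
toy_is_deferred(const ToyConstraintState *state, unsigned tgoid,
				bool init_deferred)
{
	if (state != NULL)
	{
		for (int i = 0; i < state->numstates; i++)
			if (state->trigstates[i].tgoid == tgoid)
				return state->trigstates[i].deferred;	/* per-trigger wins */
		if (state->all_isset)
			return state->all_isdeferred;				/* then ALL */
	}
	return init_deferred;								/* then the default */
}

int
main(void)
{
	ToyConstraintState s = {.all_isset = true, .all_isdeferred = true,
							.numstates = 1,
							.trigstates = {{.tgoid = 42, .deferred = false}}};

	printf("%d %d\n",
		   toy_is_deferred(&s, 42, true),	/* 0: per-trigger IMMEDIATE wins */
		   toy_is_deferred(&s, 7, false));	/* 1: falls through to ALL DEFERRED */
	return 0;
}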
4053 : :
4054 : : /* ----------
4055 : : * afterTriggerCopyBitmap()
4056 : : *
4057 : : * Copy bitmap into AfterTriggerEvents memory context, which is where the after
4058 : : * trigger events are kept.
4059 : : * ----------
4060 : : */
4061 : : static Bitmapset *
797 tomas.vondra@postgre 4062 : 5492 : afterTriggerCopyBitmap(Bitmapset *src)
4063 : : {
4064 : : Bitmapset *dst;
4065 : : MemoryContext oldcxt;
4066 : :
4067 [ + + ]: 5492 : if (src == NULL)
4068 : 3856 : return NULL;
4069 : :
4070 : 1636 : oldcxt = MemoryContextSwitchTo(afterTriggers.event_cxt);
4071 : :
4072 : 1636 : dst = bms_copy(src);
4073 : :
4074 : 1636 : MemoryContextSwitchTo(oldcxt);
4075 : :
4076 : 1636 : return dst;
4077 : : }
4078 : :
4079 : : /* ----------
4080 : : * afterTriggerAddEvent()
4081 : : *
4082 : : * Add a new trigger event to the specified queue.
4083 : : * The passed-in event data is copied.
4084 : : * ----------
4085 : : */
4086 : : static void
6161 tgl@sss.pgh.pa.us 4087 : 6260 : afterTriggerAddEvent(AfterTriggerEventList *events,
4088 : : AfterTriggerEvent event, AfterTriggerShared evtshared)
4089 : : {
4090 [ + + + + + + ]: 6260 : Size eventsize = SizeofTriggerEvent(event);
4091 : 6260 : Size needed = eventsize + sizeof(AfterTriggerSharedData);
4092 : : AfterTriggerEventChunk *chunk;
4093 : : AfterTriggerShared newshared;
4094 : : AfterTriggerEvent newevent;
4095 : :
4096 : : /*
4097 : : * If empty list or not enough room in the tail chunk, make a new chunk.
4098 : : * We assume here that a new shared record will always be needed.
4099 : : */
4100 : 6260 : chunk = events->tail;
4101 [ + + ]: 6260 : if (chunk == NULL ||
4102 [ - + ]: 2357 : chunk->endfree - chunk->freeptr < needed)
4103 : : {
4104 : : Size chunksize;
4105 : :
4106 : : /* Create event context if we didn't already */
3971 rhaas@postgresql.org 4107 [ + + ]: 3903 : if (afterTriggers.event_cxt == NULL)
4108 : 3286 : afterTriggers.event_cxt =
6161 tgl@sss.pgh.pa.us 4109 : 3286 : AllocSetContextCreate(TopTransactionContext,
4110 : : "AfterTriggerEvents",
4111 : : ALLOCSET_DEFAULT_SIZES);
4112 : :
4113 : : /*
4114 : : * Chunk size starts at 1KB and is allowed to increase up to 1MB.
4115 : : * These numbers are fairly arbitrary, though there is a hard limit at
4116 : : * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
4117 : : * shared records using the available space in ate_flags. Another
4118 : : * constraint is that if the chunk size gets too huge, the search loop
4119 : : * below would get slow given a (not too common) usage pattern with
4120 : : * many distinct event types in a chunk. Therefore, we double the
4121 : : * preceding chunk size only if there weren't too many shared records
4122 : : * in the preceding chunk; otherwise we halve it. This gives us some
4123 : : * ability to adapt to the actual usage pattern of the current query
4124 : : * while still having large chunk sizes in typical usage. All chunk
4125 : : * sizes used should be MAXALIGN multiples, to ensure that the shared
4126 : : * records will be aligned safely.
4127 : : */
4128 : : #define MIN_CHUNK_SIZE 1024
4129 : : #define MAX_CHUNK_SIZE (1024*1024)
4130 : :
4131 : : #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4132 : : #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4133 : : #endif
4134 : :
4135 [ + - ]: 3903 : if (chunk == NULL)
4136 : 3903 : chunksize = MIN_CHUNK_SIZE;
4137 : : else
4138 : : {
4139 : : /* preceding chunk size... */
6161 tgl@sss.pgh.pa.us 4140 :UBC 0 : chunksize = chunk->endptr - (char *) chunk;
4141 : : /* check number of shared records in preceding chunk */
4142 [ # # ]: 0 : if ((chunk->endptr - chunk->endfree) <=
4143 : : (100 * sizeof(AfterTriggerSharedData)))
5931 bruce@momjian.us 4144 : 0 : chunksize *= 2; /* okay, double it */
4145 : : else
4146 : 0 : chunksize /= 2; /* too many shared records */
6161 tgl@sss.pgh.pa.us 4147 : 0 : chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4148 : : }
3971 rhaas@postgresql.org 4149 :CBC 3903 : chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
6161 tgl@sss.pgh.pa.us 4150 : 3903 : chunk->next = NULL;
4151 : 3903 : chunk->freeptr = CHUNK_DATA_START(chunk);
4152 : 3903 : chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4153 [ - + ]: 3903 : Assert(chunk->endfree - chunk->freeptr >= needed);
4154 : :
346 alvherre@alvh.no-ip. 4155 [ + - ]: 3903 : if (events->tail == NULL)
4156 : : {
4157 [ - + ]: 3903 : Assert(events->head == NULL);
6161 tgl@sss.pgh.pa.us 4158 : 3903 : events->head = chunk;
4159 : : }
4160 : : else
6161 tgl@sss.pgh.pa.us 4161 :UBC 0 : events->tail->next = chunk;
6161 tgl@sss.pgh.pa.us 4162 :CBC 3903 : events->tail = chunk;
4163 : : /* events->tailfree is now out of sync, but we'll fix it below */
4164 : : }
4165 : :
4166 : : /*
4167 : : * Try to locate a matching shared-data record already in the chunk. If
4168 : : * none, make a new one. The search begins with the most recently added
4169 : : * record, since newer ones are most likely to match.
4170 : : */
226 4171 : 6260 : for (newshared = (AfterTriggerShared) chunk->endfree;
4172 [ + + ]: 8872 : (char *) newshared < chunk->endptr;
4173 : 2612 : newshared++)
4174 : : {
4175 : : /* compare fields roughly by probability of them being different */
6161 4176 [ + + ]: 3380 : if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4177 [ + + ]: 877 : newshared->ats_event == evtshared->ats_event &&
227 4178 [ + + ]: 874 : newshared->ats_firing_id == 0 &&
2912 4179 [ + - ]: 787 : newshared->ats_table == evtshared->ats_table &&
227 4180 [ + - ]: 787 : newshared->ats_relid == evtshared->ats_relid &&
226 4181 [ + + + + ]: 1571 : newshared->ats_rolid == evtshared->ats_rolid &&
227 4182 : 784 : bms_equal(newshared->ats_modifiedcols,
4183 : 784 : evtshared->ats_modifiedcols))
6161 4184 : 768 : break;
4185 : : }
226 4186 [ + + ]: 6260 : if ((char *) newshared >= chunk->endptr)
4187 : : {
4188 : 5492 : newshared = ((AfterTriggerShared) chunk->endfree) - 1;
6161 4189 : 5492 : *newshared = *evtshared;
4190 : : /* now we must make a suitably-long-lived copy of the bitmap */
227 4191 : 5492 : newshared->ats_modifiedcols = afterTriggerCopyBitmap(evtshared->ats_modifiedcols);
5931 bruce@momjian.us 4192 : 5492 : newshared->ats_firing_id = 0; /* just to be sure */
6161 tgl@sss.pgh.pa.us 4193 : 5492 : chunk->endfree = (char *) newshared;
4194 : : }
4195 : :
4196 : : /* Insert the data */
4197 : 6260 : newevent = (AfterTriggerEvent) chunk->freeptr;
4198 : 6260 : memcpy(newevent, event, eventsize);
4199 : : /* ... and link the new event to its shared record */
4200 : 6260 : newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4201 : 6260 : newevent->ate_flags |= (char *) newshared - (char *) newevent;
4202 : :
4203 : 6260 : chunk->freeptr += eventsize;
4204 : 6260 : events->tailfree = chunk->freeptr;
4205 : 6260 : }
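/*
 * Illustrative sketch, not part of trigger.c: the chunk-sizing heuristic
 * described in the comment above, as a standalone function.  The 1KB/1MB
 * bounds and the "100 shared records" density test mirror the code above;
 * the names and the stand-in record size are invented for the example.
 */
#include <stddef.h>
#include <stdio.h>

#define TOY_MIN_CHUNK_SIZE 1024
#define TOY_MAX_CHUNK_SIZE (1024 * 1024)
#define TOY_SHARED_RECORD_SIZE 48	/* stand-in for sizeof(AfterTriggerSharedData) */

static size_t
toy_next_chunk_size(size_t prev_chunk_size, size_t prev_shared_bytes)
{
	size_t		chunksize;

	if (prev_chunk_size == 0)
		return TOY_MIN_CHUNK_SIZE;	/* first chunk of the list */

	/* few shared records in the preceding chunk: grow; many: shrink */
	chunksize = prev_chunk_size;
	if (prev_shared_bytes <= 100 * TOY_SHARED_RECORD_SIZE)
		chunksize *= 2;
	else
		chunksize /= 2;

	/* as in the code above, only the upper bound is clamped */
	if (chunksize > TOY_MAX_CHUNK_SIZE)
		chunksize = TOY_MAX_CHUNK_SIZE;
	return chunksize;
}

int
main(void)
{
	/* one trigger fired repeatedly: chunks keep doubling toward 1MB */
	printf("%zu\n", toy_next_chunk_size(4096, 1 * TOY_SHARED_RECORD_SIZE));
	/* many distinct triggers per chunk: the next chunk shrinks */
	printf("%zu\n", toy_next_chunk_size(4096, 200 * TOY_SHARED_RECORD_SIZE));
	return 0;
}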
4206 : :
4207 : : /* ----------
4208 : : * afterTriggerFreeEventList()
4209 : : *
4210 : : * Free all the event storage in the given list.
4211 : : * ----------
4212 : : */
4213 : : static void
4214 : 8520 : afterTriggerFreeEventList(AfterTriggerEventList *events)
4215 : : {
4216 : : AfterTriggerEventChunk *chunk;
4217 : :
2911 4218 [ + + ]: 11670 : while ((chunk = events->head) != NULL)
4219 : : {
4220 : 3150 : events->head = chunk->next;
6161 4221 : 3150 : pfree(chunk);
4222 : : }
4223 : 8520 : events->tail = NULL;
4224 : 8520 : events->tailfree = NULL;
4225 : 8520 : }
4226 : :
4227 : : /* ----------
4228 : : * afterTriggerRestoreEventList()
4229 : : *
4230 : : * Restore an event list to its prior length, removing all the events
4231 : : * added since it had the value old_events.
4232 : : * ----------
4233 : : */
4234 : : static void
4235 : 4667 : afterTriggerRestoreEventList(AfterTriggerEventList *events,
4236 : : const AfterTriggerEventList *old_events)
4237 : : {
4238 : : AfterTriggerEventChunk *chunk;
4239 : : AfterTriggerEventChunk *next_chunk;
4240 : :
4241 [ + + ]: 4667 : if (old_events->tail == NULL)
4242 : : {
4243 : : /* restoring to a completely empty state, so free everything */
4244 : 4656 : afterTriggerFreeEventList(events);
4245 : : }
4246 : : else
4247 : : {
4248 : 11 : *events = *old_events;
4249 : : /* free any chunks after the last one we want to keep */
4250 [ - + ]: 11 : for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4251 : : {
6161 tgl@sss.pgh.pa.us 4252 :UBC 0 : next_chunk = chunk->next;
4253 : 0 : pfree(chunk);
4254 : : }
4255 : : /* and clean up the tail chunk to be the right length */
6161 tgl@sss.pgh.pa.us 4256 :CBC 11 : events->tail->next = NULL;
4257 : 11 : events->tail->freeptr = events->tailfree;
4258 : :
4259 : : /*
4260 : : * We don't make any effort to remove now-unused shared data records.
4261 : : * They might still be useful, anyway.
4262 : : */
4263 : : }
9474 JanWieck@Yahoo.com 4264 : 4667 : }
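/*
 * Illustrative sketch, not part of trigger.c: the snapshot/restore idiom
 * used above.  A caller saves the small list header (head, tail, tailfree)
 * before a subtransaction; on abort, only the chunks appended afterwards
 * are freed and the old tail chunk is rewound.  The toy chunk layout below
 * is invented and only mimics the freeptr bookkeeping.
 */
#include <stdlib.h>
#include <string.h>

typedef struct ToyChunk
{
	struct ToyChunk *next;
	char	   *freeptr;		/* next insertion point within data[] */
	char		data[256];
} ToyChunk;

typedef struct ToyList
{
	ToyChunk   *head;
	ToyChunk   *tail;
	char	   *tailfree;		/* saved copy of tail->freeptr */
} ToyList;

static void
toy_append(ToyList *list, const char *bytes, size_t len)
{
	ToyChunk   *chunk = list->tail;

	if (chunk == NULL ||
		(size_t) (chunk->data + sizeof(chunk->data) - chunk->freeptr) < len)
	{
		chunk = calloc(1, sizeof(ToyChunk));
		chunk->freeptr = chunk->data;
		if (list->tail)
			list->tail->next = chunk;
		else
			list->head = chunk;
		list->tail = chunk;
	}
	memcpy(chunk->freeptr, bytes, len);
	chunk->freeptr += len;
	list->tailfree = chunk->freeptr;
}

/* Put "list" back to the state captured in "saved", freeing newer chunks. */
static void
toy_restore(ToyList *list, const ToyList *saved)
{
	ToyChunk   *chunk,
			   *next;

	if (saved->tail == NULL)
	{
		/* restoring to a completely empty state: free everything */
		for (chunk = list->head; chunk != NULL; chunk = next)
		{
			next = chunk->next;
			free(chunk);
		}
		list->head = list->tail = NULL;
		list->tailfree = NULL;
		return;
	}

	*list = *saved;
	for (chunk = list->tail->next; chunk != NULL; chunk = next)
	{
		next = chunk->next;
		free(chunk);
	}
	list->tail->next = NULL;
	list->tail->freeptr = list->tailfree;	/* rewind the partial tail chunk */
}

int
main(void)
{
	ToyList		list = {0};
	ToyList		saved;
	ToyList		empty = {0};

	toy_append(&list, "before", 6);
	saved = list;					/* snapshot taken at subxact start */
	toy_append(&list, "during-subxact", 14);
	toy_restore(&list, &saved);		/* as if the subtransaction aborted */
	toy_restore(&list, &empty);		/* finally discard everything */
	return 0;
}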
4265 : :
4266 : : /* ----------
4267 : : * afterTriggerDeleteHeadEventChunk()
4268 : : *
4269 : : * Remove the first chunk of events from the query level's event list.
4270 : : * Keep any event list pointers elsewhere in the query level's data
4271 : : * structures in sync.
4272 : : * ----------
4273 : : */
4274 : : static void
2911 tgl@sss.pgh.pa.us 4275 :UBC 0 : afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
4276 : : {
4277 : 0 : AfterTriggerEventChunk *target = qs->events.head;
4278 : : ListCell *lc;
4279 : :
4280 [ # # # # ]: 0 : Assert(target && target->next);
4281 : :
4282 : : /*
4283 : : * First, update any pointers in the per-table data, so that they won't be
4284 : : * dangling. Resetting obsoleted pointers to NULL will make
4285 : : * cancel_prior_stmt_triggers start from the list head, which is fine.
4286 : : */
4287 [ # # # # # # ]: 0 : foreach(lc, qs->tables)
4288 : : {
4289 : 0 : AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4290 : :
4291 [ # # ]: 0 : if (table->after_trig_done &&
4292 [ # # ]: 0 : table->after_trig_events.tail == target)
4293 : : {
4294 : 0 : table->after_trig_events.head = NULL;
4295 : 0 : table->after_trig_events.tail = NULL;
4296 : 0 : table->after_trig_events.tailfree = NULL;
4297 : : }
4298 : : }
4299 : :
4300 : : /* Now we can flush the head chunk */
4301 : 0 : qs->events.head = target->next;
4302 : 0 : pfree(target);
4303 : 0 : }
4304 : :
4305 : :
4306 : : /* ----------
4307 : : * AfterTriggerExecute()
4308 : : *
4309 : : * Fetch the required tuples back from the heap and fire one
4310 : : * single trigger function.
4311 : : *
4312 : : * Frequently, this will be fired many times in a row for triggers of
4313 : : * a single relation. Therefore, we cache the open relation and provide
4314 : : * fmgr lookup cache space at the caller level. (For triggers fired at
4315 : : * the end of a query, we can even piggyback on the executor's state.)
4316 : : *
4317 : : * When fired for a cross-partition update of a partitioned table, the old
4318 : : * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4319 : : * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4320 : : * both are converted into the root partitioned table's format before passing
4321 : : * to the trigger function.
4322 : : *
4323 : : * event: event currently being fired.
4324 : : * relInfo: result relation for event.
4325 : : * src_relInfo: source partition of a cross-partition update
4326 : : * dst_relInfo: its destination partition
4327 : : * trigdesc: working copy of rel's trigger info.
4328 : : * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4329 : : * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4330 : : * or NULL if no instrumentation is wanted.
4331 : : * per_tuple_context: memory context to call trigger function in.
4332 : : * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4333 : : * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4334 : : * ----------
4335 : : */
4336 : : static void
2384 andres@anarazel.de 4337 :CBC 5780 : AfterTriggerExecute(EState *estate,
4338 : : AfterTriggerEvent event,
4339 : : ResultRelInfo *relInfo,
4340 : : ResultRelInfo *src_relInfo,
4341 : : ResultRelInfo *dst_relInfo,
4342 : : TriggerDesc *trigdesc,
4343 : : FmgrInfo *finfo, Instrumentation *instr,
4344 : : MemoryContext per_tuple_context,
4345 : : TupleTableSlot *trig_tuple_slot1,
4346 : : TupleTableSlot *trig_tuple_slot2)
4347 : : {
4348 : 5780 : Relation rel = relInfo->ri_RelationDesc;
1266 alvherre@alvh.no-ip. 4349 : 5780 : Relation src_rel = src_relInfo->ri_RelationDesc;
4350 : 5780 : Relation dst_rel = dst_relInfo->ri_RelationDesc;
6161 tgl@sss.pgh.pa.us 4351 : 5780 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
4352 : 5780 : Oid tgoid = evtshared->ats_tgoid;
2021 peter@eisentraut.org 4353 : 5780 : TriggerData LocTriggerData = {0};
4354 : : Oid save_rolid;
4355 : : int save_sec_context;
4356 : : HeapTuple rettuple;
4357 : : int tgindx;
2384 andres@anarazel.de 4358 : 5780 : bool should_free_trig = false;
4359 : 5780 : bool should_free_new = false;
4360 : :
4361 : : /*
4362 : : * Locate trigger in trigdesc. It might not be present, and in fact the
4363 : : * trigdesc could be NULL, if the trigger was dropped since the event was
4364 : : * queued. In that case, silently do nothing.
4365 : : */
443 tgl@sss.pgh.pa.us 4366 [ + + ]: 5780 : if (trigdesc == NULL)
4367 : 3 : return;
7470 4368 [ + - ]: 12941 : for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4369 : : {
4370 [ + + ]: 12941 : if (trigdesc->triggers[tgindx].tgoid == tgoid)
4371 : : {
4372 : 5777 : LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4373 : 5777 : break;
4374 : : }
4375 : : }
4376 [ - + ]: 5777 : if (LocTriggerData.tg_trigger == NULL)
443 tgl@sss.pgh.pa.us 4377 :UBC 0 : return;
4378 : :
4379 : : /*
4380 : : * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4381 : : * to include time spent re-fetching tuples in the trigger cost.
4382 : : */
7470 tgl@sss.pgh.pa.us 4383 [ - + ]:CBC 5777 : if (instr)
7470 tgl@sss.pgh.pa.us 4384 :UBC 0 : InstrStartNode(instr + tgindx);
4385 : :
4386 : : /*
4387 : : * Fetch the required tuple(s).
4388 : : */
4185 noah@leadboat.com 4389 [ + + + ]:CBC 5777 : switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4390 : : {
4391 : 25 : case AFTER_TRIGGER_FDW_FETCH:
4392 : : {
2912 tgl@sss.pgh.pa.us 4393 : 25 : Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4394 : :
4185 noah@leadboat.com 4395 [ - + ]: 25 : if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4396 : : trig_tuple_slot1))
4185 noah@leadboat.com 4397 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4398 : :
4185 noah@leadboat.com 4399 [ + + ]:CBC 25 : if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4400 : 9 : TRIGGER_EVENT_UPDATE &&
4401 [ - + ]: 9 : !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4402 : : trig_tuple_slot2))
4185 noah@leadboat.com 4403 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4404 : : }
4405 : : /* fall through */
4406 : : case AFTER_TRIGGER_FDW_REUSE:
4407 : :
4408 : : /*
4409 : : * Store the tuple in the slot so that tg_trigtuple does not reference
4410 : : * tuplestore memory. (It is formally possible for the trigger
4411 : : * function to queue trigger events that add to the same
4412 : : * tuplestore, which can push other tuples out of memory.) The
4413 : : * distinction is academic, because we start with a minimal tuple
4414 : : * that is stored as a heap tuple, constructed in a different memory
4415 : : * context, in the slot anyway.
4416 : : */
2384 andres@anarazel.de 4417 :CBC 29 : LocTriggerData.tg_trigslot = trig_tuple_slot1;
4418 : 29 : LocTriggerData.tg_trigtuple =
4419 : 29 : ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4420 : :
2097 efujita@postgresql.o 4421 [ + + ]: 29 : if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4422 : : TRIGGER_EVENT_UPDATE)
4423 : : {
4424 : 11 : LocTriggerData.tg_newslot = trig_tuple_slot2;
4425 : 11 : LocTriggerData.tg_newtuple =
4426 : 11 : ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4427 : : }
4428 : : else
4429 : : {
4430 : 18 : LocTriggerData.tg_newtuple = NULL;
4431 : : }
4185 noah@leadboat.com 4432 : 29 : break;
4433 : :
4434 : 5748 : default:
4435 [ + + ]: 5748 : if (ItemPointerIsValid(&(event->ate_ctid1)))
4436 : : {
1266 alvherre@alvh.no-ip. 4437 : 5228 : TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4438 : : src_relInfo);
4439 : :
4440 [ - + ]: 5228 : if (!table_tuple_fetch_row_version(src_rel,
4441 : : &(event->ate_ctid1),
4442 : : SnapshotAny,
4443 : : src_slot))
4185 noah@leadboat.com 4444 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4445 : :
4446 : : /*
4447 : : * Store the tuple fetched from the source partition into the
4448 : : * target (root partitioned) table slot, converting if needed.
4449 : : */
1266 alvherre@alvh.no-ip. 4450 [ + + ]:CBC 5228 : if (src_relInfo != relInfo)
4451 : : {
4452 : 72 : TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4453 : :
4454 : 72 : LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4455 [ + + ]: 72 : if (map)
4456 : : {
4457 : 18 : execute_attr_map_slot(map->attrMap,
4458 : : src_slot,
4459 : : LocTriggerData.tg_trigslot);
4460 : : }
4461 : : else
4462 : 54 : ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4463 : : }
4464 : : else
4465 : 5156 : LocTriggerData.tg_trigslot = src_slot;
2384 andres@anarazel.de 4466 : 5228 : LocTriggerData.tg_trigtuple =
2357 4467 : 5228 : ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4468 : : }
4469 : : else
4470 : : {
4185 noah@leadboat.com 4471 : 520 : LocTriggerData.tg_trigtuple = NULL;
4472 : : }
4473 : :
4474 : : /* don't touch ctid2 if not there */
1266 alvherre@alvh.no-ip. 4475 [ + + ]: 5748 : if (((event->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ||
4476 [ + + + - ]: 5820 : (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4185 noah@leadboat.com 4477 : 1543 : ItemPointerIsValid(&(event->ate_ctid2)))
4478 : 1543 : {
1266 alvherre@alvh.no-ip. 4479 : 1543 : TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4480 : : dst_relInfo);
4481 : :
4482 [ - + ]: 1543 : if (!table_tuple_fetch_row_version(dst_rel,
4483 : : &(event->ate_ctid2),
4484 : : SnapshotAny,
4485 : : dst_slot))
4185 noah@leadboat.com 4486 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4487 : :
4488 : : /*
4489 : : * Store the tuple fetched from the destination partition into
4490 : : * the target (root partitioned) table slot, converting if
4491 : : * needed.
4492 : : */
1266 alvherre@alvh.no-ip. 4493 [ + + ]:CBC 1543 : if (dst_relInfo != relInfo)
4494 : : {
4495 : 72 : TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4496 : :
4497 : 72 : LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4498 [ + + ]: 72 : if (map)
4499 : : {
4500 : 18 : execute_attr_map_slot(map->attrMap,
4501 : : dst_slot,
4502 : : LocTriggerData.tg_newslot);
4503 : : }
4504 : : else
4505 : 54 : ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4506 : : }
4507 : : else
4508 : 1471 : LocTriggerData.tg_newslot = dst_slot;
2384 andres@anarazel.de 4509 : 1543 : LocTriggerData.tg_newtuple =
2357 4510 : 1543 : ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4511 : : }
4512 : : else
4513 : : {
4185 noah@leadboat.com 4514 : 4205 : LocTriggerData.tg_newtuple = NULL;
4515 : : }
4516 : : }
4517 : :
4518 : : /*
4519 : : * Set up the tuplestore information to let the trigger have access to
4520 : : * transition tables. When we first make a transition table available to
4521 : : * a trigger, mark it "closed" so that it cannot change anymore. If any
4522 : : * additional events of the same type get queued in the current trigger
4523 : : * query level, they'll go into new transition tables.
4524 : : */
2992 rhodiumtoad@postgres 4525 : 5777 : LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
2912 tgl@sss.pgh.pa.us 4526 [ + + ]: 5777 : if (evtshared->ats_table)
4527 : : {
2992 rhodiumtoad@postgres 4528 [ + + ]: 282 : if (LocTriggerData.tg_trigger->tgoldtable)
4529 : : {
1258 alvherre@alvh.no-ip. 4530 [ + + ]: 156 : if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4531 : 81 : LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4532 : : else
4533 : 75 : LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
2912 tgl@sss.pgh.pa.us 4534 : 156 : evtshared->ats_table->closed = true;
4535 : : }
4536 : :
4537 [ + + ]: 282 : if (LocTriggerData.tg_trigger->tgnewtable)
4538 : : {
1258 alvherre@alvh.no-ip. 4539 [ + + ]: 201 : if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4540 : 111 : LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4541 : : else
4542 : 90 : LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
2912 tgl@sss.pgh.pa.us 4543 : 201 : evtshared->ats_table->closed = true;
4544 : : }
4545 : : }
4546 : :
4547 : : /*
4548 : : * Set up the remaining trigger information
4549 : : */
9231 4550 : 5777 : LocTriggerData.type = T_TriggerData;
7666 4551 : 5777 : LocTriggerData.tg_event =
6161 4552 : 5777 : evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
9231 4553 : 5777 : LocTriggerData.tg_relation = rel;
2007 peter@eisentraut.org 4554 [ + + ]: 5777 : if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4555 : 2718 : LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4556 : :
7666 tgl@sss.pgh.pa.us 4557 : 5777 : MemoryContextReset(per_tuple_context);
4558 : :
4559 : : /*
4560 : : * If necessary, become the role that was active when the trigger got
4561 : : * queued. Note that the role might have been dropped since the trigger
4562 : : * was queued, but if that is a problem, we will get an error later.
4563 : : * Checking here would still leave a race condition.
4564 : : */
226 4565 : 5777 : GetUserIdAndSecContext(&save_rolid, &save_sec_context);
4566 [ + + ]: 5777 : if (save_rolid != evtshared->ats_rolid)
4567 : 12 : SetUserIdAndSecContext(evtshared->ats_rolid,
4568 : : save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
4569 : :
4570 : : /*
4571 : : * Call the trigger and throw away any possibly returned updated tuple.
4572 : : * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4573 : : */
8863 4574 : 5777 : rettuple = ExecCallTriggerFunc(&LocTriggerData,
4575 : : tgindx,
4576 : : finfo,
4577 : : NULL,
4578 : : per_tuple_context);
4185 noah@leadboat.com 4579 [ + + ]: 5148 : if (rettuple != NULL &&
4580 [ + + ]: 1708 : rettuple != LocTriggerData.tg_trigtuple &&
4581 [ - + ]: 720 : rettuple != LocTriggerData.tg_newtuple)
9396 JanWieck@Yahoo.com 4582 :UBC 0 : heap_freetuple(rettuple);
4583 : :
4584 : : /* Restore the current role if necessary */
226 tgl@sss.pgh.pa.us 4585 [ + + ]:CBC 5148 : if (save_rolid != evtshared->ats_rolid)
4586 : 9 : SetUserIdAndSecContext(save_rolid, save_sec_context);
4587 : :
4588 : : /*
4589 : : * Release resources
4590 : : */
2384 andres@anarazel.de 4591 [ + + ]: 5148 : if (should_free_trig)
4592 : 86 : heap_freetuple(LocTriggerData.tg_trigtuple);
4593 [ + + ]: 5148 : if (should_free_new)
4594 : 68 : heap_freetuple(LocTriggerData.tg_newtuple);
4595 : :
4596 : : /* don't clear slots' contents if foreign table */
2097 efujita@postgresql.o 4597 [ + + ]: 5148 : if (trig_tuple_slot1 == NULL)
4598 : : {
4599 [ + + ]: 5113 : if (LocTriggerData.tg_trigslot)
4600 : 4623 : ExecClearTuple(LocTriggerData.tg_trigslot);
4601 [ + + ]: 5113 : if (LocTriggerData.tg_newslot)
4602 : 1382 : ExecClearTuple(LocTriggerData.tg_newslot);
4603 : : }
4604 : :
4605 : : /*
4606 : : * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4607 : : * one "tuple returned" (really the number of firings).
4608 : : */
7470 tgl@sss.pgh.pa.us 4609 [ - + ]: 5148 : if (instr)
7039 bruce@momjian.us 4610 :UBC 0 : InstrStopNode(instr + tgindx, 1);
4611 : : }
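/*
 * Illustrative sketch, not part of trigger.c: the consumer side of the
 * TriggerData that AfterTriggerExecute() assembles above.  This is a
 * minimal AFTER ... FOR EACH ROW trigger function written against the
 * documented C trigger interface; "sketch_row_trigger" is an invented
 * name and the module would be built as its own extension.
 */
#include "postgres.h"

#include "commands/trigger.h"
#include "fmgr.h"
#include "utils/rel.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(sketch_row_trigger);

Datum
sketch_row_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	HeapTuple	rettuple;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "sketch_row_trigger: not called by trigger manager");

	/* tg_trigtuple / tg_newtuple are the images fetched by the code above */
	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		rettuple = trigdata->tg_newtuple;
	else
		rettuple = trigdata->tg_trigtuple;

	elog(NOTICE, "AFTER row trigger fired on \"%s\"",
		 RelationGetRelationName(trigdata->tg_relation));

	/* the return value of an AFTER trigger is ignored */
	return PointerGetDatum(rettuple);
}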
4612 : :
4613 : :
4614 : : /*
4615 : : * afterTriggerMarkEvents()
4616 : : *
4617 : : * Scan the given event list for not yet invoked events. Mark the ones
4618 : : * that can be invoked now with the current firing ID.
4619 : : *
4620 : : * If move_list isn't NULL, events that are not to be invoked now are
4621 : : * transferred to move_list.
4622 : : *
4623 : : * When immediate_only is true, do not invoke currently-deferred triggers.
4624 : : * (This will be false only at main transaction exit.)
4625 : : *
4626 : : * Returns true if any invokable events were found.
4627 : : */
4628 : : static bool
7666 tgl@sss.pgh.pa.us 4629 :CBC 302036 : afterTriggerMarkEvents(AfterTriggerEventList *events,
4630 : : AfterTriggerEventList *move_list,
4631 : : bool immediate_only)
4632 : : {
4633 : 302036 : bool found = false;
1762 noah@leadboat.com 4634 : 302036 : bool deferred_found = false;
4635 : : AfterTriggerEvent event;
4636 : : AfterTriggerEventChunk *chunk;
4637 : :
6161 tgl@sss.pgh.pa.us 4638 [ + + + + + + + + + + ]: 312653 : for_each_event_chunk(event, chunk, *events)
4639 : : {
4640 : 6601 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
7666 4641 : 6601 : bool defer_it = false;
4642 : :
6161 4643 [ + + ]: 6601 : if (!(event->ate_flags &
4644 : : (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4645 : : {
4646 : : /*
4647 : : * This trigger hasn't been called or scheduled yet. Check if we
4648 : : * should call it now.
4649 : : */
4650 [ + + + + ]: 6184 : if (immediate_only && afterTriggerCheckState(evtshared))
4651 : : {
7666 4652 : 309 : defer_it = true;
4653 : : }
4654 : : else
4655 : : {
4656 : : /*
4657 : : * Mark it as to be fired in this firing cycle.
4658 : : */
3971 rhaas@postgresql.org 4659 : 5875 : evtshared->ats_firing_id = afterTriggers.firing_counter;
6161 tgl@sss.pgh.pa.us 4660 : 5875 : event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
7666 4661 : 5875 : found = true;
4662 : : }
4663 : : }
4664 : :
4665 : : /*
4666 : : * If it's deferred, move it to move_list, if requested.
4667 : : */
4668 [ + + + - ]: 6601 : if (defer_it && move_list != NULL)
4669 : : {
1762 noah@leadboat.com 4670 : 309 : deferred_found = true;
4671 : : /* add it to move_list */
6161 tgl@sss.pgh.pa.us 4672 : 309 : afterTriggerAddEvent(move_list, event, evtshared);
4673 : : /* mark original copy "done" so we don't do it again */
4674 : 309 : event->ate_flags |= AFTER_TRIGGER_DONE;
4675 : : }
4676 : : }
4677 : :
4678 : : /*
4679 : : * We could allow deferred triggers if, before the end of the
4680 : : * security-restricted operation, we were to verify that a SET CONSTRAINTS
4681 : : * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4682 : : */
1762 noah@leadboat.com 4683 [ + + + + ]: 302036 : if (deferred_found && InSecurityRestrictedOperation())
4684 [ + - ]: 6 : ereport(ERROR,
4685 : : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4686 : : errmsg("cannot fire deferred trigger within security-restricted operation")));
4687 : :
7666 tgl@sss.pgh.pa.us 4688 : 302030 : return found;
4689 : : }
4690 : :
4691 : : /*
4692 : : * afterTriggerInvokeEvents()
4693 : : *
4694 : : * Scan the given event list for events that are marked as to be fired
4695 : : * in the current firing cycle, and fire them.
4696 : : *
4697 : : * If estate isn't NULL, we use its result relation info to avoid repeated
4698 : : * openings and closings of trigger target relations. If it is NULL, we
4699 : : * make one locally to cache the info in case there are multiple trigger
4700 : : * events per rel.
4701 : : *
4702 : : * When delete_ok is true, it's safe to delete fully-processed events.
4703 : : * (We are not very tense about that: we simply reset a chunk to be empty
4704 : : * if all its events got fired. The objective here is just to avoid useless
4705 : : * rescanning of events when a trigger queues new events during transaction
4706 : : * end, so it's not necessary to worry much about the case where only
4707 : : * some events are fired.)
4708 : : *
4709 : : * Returns true if no unfired events remain in the list (this allows us
4710 : : * to avoid repeating afterTriggerMarkEvents).
4711 : : */
4712 : : static bool
4713 : 3782 : afterTriggerInvokeEvents(AfterTriggerEventList *events,
4714 : : CommandId firing_id,
4715 : : EState *estate,
4716 : : bool delete_ok)
4717 : : {
6161 4718 : 3782 : bool all_fired = true;
4719 : : AfterTriggerEventChunk *chunk;
4720 : : MemoryContext per_tuple_context;
6597 4721 : 3782 : bool local_estate = false;
2383 andres@anarazel.de 4722 : 3782 : ResultRelInfo *rInfo = NULL;
8863 tgl@sss.pgh.pa.us 4723 : 3782 : Relation rel = NULL;
8363 4724 : 3782 : TriggerDesc *trigdesc = NULL;
8863 4725 : 3782 : FmgrInfo *finfo = NULL;
7470 4726 : 3782 : Instrumentation *instr = NULL;
4185 noah@leadboat.com 4727 : 3782 : TupleTableSlot *slot1 = NULL,
4728 : 3782 : *slot2 = NULL;
4729 : :
4730 : : /* Make a local EState if need be */
6597 tgl@sss.pgh.pa.us 4731 [ + + ]: 3782 : if (estate == NULL)
4732 : : {
4733 : 181 : estate = CreateExecutorState();
4734 : 181 : local_estate = true;
4735 : : }
4736 : :
4737 : : /* Make a per-tuple memory context for trigger function calls */
4738 : : per_tuple_context =
8993 4739 : 3782 : AllocSetContextCreate(CurrentMemoryContext,
4740 : : "AfterTriggerTupleContext",
4741 : : ALLOCSET_DEFAULT_SIZES);
4742 : :
6161 4743 [ + + ]: 6935 : for_each_chunk(chunk, *events)
4744 : : {
4745 : : AfterTriggerEvent event;
4746 : 3782 : bool all_fired_in_chunk = true;
4747 : :
4748 [ + + + + + + + + ]: 9668 : for_each_event(event, chunk)
4749 : : {
4750 : 6515 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
4751 : :
4752 : : /*
4753 : : * Is it one for me to fire?
4754 : : */
4755 [ + + ]: 6515 : if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4756 [ + - ]: 5780 : evtshared->ats_firing_id == firing_id)
8863 4757 : 5151 : {
4758 : : ResultRelInfo *src_rInfo,
4759 : : *dst_rInfo;
4760 : :
4761 : : /*
4762 : : * So let's fire it... but first, find the correct relation if
4763 : : * this is not the same relation as before.
4764 : : */
6161 4765 [ + + + + ]: 5780 : if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4766 : : {
1266 alvherre@alvh.no-ip. 4767 : 3933 : rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4768 : : NULL);
6161 tgl@sss.pgh.pa.us 4769 : 3933 : rel = rInfo->ri_RelationDesc;
4770 : : /* Catch calls with insufficient relcache refcounting */
1568 4771 [ - + ]: 3933 : Assert(!RelationHasReferenceCountZero(rel));
6161 4772 : 3933 : trigdesc = rInfo->ri_TrigDesc;
4773 : : /* caution: trigdesc could be NULL here */
4774 : 3933 : finfo = rInfo->ri_TrigFunctions;
4775 : 3933 : instr = rInfo->ri_TrigInstrument;
2097 efujita@postgresql.o 4776 [ - + ]: 3933 : if (slot1 != NULL)
4777 : : {
2097 efujita@postgresql.o 4778 :UBC 0 : ExecDropSingleTupleTableSlot(slot1);
4779 : 0 : ExecDropSingleTupleTableSlot(slot2);
4780 : 0 : slot1 = slot2 = NULL;
4781 : : }
4185 noah@leadboat.com 4782 [ + + ]:CBC 3933 : if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4783 : : {
2487 andres@anarazel.de 4784 : 19 : slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4785 : : &TTSOpsMinimalTuple);
4786 : 19 : slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4787 : : &TTSOpsMinimalTuple);
4788 : : }
4789 : : }
4790 : :
4791 : : /*
4792 : : * Look up source and destination partition result rels of a
4793 : : * cross-partition update event.
4794 : : */
1266 alvherre@alvh.no-ip. 4795 [ + + ]: 5780 : if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4796 : : AFTER_TRIGGER_CP_UPDATE)
4797 : : {
4798 [ + - - + ]: 72 : Assert(OidIsValid(event->ate_src_part) &&
4799 : : OidIsValid(event->ate_dst_part));
4800 : 72 : src_rInfo = ExecGetTriggerResultRel(estate,
4801 : : event->ate_src_part,
4802 : : rInfo);
4803 : 72 : dst_rInfo = ExecGetTriggerResultRel(estate,
4804 : : event->ate_dst_part,
4805 : : rInfo);
4806 : : }
4807 : : else
4808 : 5708 : src_rInfo = dst_rInfo = rInfo;
4809 : :
4810 : : /*
4811 : : * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4812 : : * still set, so recursive examinations of the event list
4813 : : * won't try to re-fire it.
4814 : : */
4815 : 5780 : AfterTriggerExecute(estate, event, rInfo,
4816 : : src_rInfo, dst_rInfo,
4817 : : trigdesc, finfo, instr,
4818 : : per_tuple_context, slot1, slot2);
4819 : :
4820 : : /*
4821 : : * Mark the event as done.
4822 : : */
6161 tgl@sss.pgh.pa.us 4823 : 5151 : event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4824 : 5151 : event->ate_flags |= AFTER_TRIGGER_DONE;
4825 : : }
4826 [ + + ]: 735 : else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4827 : : {
4828 : : /* something remains to be done */
4829 : 255 : all_fired = all_fired_in_chunk = false;
4830 : : }
4831 : : }
4832 : :
4833 : : /* Clear the chunk if delete_ok and nothing left of interest */
4834 [ + + + - ]: 3153 : if (delete_ok && all_fired_in_chunk)
4835 : : {
4836 : 96 : chunk->freeptr = CHUNK_DATA_START(chunk);
4837 : 96 : chunk->endfree = chunk->endptr;
4838 : :
4839 : : /*
4840 : : * If it's the last chunk, we must sync the event list's tailfree too. Note
4841 : : * that delete_ok must NOT be passed as true if there could be
4842 : : * additional AfterTriggerEventList values pointing at this event
4843 : : * list, since we'd fail to fix their copies of tailfree.
4844 : : */
5497 4845 [ + - ]: 96 : if (chunk == events->tail)
4846 : 96 : events->tailfree = chunk->freeptr;
4847 : : }
4848 : : }
4185 noah@leadboat.com 4849 [ + + ]: 3153 : if (slot1 != NULL)
4850 : : {
4851 : 19 : ExecDropSingleTupleTableSlot(slot1);
4852 : 19 : ExecDropSingleTupleTableSlot(slot2);
4853 : : }
4854 : :
4855 : : /* Release working resources */
6597 tgl@sss.pgh.pa.us 4856 : 3153 : MemoryContextDelete(per_tuple_context);
4857 : :
4858 [ + + ]: 3153 : if (local_estate)
4859 : : {
1789 heikki.linnakangas@i 4860 : 96 : ExecCloseResultRelations(estate);
2384 andres@anarazel.de 4861 : 96 : ExecResetTupleTable(estate->es_tupleTable, false);
6597 tgl@sss.pgh.pa.us 4862 : 96 : FreeExecutorState(estate);
4863 : : }
4864 : :
6161 4865 : 3153 : return all_fired;
4866 : : }
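/*
 * Illustrative sketch, not part of trigger.c: the two-phase mark/invoke
 * protocol implemented by afterTriggerMarkEvents() and
 * afterTriggerInvokeEvents() above.  Marking stamps every pending event
 * with the current firing id before anything runs, so events queued while
 * firing (e.g. by a trigger body) get a later id and wait for the next
 * cycle.  The event array and handler below are invented for the toy.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_EVENTS 16

typedef struct ToyEvent
{
	bool		done;
	unsigned	firing_id;		/* 0 = not yet scheduled */
} ToyEvent;

static ToyEvent toy_events[TOY_MAX_EVENTS];
static int	toy_nevents = 0;
static unsigned toy_firing_counter = 1;

static void
toy_queue_event(void)
{
	if (toy_nevents >= TOY_MAX_EVENTS)
		return;
	toy_events[toy_nevents].done = false;
	toy_events[toy_nevents].firing_id = 0;
	toy_nevents++;
}

static bool
toy_mark_events(unsigned firing_id)
{
	bool		found = false;

	for (int i = 0; i < toy_nevents; i++)
		if (!toy_events[i].done && toy_events[i].firing_id == 0)
		{
			toy_events[i].firing_id = firing_id;
			found = true;
		}
	return found;
}

static void
toy_invoke_events(unsigned firing_id)
{
	for (int i = 0; i < toy_nevents; i++)
		if (!toy_events[i].done && toy_events[i].firing_id == firing_id)
		{
			/* "fire" the event; the first one queues another, as a trigger might */
			if (i == 0)
				toy_queue_event();
			toy_events[i].done = true;
		}
}

int
main(void)
{
	toy_queue_event();

	/* loop until a marking pass finds nothing left to schedule */
	while (toy_mark_events(toy_firing_counter))
	{
		toy_invoke_events(toy_firing_counter);
		toy_firing_counter++;
	}
	printf("fired %d events in %u cycles\n", toy_nevents, toy_firing_counter - 1);
	return 0;
}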
4867 : :
4868 : :
4869 : : /*
4870 : : * GetAfterTriggersTableData
4871 : : *
4872 : : * Find or create an AfterTriggersTableData struct for the specified
4873 : : * trigger event (relation + operation type). Ignore existing structs
4874 : : * marked "closed"; we don't want to put any additional tuples into them,
4875 : : * nor change their stmt-triggers-fired state.
4876 : : *
4877 : : * Note: the AfterTriggersTableData list is allocated in the current
4878 : : * (sub)transaction's CurTransactionContext. This is OK because
4879 : : * we don't need it to live past AfterTriggerEndQuery.
4880 : : */
4881 : : static AfterTriggersTableData *
2912 4882 : 1097 : GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4883 : : {
4884 : : AfterTriggersTableData *table;
4885 : : AfterTriggersQueryData *qs;
4886 : : MemoryContext oldcxt;
4887 : : ListCell *lc;
4888 : :
4889 : : /* Caller should have ensured query_depth is OK. */
4890 [ + - - + ]: 1097 : Assert(afterTriggers.query_depth >= 0 &&
4891 : : afterTriggers.query_depth < afterTriggers.maxquerydepth);
4892 : 1097 : qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4893 : :
4894 [ + + + + + + ]: 1271 : foreach(lc, qs->tables)
4895 : : {
4896 : 719 : table = (AfterTriggersTableData *) lfirst(lc);
4897 [ + + + + ]: 719 : if (table->relid == relid && table->cmdType == cmdType &&
4898 [ + + ]: 563 : !table->closed)
4899 : 545 : return table;
4900 : : }
4901 : :
4902 : 552 : oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4903 : :
4904 : 552 : table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4905 : 552 : table->relid = relid;
4906 : 552 : table->cmdType = cmdType;
4907 : 552 : qs->tables = lappend(qs->tables, table);
4908 : :
4909 : 552 : MemoryContextSwitchTo(oldcxt);
4910 : :
4911 : 552 : return table;
4912 : : }
4913 : :
4914 : : /*
4915 : : * Returns a TupleTableSlot suitable for holding the tuples to be put
4916 : : * into AfterTriggersTableData's transition table tuplestores.
4917 : : */
4918 : : static TupleTableSlot *
1652 alvherre@alvh.no-ip. 4919 : 147 : GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
4920 : : TupleDesc tupdesc)
4921 : : {
4922 : : /* Create it if not already done. */
4923 [ + + ]: 147 : if (!table->storeslot)
4924 : : {
4925 : : MemoryContext oldcxt;
4926 : :
4927 : : /*
4928 : : * We need this slot only until AfterTriggerEndQuery, but making it
4929 : : * last till end-of-subxact is good enough. It'll be freed by
4930 : : * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4931 : : * a different lifespan, so we'd better make a copy of that.
4932 : : */
4933 : 42 : oldcxt = MemoryContextSwitchTo(CurTransactionContext);
1077 tgl@sss.pgh.pa.us 4934 : 42 : tupdesc = CreateTupleDescCopy(tupdesc);
1652 alvherre@alvh.no-ip. 4935 : 42 : table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4936 : 42 : MemoryContextSwitchTo(oldcxt);
4937 : : }
4938 : :
4939 : 147 : return table->storeslot;
4940 : : }
4941 : :
4942 : : /*
4943 : : * MakeTransitionCaptureState
4944 : : *
4945 : : * Make a TransitionCaptureState object for the given TriggerDesc, target
4946 : : * relation, and operation type. The TCS object holds all the state needed
4947 : : * to decide whether to capture tuples in transition tables.
4948 : : *
4949 : : * If there are no triggers in 'trigdesc' that request relevant transition
4950 : : * tables, then return NULL.
4951 : : *
4952 : : * The resulting object can be passed to the ExecAR* functions. When
4953 : : * dealing with child tables, the caller can set tcs_original_insert_tuple
4954 : : * to avoid having to reconstruct the original tuple in the root table's
4955 : : * format.
4956 : : *
4957 : : * Note that we copy the flags from a parent table into this struct (rather
4958 : : * than subsequently using the relation's TriggerDesc directly) so that we can
4959 : : * use it to control collection of transition tuples from child tables.
4960 : : *
4961 : : * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4962 : : * on the same table during one query should share one transition table.
4963 : : * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4964 : : * looked up using the table OID + CmdType, and are merely referenced by
4965 : : * the TransitionCaptureState objects we hand out to callers.
4966 : : */
4967 : : TransitionCaptureState *
2912 tgl@sss.pgh.pa.us 4968 : 56367 : MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4969 : : {
4970 : : TransitionCaptureState *state;
4971 : : bool need_old_upd,
4972 : : need_new_upd,
4973 : : need_old_del,
4974 : : need_new_ins;
4975 : : AfterTriggersTableData *table;
4976 : : MemoryContext oldcxt;
4977 : : ResourceOwner saveResourceOwner;
4978 : :
4979 [ + + ]: 56367 : if (trigdesc == NULL)
4980 : 50081 : return NULL;
4981 : :
4982 : : /* Detect which table(s) we need. */
4983 [ + + + + - ]: 6286 : switch (cmdType)
4984 : : {
4985 : 3464 : case CMD_INSERT:
1258 alvherre@alvh.no-ip. 4986 : 3464 : need_old_upd = need_old_del = need_new_upd = false;
4987 : 3464 : need_new_ins = trigdesc->trig_insert_new_table;
2912 tgl@sss.pgh.pa.us 4988 : 3464 : break;
4989 : 1945 : case CMD_UPDATE:
1258 alvherre@alvh.no-ip. 4990 : 1945 : need_old_upd = trigdesc->trig_update_old_table;
4991 : 1945 : need_new_upd = trigdesc->trig_update_new_table;
4992 : 1945 : need_old_del = need_new_ins = false;
2912 tgl@sss.pgh.pa.us 4993 : 1945 : break;
4994 : 713 : case CMD_DELETE:
1258 alvherre@alvh.no-ip. 4995 : 713 : need_old_del = trigdesc->trig_delete_old_table;
4996 : 713 : need_old_upd = need_new_upd = need_new_ins = false;
4997 : 713 : break;
4998 : 164 : case CMD_MERGE:
4999 : 164 : need_old_upd = trigdesc->trig_update_old_table;
5000 : 164 : need_new_upd = trigdesc->trig_update_new_table;
5001 : 164 : need_old_del = trigdesc->trig_delete_old_table;
5002 : 164 : need_new_ins = trigdesc->trig_insert_new_table;
2912 tgl@sss.pgh.pa.us 5003 : 164 : break;
2912 tgl@sss.pgh.pa.us 5004 :UBC 0 : default:
5005 [ # # ]: 0 : elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
5006 : : /* keep compiler quiet */
5007 : : need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
5008 : : break;
5009 : : }
1258 alvherre@alvh.no-ip. 5010 [ + + + + + + + + ]:CBC 6286 : if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
2912 tgl@sss.pgh.pa.us 5011 : 5987 : return NULL;
5012 : :
5013 : : /* Check state, like AfterTriggerSaveEvent. */
5014 [ - + ]: 299 : if (afterTriggers.query_depth < 0)
2912 tgl@sss.pgh.pa.us 5015 [ # # ]:UBC 0 : elog(ERROR, "MakeTransitionCaptureState() called outside of query");
5016 : :
5017 : : /* Be sure we have enough space to record events at this query depth. */
2912 tgl@sss.pgh.pa.us 5018 [ + + ]:CBC 299 : if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5019 : 227 : AfterTriggerEnlargeQueryState();
5020 : :
5021 : : /*
5022 : : * Find or create an AfterTriggersTableData struct to hold the
5023 : : * tuplestore(s). If there's a matching struct but it's marked closed,
5024 : : * ignore it; we need a newer one.
5025 : : *
5026 : : * Note: the AfterTriggersTableData list, as well as the tuplestores, are
5027 : : * allocated in the current (sub)transaction's CurTransactionContext, and
5028 : : * the tuplestores are managed by the (sub)transaction's resource owner.
5029 : : * This is sufficient lifespan because we do not allow triggers using
5030 : : * transition tables to be deferrable; they will be fired during
5031 : : * AfterTriggerEndQuery, after which it's okay to delete the data.
5032 : : */
5033 : 299 : table = GetAfterTriggersTableData(relid, cmdType);
5034 : :
5035 : : /* Now create required tuplestore(s), if we don't have them already. */
5036 : 299 : oldcxt = MemoryContextSwitchTo(CurTransactionContext);
5037 : 299 : saveResourceOwner = CurrentResourceOwner;
2887 5038 : 299 : CurrentResourceOwner = CurTransactionResourceOwner;
5039 : :
1258 alvherre@alvh.no-ip. 5040 [ + + + + ]: 299 : if (need_old_upd && table->old_upd_tuplestore == NULL)
5041 : 86 : table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
5042 [ + + + + ]: 299 : if (need_new_upd && table->new_upd_tuplestore == NULL)
5043 : 92 : table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
5044 [ + + + + ]: 299 : if (need_old_del && table->old_del_tuplestore == NULL)
5045 : 71 : table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
5046 [ + + + + ]: 299 : if (need_new_ins && table->new_ins_tuplestore == NULL)
5047 : 115 : table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
5048 : :
2912 tgl@sss.pgh.pa.us 5049 : 299 : CurrentResourceOwner = saveResourceOwner;
5050 : 299 : MemoryContextSwitchTo(oldcxt);
5051 : :
5052 : : /* Now build the TransitionCaptureState struct, in caller's context */
5053 : 299 : state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
205 michael@paquier.xyz 5054 : 299 : state->tcs_delete_old_table = need_old_del;
5055 : 299 : state->tcs_update_old_table = need_old_upd;
5056 : 299 : state->tcs_update_new_table = need_new_upd;
5057 : 299 : state->tcs_insert_new_table = need_new_ins;
2912 tgl@sss.pgh.pa.us 5058 : 299 : state->tcs_private = table;
5059 : :
5060 : 299 : return state;
5061 : : }
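/*
 * Illustrative sketch, not part of trigger.c: a statement-level trigger
 * function reading the transition tables whose tuplestores are set up by
 * MakeTransitionCaptureState() above.  It assumes a trigger created
 * roughly as:
 *
 *   CREATE TRIGGER sketch_tt AFTER UPDATE ON some_table
 *     REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
 *     FOR EACH STATEMENT EXECUTE FUNCTION sketch_transition_trigger();
 *
 * All of those names are invented, and the module would be built as its
 * own extension; the SPI calls follow the documented
 * SPI_register_trigger_data() interface.
 */
#include "postgres.h"

#include "commands/trigger.h"
#include "executor/spi.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(sketch_transition_trigger);

Datum
sketch_transition_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "not called by trigger manager");

	if (SPI_connect() != SPI_OK_CONNECT)
		elog(ERROR, "SPI_connect failed");

	/* expose tg_oldtable / tg_newtable to SPI queries under their names */
	if (SPI_register_trigger_data(trigdata) != SPI_OK_TD_REGISTER)
		elog(ERROR, "SPI_register_trigger_data failed");

	if (SPI_exec("SELECT * FROM new_rows", 0) != SPI_OK_SELECT)
		elog(ERROR, "could not scan transition table");

	elog(NOTICE, "statement captured %llu new rows",
		 (unsigned long long) SPI_processed);

	SPI_finish();
	return PointerGetDatum(NULL);
}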
5062 : :
5063 : :
5064 : : /* ----------
5065 : : * AfterTriggerBeginXact()
5066 : : *
5067 : : * Called at transaction start (either BEGIN or implicit for single
5068 : : * statement outside of transaction block).
5069 : : * ----------
5070 : : */
5071 : : void
7666 5072 : 317084 : AfterTriggerBeginXact(void)
5073 : : {
5074 : : /*
5075 : : * Initialize after-trigger state structure to empty
5076 : : */
2999 5077 : 317084 : afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
3971 rhaas@postgresql.org 5078 : 317084 : afterTriggers.query_depth = -1;
5079 : :
5080 : : /*
5081 : : * Verify that there is no leftover state remaining. If these assertions
5082 : : * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
5083 : : * up properly.
5084 : : */
5085 [ - + ]: 317084 : Assert(afterTriggers.state == NULL);
5086 [ - + ]: 317084 : Assert(afterTriggers.query_stack == NULL);
5087 [ - + ]: 317084 : Assert(afterTriggers.maxquerydepth == 0);
5088 [ - + ]: 317084 : Assert(afterTriggers.event_cxt == NULL);
5089 [ - + ]: 317084 : Assert(afterTriggers.events.head == NULL);
2912 tgl@sss.pgh.pa.us 5090 [ - + ]: 317084 : Assert(afterTriggers.trans_stack == NULL);
3971 rhaas@postgresql.org 5091 [ - + ]: 317084 : Assert(afterTriggers.maxtransdepth == 0);
7666 tgl@sss.pgh.pa.us 5092 : 317084 : }
5093 : :
5094 : :
5095 : : /* ----------
5096 : : * AfterTriggerBeginQuery()
5097 : : *
5098 : : * Called just before we start processing a single query within a
5099 : : * transaction (or subtransaction). Most of the real work gets deferred
5100 : : * until somebody actually tries to queue a trigger event.
5101 : : * ----------
5102 : : */
5103 : : void
5104 : 204759 : AfterTriggerBeginQuery(void)
5105 : : {
5106 : : /* Increase the query stack depth */
3971 rhaas@postgresql.org 5107 : 204759 : afterTriggers.query_depth++;
9474 JanWieck@Yahoo.com 5108 : 204759 : }
5109 : :
5110 : :
5111 : : /* ----------
5112 : : * AfterTriggerEndQuery()
5113 : : *
5114 : : * Called after one query has been completely processed. At this time
5115 : : * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
5116 : : * transfer deferred trigger events to the global deferred-trigger list.
5117 : : *
5118 : : * Note that this must be called BEFORE closing down the executor
5119 : : * with ExecutorEnd, because we make use of the EState's info about
5120 : : * target relations. Normally it is called from ExecutorFinish.
5121 : : * ----------
5122 : : */
5123 : : void
7470 tgl@sss.pgh.pa.us 5124 : 202480 : AfterTriggerEndQuery(EState *estate)
5125 : : {
5126 : : AfterTriggersQueryData *qs;
5127 : :
5128 : : /* Must be inside a query, too */
3971 rhaas@postgresql.org 5129 [ - + ]: 202480 : Assert(afterTriggers.query_depth >= 0);
5130 : :
5131 : : /*
5132 : : * If we never even got as far as initializing the event stack, there
5133 : : * certainly won't be any events, so exit quickly.
5134 : : */
5135 [ + + ]: 202480 : if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5136 : : {
5137 : 198081 : afterTriggers.query_depth--;
5138 : 198081 : return;
5139 : : }
5140 : :
5141 : : /*
5142 : : * Process all immediate-mode triggers queued by the query, and move the
5143 : : * deferred ones to the main list of deferred events.
5144 : : *
5145 : : * Notice that we decide which ones will be fired, and put the deferred
5146 : : * ones on the main list, before anything is actually fired. This ensures
5147 : : * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
5148 : : * IMMEDIATE: all events we have decided to defer will be available for it
5149 : : * to fire.
5150 : : *
5151 : : * We loop in case a trigger queues more events at the same query level.
5152 : : * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5153 : : * will instead fire any triggers in a dedicated query level. Foreign key
5154 : : * enforcement triggers do add to the current query level, thanks to their
5155 : : * passing fire_triggers = false to SPI_execute_snapshot(). Other
5156 : : * C-language triggers might do likewise.
5157 : : *
5158 : : * If we find no firable events, we don't have to increment
5159 : : * firing_counter.
5160 : : */
2911 tgl@sss.pgh.pa.us 5161 : 4399 : qs = &afterTriggers.query_stack[afterTriggers.query_depth];
5162 : :
5163 : : for (;;)
5164 : : {
2912 5165 [ + + ]: 4549 : if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
5166 : : {
3971 rhaas@postgresql.org 5167 : 3601 : CommandId firing_id = afterTriggers.firing_counter++;
2911 tgl@sss.pgh.pa.us 5168 : 3601 : AfterTriggerEventChunk *oldtail = qs->events.tail;
5169 : :
5170 [ + + ]: 3601 : if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
6161 5171 : 2907 : break; /* all fired */
5172 : :
5173 : : /*
5174 : : * Firing a trigger could result in query_stack being repalloc'd,
5175 : : * so we must recalculate qs after each afterTriggerInvokeEvents
5176 : : * call. Furthermore, it's unsafe to pass delete_ok = true here,
5177 : : * because that could cause afterTriggerInvokeEvents to try to
5178 : : * access qs->events after the stack has been repalloc'd.
5179 : : */
2911 5180 : 150 : qs = &afterTriggers.query_stack[afterTriggers.query_depth];
5181 : :
5182 : : /*
5183 : : * We'll need to scan the events list again. To reduce the cost
5184 : : * of doing so, get rid of completely-fired chunks. We know that
5185 : : * all events were marked IN_PROGRESS or DONE at the conclusion of
5186 : : * afterTriggerMarkEvents, so any still-interesting events must
5187 : : * have been added after that, and so must be in the chunk that
5188 : : * was then the tail chunk, or in later chunks. So, zap all
5189 : : * chunks before oldtail. This is approximately the same set of
5190 : : * events we would have gotten rid of by passing delete_ok = true.
5191 : : */
5192 [ - + ]: 150 : Assert(oldtail != NULL);
5193 [ - + ]: 150 : while (qs->events.head != oldtail)
2911 tgl@sss.pgh.pa.us 5194 :UBC 0 : afterTriggerDeleteHeadEventChunk(qs);
5195 : : }
5196 : : else
6161 tgl@sss.pgh.pa.us 5197 :CBC 942 : break;
5198 : : }
5199 : :
5200 : : /* Release query-level-local storage, including tuplestores if any */
2912 5201 : 3849 : AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5202 : :
5203 : 3849 : afterTriggers.query_depth--;
5204 : : }
5205 : :
5206 : :
5207 : : /*
5208 : : * AfterTriggerFreeQuery
5209 : : * Release subsidiary storage for a trigger query level.
5210 : : * This includes closing down tuplestores.
5211 : : * Note: it's important for this to be safe if interrupted by an error
5212 : : * and then called again for the same query level.
5213 : : */
5214 : : static void
5215 : 3864 : AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
5216 : : {
5217 : : Tuplestorestate *ts;
5218 : : List *tables;
5219 : : ListCell *lc;
5220 : :
5221 : : /* Drop the trigger events */
5222 : 3864 : afterTriggerFreeEventList(&qs->events);
5223 : :
5224 : : /* Drop FDW tuplestore if any */
5225 : 3864 : ts = qs->fdw_tuplestore;
5226 : 3864 : qs->fdw_tuplestore = NULL;
5227 [ + + ]: 3864 : if (ts)
5228 : 18 : tuplestore_end(ts);
5229 : :
5230 : : /* Release per-table subsidiary storage */
5231 : 3864 : tables = qs->tables;
5232 [ + + + + + + ]: 4379 : foreach(lc, tables)
5233 : : {
5234 : 515 : AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
5235 : :
1258 alvherre@alvh.no-ip. 5236 : 515 : ts = table->old_upd_tuplestore;
5237 : 515 : table->old_upd_tuplestore = NULL;
5238 [ + + ]: 515 : if (ts)
5239 : 78 : tuplestore_end(ts);
5240 : 515 : ts = table->new_upd_tuplestore;
5241 : 515 : table->new_upd_tuplestore = NULL;
5242 [ + + ]: 515 : if (ts)
5243 : 81 : tuplestore_end(ts);
5244 : 515 : ts = table->old_del_tuplestore;
5245 : 515 : table->old_del_tuplestore = NULL;
2912 tgl@sss.pgh.pa.us 5246 [ + + ]: 515 : if (ts)
5247 : 63 : tuplestore_end(ts);
1258 alvherre@alvh.no-ip. 5248 : 515 : ts = table->new_ins_tuplestore;
5249 : 515 : table->new_ins_tuplestore = NULL;
2912 tgl@sss.pgh.pa.us 5250 [ + + ]: 515 : if (ts)
5251 : 105 : tuplestore_end(ts);
1652 alvherre@alvh.no-ip. 5252 [ + + ]: 515 : if (table->storeslot)
5253 : : {
1077 tgl@sss.pgh.pa.us 5254 : 42 : TupleTableSlot *slot = table->storeslot;
5255 : :
5256 : 42 : table->storeslot = NULL;
5257 : 42 : ExecDropSingleTupleTableSlot(slot);
5258 : : }
5259 : : }
5260 : :
5261 : : /*
5262 : : * Now free the AfterTriggersTableData structs and list cells. Reset list
5263 : : * pointer first; if list_free_deep somehow gets an error, better to leak
5264 : : * that storage than have an infinite loop.
5265 : : */
2912 5266 : 3864 : qs->tables = NIL;
5267 : 3864 : list_free_deep(tables);
9474 JanWieck@Yahoo.com 5268 : 3864 : }
5269 : :
5270 : :
5271 : : /* ----------
5272 : : * AfterTriggerFireDeferred()
5273 : : *
5274 : : * Called just before the current transaction is committed. At this
5275 : : * time we invoke all pending DEFERRED triggers.
5276 : : *
5277 : : * It is possible for other modules to queue additional deferred triggers
5278 : : * during pre-commit processing; therefore xact.c may have to call this
5279 : : * multiple times.
5280 : : * ----------
5281 : : */
5282 : : void
7453 tgl@sss.pgh.pa.us 5283 : 297470 : AfterTriggerFireDeferred(void)
5284 : : {
5285 : : AfterTriggerEventList *events;
6326 alvherre@alvh.no-ip. 5286 : 297470 : bool snap_pushed = false;
5287 : :
5288 : : /* Must not be inside a query */
3971 rhaas@postgresql.org 5289 [ - + ]: 297470 : Assert(afterTriggers.query_depth == -1);
5290 : :
5291 : : /*
5292 : : * If there are any triggers to fire, make sure we have set a snapshot for
5293 : : * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5294 : : * can't assume ActiveSnapshot is valid on entry.)
5295 : : */
5296 : 297470 : events = &afterTriggers.events;
7453 tgl@sss.pgh.pa.us 5297 [ + + ]: 297470 : if (events->head != NULL)
5298 : : {
6326 alvherre@alvh.no-ip. 5299 : 173 : PushActiveSnapshot(GetTransactionSnapshot());
5300 : 173 : snap_pushed = true;
5301 : : }
5302 : :
5303 : : /*
5304 : : * Run all the remaining triggers. Loop until they are all gone, in case
5305 : : * some trigger queues more for us to do.
5306 : : */
7666 tgl@sss.pgh.pa.us 5307 [ + + ]: 297470 : while (afterTriggerMarkEvents(events, NULL, false))
5308 : : {
3971 rhaas@postgresql.org 5309 : 173 : CommandId firing_id = afterTriggers.firing_counter++;
5310 : :
6161 tgl@sss.pgh.pa.us 5311 [ + - ]: 173 : if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
5312 : 96 : break; /* all fired */
5313 : : }
5314 : :
5315 : : /*
5316 : : * We don't bother freeing the event list, since it will go away anyway
5317 : : * (and more efficiently than via pfree) in AfterTriggerEndXact.
5318 : : */
5319 : :
6326 alvherre@alvh.no-ip. 5320 [ + + ]: 297393 : if (snap_pushed)
5321 : 96 : PopActiveSnapshot();
9474 JanWieck@Yahoo.com 5322 : 297393 : }
5323 : :
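The loop in AfterTriggerFireDeferred() repeats the mark/invoke cycle because a trigger that fires can itself queue more deferred events. A minimal standalone analogue of that shape, assuming nothing about the backend's real data structures (the fixed-size queue and the fire() handler below are invented for illustration), is:

#include <stdbool.h>
#include <stdio.h>

/* Tiny fixed-capacity FIFO standing in for the deferred-event list. */
static int	queue[64];
static int	qhead = 0,
			qtail = 0;

static void
enqueue(int v)
{
	queue[qtail++] = v;
}

static bool
queue_empty(void)
{
	return qhead == qtail;
}

/* "Firing" an item may queue follow-up work, much as a trigger might. */
static void
fire(int v)
{
	printf("firing %d\n", v);
	if (v > 0)
		enqueue(v - 1);
}

int
main(void)
{
	enqueue(3);

	/* Loop until no work remains, in case firing queued more of it. */
	while (!queue_empty())
		fire(queue[qhead++]);
	return 0;
}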
5324 : :
5325 : : /* ----------
5326 : : * AfterTriggerEndXact()
5327 : : *
5328 : : * The current transaction is finishing.
5329 : : *
5330 : : * Any unfired triggers are canceled so we simply throw
5331 : : * away anything we know.
5332 : : *
5333 : : * Note: it is possible for this to be called repeatedly in case of
5334 : : * error during transaction abort; therefore, do not complain if
5335 : : * already closed down.
5336 : : * ----------
5337 : : */
5338 : : void
7453 tgl@sss.pgh.pa.us 5339 : 317290 : AfterTriggerEndXact(bool isCommit)
5340 : : {
5341 : : /*
5342 : : * Forget the pending-events list.
5343 : : *
5344 : : * Since all the info is in TopTransactionContext or children thereof, we
5345 : : * don't really need to do anything to reclaim memory. However, the
5346 : : * pending-events list could be large, and so it's useful to discard it as
5347 : : * soon as possible --- especially if we are aborting because we ran out
5348 : : * of memory for the list!
5349 : : */
3971 rhaas@postgresql.org 5350 [ + + ]: 317290 : if (afterTriggers.event_cxt)
5351 : : {
5352 : 3286 : MemoryContextDelete(afterTriggers.event_cxt);
5353 : 3286 : afterTriggers.event_cxt = NULL;
5354 : 3286 : afterTriggers.events.head = NULL;
5355 : 3286 : afterTriggers.events.tail = NULL;
5356 : 3286 : afterTriggers.events.tailfree = NULL;
5357 : : }
5358 : :
5359 : : /*
5360 : : * Forget any subtransaction state as well. Since this can't be very
5361 : : * large, we let the eventual reset of TopTransactionContext free the
5362 : : * memory instead of doing it here.
5363 : : */
2912 tgl@sss.pgh.pa.us 5364 : 317290 : afterTriggers.trans_stack = NULL;
3971 rhaas@postgresql.org 5365 : 317290 : afterTriggers.maxtransdepth = 0;
5366 : :
5367 : :
5368 : : /*
5369 : : * Forget the query stack and constraint-related state information. As
5370 : : * with the subtransaction state information, we don't bother freeing the
5371 : : * memory here.
5372 : : */
5373 : 317290 : afterTriggers.query_stack = NULL;
5374 : 317290 : afterTriggers.maxquerydepth = 0;
5375 : 317290 : afterTriggers.state = NULL;
5376 : :
5377 : : /* No more afterTriggers manipulation until next transaction starts. */
5378 : 317290 : afterTriggers.query_depth = -1;
9474 JanWieck@Yahoo.com 5379 : 317290 : }
5380 : :
5381 : : /*
5382 : : * AfterTriggerBeginSubXact()
5383 : : *
5384 : : * Start a subtransaction.
5385 : : */
5386 : : void
7666 tgl@sss.pgh.pa.us 5387 : 9090 : AfterTriggerBeginSubXact(void)
5388 : : {
7670 5389 : 9090 : int my_level = GetCurrentTransactionNestLevel();
5390 : :
5391 : : /*
5392 : : * Allocate more space in the trans_stack if needed. (Note: because the
5393 : : * minimum nest level of a subtransaction is 2, we waste the first couple
5394 : : * entries of the array; not worth the notational effort to avoid it.)
5395 : : */
3971 rhaas@postgresql.org 5396 [ + + ]: 10454 : while (my_level >= afterTriggers.maxtransdepth)
5397 : : {
5398 [ + + ]: 1364 : if (afterTriggers.maxtransdepth == 0)
5399 : : {
5400 : : /* Arbitrarily initialize for max of 8 subtransaction levels */
2912 tgl@sss.pgh.pa.us 5401 : 1322 : afterTriggers.trans_stack = (AfterTriggersTransData *)
5402 : 1322 : MemoryContextAlloc(TopTransactionContext,
5403 : : 8 * sizeof(AfterTriggersTransData));
5404 : 1322 : afterTriggers.maxtransdepth = 8;
5405 : : }
5406 : : else
5407 : : {
5408 : : /* repalloc will keep the stack in the same context */
3971 rhaas@postgresql.org 5409 : 42 : int new_alloc = afterTriggers.maxtransdepth * 2;
5410 : :
2912 tgl@sss.pgh.pa.us 5411 : 42 : afterTriggers.trans_stack = (AfterTriggersTransData *)
5412 : 42 : repalloc(afterTriggers.trans_stack,
5413 : : new_alloc * sizeof(AfterTriggersTransData));
3971 rhaas@postgresql.org 5414 : 42 : afterTriggers.maxtransdepth = new_alloc;
5415 : : }
5416 : : }
5417 : :
5418 : : /*
5419 : : * Push the current information into the stack. The SET CONSTRAINTS state
5420 : : * is not saved until/unless changed. Likewise, we don't make a
5421 : : * per-subtransaction event context until needed.
5422 : : */
2912 tgl@sss.pgh.pa.us 5423 : 9090 : afterTriggers.trans_stack[my_level].state = NULL;
5424 : 9090 : afterTriggers.trans_stack[my_level].events = afterTriggers.events;
5425 : 9090 : afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
5426 : 9090 : afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
7737 5427 : 9090 : }
5428 : :
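AfterTriggerBeginSubXact() grows trans_stack lazily: nothing is allocated until the first subtransaction, an initial block of 8 entries is used, and the array doubles thereafter. The same growth pattern can be sketched as a self-contained program that substitutes plain malloc/realloc for the MemoryContext allocator (the Demo* names are made up and error handling is omitted for brevity):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the per-subtransaction save area. */
typedef struct DemoTransData
{
	int			saved_query_depth;
} DemoTransData;

static DemoTransData *demo_stack = NULL;
static int	demo_maxdepth = 0;

/* Make sure my_level is a valid index, starting at 8 and doubling. */
static void
demo_begin_subxact(int my_level)
{
	while (my_level >= demo_maxdepth)
	{
		if (demo_maxdepth == 0)
		{
			demo_stack = malloc(8 * sizeof(DemoTransData));
			demo_maxdepth = 8;
		}
		else
		{
			int			new_alloc = demo_maxdepth * 2;

			demo_stack = realloc(demo_stack,
								 new_alloc * sizeof(DemoTransData));
			demo_maxdepth = new_alloc;
		}
	}
	demo_stack[my_level].saved_query_depth = -1;	/* "push" current state */
}

int
main(void)
{
	for (int level = 2; level <= 20; level++)
		demo_begin_subxact(level);
	printf("allocated %d stack slots\n", demo_maxdepth);
	free(demo_stack);
	return 0;
}
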
5429 : : /*
5430 : : * AfterTriggerEndSubXact()
5431 : : *
5432 : : * The current subtransaction is ending.
5433 : : */
5434 : : void
7666 5435 : 9090 : AfterTriggerEndSubXact(bool isCommit)
5436 : : {
7670 5437 : 9090 : int my_level = GetCurrentTransactionNestLevel();
5438 : : SetConstraintState state;
5439 : : AfterTriggerEvent event;
5440 : : AfterTriggerEventChunk *chunk;
5441 : : CommandId subxact_firing_id;
5442 : :
5443 : : /*
5444 : : * Pop the prior state if needed.
5445 : : */
7737 5446 [ + + ]: 9090 : if (isCommit)
5447 : : {
3971 rhaas@postgresql.org 5448 [ - + ]: 4423 : Assert(my_level < afterTriggers.maxtransdepth);
5449 : : /* If we saved a prior state, we don't need it anymore */
2912 tgl@sss.pgh.pa.us 5450 : 4423 : state = afterTriggers.trans_stack[my_level].state;
7737 5451 [ + + ]: 4423 : if (state != NULL)
5452 : 3 : pfree(state);
5453 : : /* this avoids double pfree if error later: */
2912 5454 : 4423 : afterTriggers.trans_stack[my_level].state = NULL;
3971 rhaas@postgresql.org 5455 [ - + ]: 4423 : Assert(afterTriggers.query_depth ==
5456 : : afterTriggers.trans_stack[my_level].query_depth);
5457 : : }
5458 : : else
5459 : : {
5460 : : /*
5461 : : * Aborting. It is possible subxact start failed before calling
5462 : : * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5463 : : * trans_stack levels that aren't there.
5464 : : */
5465 [ - + ]: 4667 : if (my_level >= afterTriggers.maxtransdepth)
5704 tgl@sss.pgh.pa.us 5466 :UBC 0 : return;
5467 : :
5468 : : /*
5469 : : * Release query-level storage for queries being aborted, and restore
5470 : : * query_depth to its pre-subxact value. This assumes that a
5471 : : * subtransaction will not add events to query levels started in an
5472 : : * earlier transaction state.
5473 : : */
2912 tgl@sss.pgh.pa.us 5474 [ + + ]:CBC 4712 : while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
5475 : : {
3971 rhaas@postgresql.org 5476 [ + + ]: 45 : if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
2912 tgl@sss.pgh.pa.us 5477 : 15 : AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
3971 rhaas@postgresql.org 5478 : 45 : afterTriggers.query_depth--;
5479 : : }
5480 [ - + ]: 4667 : Assert(afterTriggers.query_depth ==
5481 : : afterTriggers.trans_stack[my_level].query_depth);
5482 : :
5483 : : /*
5484 : : * Restore the global deferred-event list to its former length,
5485 : : * discarding any events queued by the subxact.
5486 : : */
5487 : 4667 : afterTriggerRestoreEventList(&afterTriggers.events,
2912 tgl@sss.pgh.pa.us 5488 : 4667 : &afterTriggers.trans_stack[my_level].events);
5489 : :
5490 : : /*
5491 : : * Restore the trigger state. If the saved state is NULL, then this
5492 : : * subxact didn't save it, so it doesn't need restoring.
5493 : : */
5494 : 4667 : state = afterTriggers.trans_stack[my_level].state;
7737 5495 [ + + ]: 4667 : if (state != NULL)
5496 : : {
3971 rhaas@postgresql.org 5497 : 2 : pfree(afterTriggers.state);
5498 : 2 : afterTriggers.state = state;
5499 : : }
5500 : : /* this avoids double pfree if error later: */
2912 tgl@sss.pgh.pa.us 5501 : 4667 : afterTriggers.trans_stack[my_level].state = NULL;
5502 : :
5503 : : /*
5504 : : * Scan for any remaining deferred events that were marked DONE or IN
5505 : : * PROGRESS by this subxact or a child, and un-mark them. We can
5506 : : * recognize such events because they have a firing ID greater than or
5507 : : * equal to the firing_counter value we saved at subtransaction start.
5508 : : * (This essentially assumes that the current subxact includes all
5509 : : * subxacts started after it.)
5510 : : */
5511 : 4667 : subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
3971 rhaas@postgresql.org 5512 [ + - + - : 4689 : for_each_event_chunk(event, chunk, afterTriggers.events)
+ - + + +
+ ]
5513 : : {
6161 tgl@sss.pgh.pa.us 5514 : 11 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
5515 : :
5516 [ + + ]: 11 : if (event->ate_flags &
5517 : : (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
5518 : : {
5519 [ + - ]: 2 : if (evtshared->ats_firing_id >= subxact_firing_id)
5520 : 2 : event->ate_flags &=
5521 : : ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
5522 : : }
5523 : : }
5524 : : }
5525 : : }
5526 : :
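The abort path above relies on the monotonically increasing firing counter: anything marked DONE or IN PROGRESS with a firing ID at or after the value saved at subxact start must have been fired by the aborted subxact (or one of its children), so it gets un-marked. A rough standalone illustration of that idea, with invented structures and a single DONE flag rather than the real two-flag scheme:

#include <stdio.h>

#define DEMO_DONE	0x1

typedef struct DemoFiredEvent
{
	unsigned int flags;
	unsigned int firing_id;
} DemoFiredEvent;

static unsigned int firing_counter = 1;

/* Mark an event done under the current firing counter. */
static void
demo_fire(DemoFiredEvent *ev)
{
	ev->firing_id = firing_counter++;
	ev->flags |= DEMO_DONE;
}

/*
 * On subxact abort, un-mark everything fired at or after the counter value
 * saved at subxact start, so the parent can fire it again.
 */
static void
demo_rollback_to(DemoFiredEvent *events, int n, unsigned int saved_counter)
{
	for (int i = 0; i < n; i++)
	{
		if ((events[i].flags & DEMO_DONE) &&
			events[i].firing_id >= saved_counter)
			events[i].flags &= ~DEMO_DONE;
	}
}

int
main(void)
{
	DemoFiredEvent events[3] = {{0, 0}, {0, 0}, {0, 0}};
	unsigned int saved;

	demo_fire(&events[0]);		/* fired before the subxact */
	saved = firing_counter;		/* subxact starts here */
	demo_fire(&events[1]);
	demo_fire(&events[2]);
	demo_rollback_to(events, 3, saved);	/* subxact aborts */

	for (int i = 0; i < 3; i++)
		printf("event %d done=%d\n", i, (events[i].flags & DEMO_DONE) != 0);
	return 0;
}
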
5527 : : /*
5528 : : * Get the transition table for the given event, depending on whether we are
5529 : : * processing the old or the new tuple.
5530 : : */
5531 : : static Tuplestorestate *
1275 alvherre@alvh.no-ip. 5532 : 33063 : GetAfterTriggersTransitionTable(int event,
5533 : : TupleTableSlot *oldslot,
5534 : : TupleTableSlot *newslot,
5535 : : TransitionCaptureState *transition_capture)
5536 : : {
5537 : 33063 : Tuplestorestate *tuplestore = NULL;
5538 : 33063 : bool delete_old_table = transition_capture->tcs_delete_old_table;
5539 : 33063 : bool update_old_table = transition_capture->tcs_update_old_table;
5540 : 33063 : bool update_new_table = transition_capture->tcs_update_new_table;
5541 : 33063 : bool insert_new_table = transition_capture->tcs_insert_new_table;
5542 : :
5543 : : /*
5544 : : * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5545 : : * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5546 : : * non-NULL. But for UPDATE events fired for capturing transition tuples
5547 : : * during UPDATE partition-key row movement, OLD is NULL when the event is
5548 : : * for a row being inserted, whereas NEW is NULL when the event is for a
5549 : : * row being deleted.
5550 : : */
5551 [ + + + - : 33063 : Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
+ - - + ]
5552 : : TupIsNull(oldslot)));
5553 [ + + + - : 33063 : Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
+ - - + ]
5554 : : TupIsNull(newslot)));
5555 : :
5556 [ + + + - ]: 33063 : if (!TupIsNull(oldslot))
5557 : : {
5558 [ - + - - ]: 2706 : Assert(TupIsNull(newslot));
5559 [ + + + - ]: 2706 : if (event == TRIGGER_EVENT_DELETE && delete_old_table)
1258 5560 : 2526 : tuplestore = transition_capture->tcs_private->old_del_tuplestore;
1275 5561 [ + - + + ]: 180 : else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
1258 5562 : 168 : tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5563 : : }
1275 5564 [ + - + - ]: 30357 : else if (!TupIsNull(newslot))
5565 : : {
5566 [ - + - - ]: 30357 : Assert(TupIsNull(oldslot));
5567 [ + + + - ]: 30357 : if (event == TRIGGER_EVENT_INSERT && insert_new_table)
1258 5568 : 30177 : tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
1275 5569 [ + - + + ]: 180 : else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
1258 5570 : 177 : tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5571 : : }
5572 : :
1275 5573 : 33063 : return tuplestore;
5574 : : }
5575 : :
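The routine above simply routes a tuple to one of four per-table tuplestores based on the event type and on whether it was handed an old or a new tuple (the real code is additionally gated by the tcs_*_table capture flags). A toy version of that dispatch, using invented types in place of the backend's Tuplestorestate and TransitionCaptureState:

#include <stdbool.h>
#include <stdio.h>

typedef enum
{
	EV_INSERT,
	EV_UPDATE,
	EV_DELETE
} DemoEventKind;

/* Stand-in for the four transition stores kept per table + operation. */
typedef struct DemoStores
{
	const char *old_del;
	const char *old_upd;
	const char *new_ins;
	const char *new_upd;
} DemoStores;

/*
 * DELETE and UPDATE feed the "old" stores when an old tuple is supplied;
 * INSERT and UPDATE feed the "new" stores when a new tuple is supplied.
 */
static const char *
demo_pick_store(DemoEventKind event, bool have_old, bool have_new,
				const DemoStores *stores)
{
	if (have_old)
	{
		if (event == EV_DELETE)
			return stores->old_del;
		if (event == EV_UPDATE)
			return stores->old_upd;
	}
	else if (have_new)
	{
		if (event == EV_INSERT)
			return stores->new_ins;
		if (event == EV_UPDATE)
			return stores->new_upd;
	}
	return NULL;
}

int
main(void)
{
	DemoStores	s = {"old_del", "old_upd", "new_ins", "new_upd"};

	printf("%s\n", demo_pick_store(EV_UPDATE, true, false, &s));
	printf("%s\n", demo_pick_store(EV_INSERT, false, true, &s));
	return 0;
}
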
5576 : : /*
5577 : : * Add the given heap tuple to the given tuplestore, applying the conversion
5578 : : * map if necessary.
5579 : : *
5580 : : * If original_insert_tuple is given, we can add that tuple without conversion.
5581 : : */
5582 : : static void
5583 : 33063 : TransitionTableAddTuple(EState *estate,
5584 : : TransitionCaptureState *transition_capture,
5585 : : ResultRelInfo *relinfo,
5586 : : TupleTableSlot *slot,
5587 : : TupleTableSlot *original_insert_tuple,
5588 : : Tuplestorestate *tuplestore)
5589 : : {
5590 : : TupleConversionMap *map;
5591 : :
5592 : : /*
5593 : : * Nothing needs to be done if we don't have a tuplestore.
5594 : : */
5595 [ + + ]: 33063 : if (tuplestore == NULL)
5596 : 15 : return;
5597 : :
5598 [ + + ]: 33048 : if (original_insert_tuple)
5599 : 72 : tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5600 [ + + ]: 32976 : else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5601 : : {
5602 : 147 : AfterTriggersTableData *table = transition_capture->tcs_private;
5603 : : TupleTableSlot *storeslot;
5604 : :
5605 : 147 : storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5606 : 147 : execute_attr_map_slot(map->attrMap, slot, storeslot);
5607 : 147 : tuplestore_puttupleslot(tuplestore, storeslot);
5608 : : }
5609 : : else
5610 : 32829 : tuplestore_puttupleslot(tuplestore, slot);
5611 : : }
5612 : :
5613 : : /* ----------
5614 : : * AfterTriggerEnlargeQueryState()
5615 : : *
5616 : : * Prepare the necessary state so that we can record AFTER trigger events
5617 : : * queued by a query. It is allowed to have nested queries within a
5618 : : * (sub)transaction, so we need to have separate state for each query
5619 : : * nesting level.
5620 : : * ----------
5621 : : */
5622 : : static void
3971 rhaas@postgresql.org 5623 : 3471 : AfterTriggerEnlargeQueryState(void)
5624 : : {
3759 bruce@momjian.us 5625 : 3471 : int init_depth = afterTriggers.maxquerydepth;
5626 : :
3971 rhaas@postgresql.org 5627 [ - + ]: 3471 : Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
5628 : :
5629 [ + - ]: 3471 : if (afterTriggers.maxquerydepth == 0)
5630 : : {
3970 5631 : 3471 : int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5632 : :
2912 tgl@sss.pgh.pa.us 5633 : 3471 : afterTriggers.query_stack = (AfterTriggersQueryData *)
3971 rhaas@postgresql.org 5634 : 3471 : MemoryContextAlloc(TopTransactionContext,
5635 : : new_alloc * sizeof(AfterTriggersQueryData));
5636 : 3471 : afterTriggers.maxquerydepth = new_alloc;
5637 : : }
5638 : : else
5639 : : {
5640 : : /* repalloc will keep the stack in the same context */
3971 rhaas@postgresql.org 5641 :UBC 0 : int old_alloc = afterTriggers.maxquerydepth;
3970 5642 : 0 : int new_alloc = Max(afterTriggers.query_depth + 1,
5643 : : old_alloc * 2);
5644 : :
2912 tgl@sss.pgh.pa.us 5645 : 0 : afterTriggers.query_stack = (AfterTriggersQueryData *)
3971 rhaas@postgresql.org 5646 : 0 : repalloc(afterTriggers.query_stack,
5647 : : new_alloc * sizeof(AfterTriggersQueryData));
5648 : 0 : afterTriggers.maxquerydepth = new_alloc;
5649 : : }
5650 : :
5651 : : /* Initialize new array entries to empty */
3971 rhaas@postgresql.org 5652 [ + + ]:CBC 31239 : while (init_depth < afterTriggers.maxquerydepth)
5653 : : {
2912 tgl@sss.pgh.pa.us 5654 : 27768 : AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
5655 : :
5656 : 27768 : qs->events.head = NULL;
5657 : 27768 : qs->events.tail = NULL;
5658 : 27768 : qs->events.tailfree = NULL;
5659 : 27768 : qs->fdw_tuplestore = NULL;
5660 : 27768 : qs->tables = NIL;
5661 : :
3971 rhaas@postgresql.org 5662 : 27768 : ++init_depth;
5663 : : }
5664 : 3471 : }
5665 : :
5666 : : /*
5667 : : * Create an empty SetConstraintState with room for numalloc trigstates
5668 : : */
5669 : : static SetConstraintState
7666 tgl@sss.pgh.pa.us 5670 : 48 : SetConstraintStateCreate(int numalloc)
5671 : : {
5672 : : SetConstraintState state;
5673 : :
5674 : : /* Behave sanely with numalloc == 0 */
7737 5675 [ + + ]: 48 : if (numalloc <= 0)
5676 : 5 : numalloc = 1;
5677 : :
5678 : : /*
5679 : : * We assume that zeroing will correctly initialize the state values.
5680 : : */
5681 : : state = (SetConstraintState)
5682 : 48 : MemoryContextAllocZero(TopTransactionContext,
5683 : : offsetof(SetConstraintStateData, trigstates) +
3759 bruce@momjian.us 5684 : 48 : numalloc * sizeof(SetConstraintTriggerData));
5685 : :
7737 tgl@sss.pgh.pa.us 5686 : 48 : state->numalloc = numalloc;
5687 : :
5688 : 48 : return state;
5689 : : }
5690 : :
5691 : : /*
5692 : : * Copy a SetConstraintState
5693 : : */
5694 : : static SetConstraintState
7666 5695 : 5 : SetConstraintStateCopy(SetConstraintState origstate)
5696 : : {
5697 : : SetConstraintState state;
5698 : :
5699 : 5 : state = SetConstraintStateCreate(origstate->numstates);
5700 : :
7737 5701 : 5 : state->all_isset = origstate->all_isset;
5702 : 5 : state->all_isdeferred = origstate->all_isdeferred;
5703 : 5 : state->numstates = origstate->numstates;
5704 : 5 : memcpy(state->trigstates, origstate->trigstates,
7666 5705 : 5 : origstate->numstates * sizeof(SetConstraintTriggerData));
5706 : :
7737 5707 : 5 : return state;
5708 : : }
5709 : :
5710 : : /*
5711 : : * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5712 : : * pointer to the state object (it will change if we have to repalloc).
5713 : : */
5714 : : static SetConstraintState
7666 5715 : 171 : SetConstraintStateAddItem(SetConstraintState state,
5716 : : Oid tgoid, bool tgisdeferred)
5717 : : {
7737 5718 [ + + ]: 171 : if (state->numstates >= state->numalloc)
5719 : : {
7678 bruce@momjian.us 5720 : 15 : int newalloc = state->numalloc * 2;
5721 : :
5722 : 15 : newalloc = Max(newalloc, 8); /* in case original has size 0 */
5723 : : state = (SetConstraintState)
7737 tgl@sss.pgh.pa.us 5724 : 15 : repalloc(state,
5725 : : offsetof(SetConstraintStateData, trigstates) +
3851 5726 : 15 : newalloc * sizeof(SetConstraintTriggerData));
7737 5727 : 15 : state->numalloc = newalloc;
5728 [ - + ]: 15 : Assert(state->numstates < state->numalloc);
5729 : : }
5730 : :
7666 5731 : 171 : state->trigstates[state->numstates].sct_tgoid = tgoid;
5732 : 171 : state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
7737 5733 : 171 : state->numstates++;
5734 : :
5735 : 171 : return state;
5736 : : }
5737 : :
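SetConstraintStateData keeps its per-trigger entries in a flexible array member, so both the initial allocation and the doubling growth size the block as offsetof(header, array) plus n array elements, and the possibly-moved pointer is handed back to the caller. A self-contained sketch of the same pattern with ordinary calloc/realloc (Demo* names invented, error checks omitted):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct DemoTrigState
{
	unsigned int tgoid;
	bool		tgisdeferred;
} DemoTrigState;

typedef struct DemoConstraintState
{
	int			numstates;
	int			numalloc;
	DemoTrigState trigstates[];	/* flexible array member */
} DemoConstraintState;

/* Allocate zeroed storage with room for numalloc per-trigger entries. */
static DemoConstraintState *
demo_state_create(int numalloc)
{
	DemoConstraintState *state;

	if (numalloc <= 0)
		numalloc = 1;
	state = calloc(1, offsetof(DemoConstraintState, trigstates) +
				   numalloc * sizeof(DemoTrigState));
	state->numalloc = numalloc;
	return state;
}

/* Append an entry, doubling when full; returns possibly-moved pointer. */
static DemoConstraintState *
demo_state_add(DemoConstraintState *state, unsigned int tgoid, bool deferred)
{
	if (state->numstates >= state->numalloc)
	{
		int			newalloc = state->numalloc * 2;

		state = realloc(state, offsetof(DemoConstraintState, trigstates) +
						newalloc * sizeof(DemoTrigState));
		state->numalloc = newalloc;
	}
	state->trigstates[state->numstates].tgoid = tgoid;
	state->trigstates[state->numstates].tgisdeferred = deferred;
	state->numstates++;
	return state;
}

int
main(void)
{
	DemoConstraintState *st = demo_state_create(0);

	for (unsigned int i = 1; i <= 10; i++)
		st = demo_state_add(st, i, true);
	printf("%d entries in %d slots\n", st->numstates, st->numalloc);
	free(st);
	return 0;
}
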
5738 : : /* ----------
5739 : : * AfterTriggerSetState()
5740 : : *
5741 : : * Execute the SET CONSTRAINTS ... utility command.
5742 : : * ----------
5743 : : */
5744 : : void
7666 5745 : 51 : AfterTriggerSetState(ConstraintsSetStmt *stmt)
5746 : : {
7670 5747 : 51 : int my_level = GetCurrentTransactionNestLevel();
5748 : :
5749 : : /* If we haven't already done so, initialize our state. */
3971 rhaas@postgresql.org 5750 [ + + ]: 51 : if (afterTriggers.state == NULL)
5751 : 43 : afterTriggers.state = SetConstraintStateCreate(8);
5752 : :
5753 : : /*
5754 : : * If in a subtransaction, and we didn't save the current state already,
5755 : : * save it so it can be restored if the subtransaction aborts.
5756 : : */
7670 tgl@sss.pgh.pa.us 5757 [ + + ]: 51 : if (my_level > 1 &&
2912 5758 [ + - ]: 5 : afterTriggers.trans_stack[my_level].state == NULL)
5759 : : {
5760 : 5 : afterTriggers.trans_stack[my_level].state =
3971 rhaas@postgresql.org 5761 : 5 : SetConstraintStateCopy(afterTriggers.state);
5762 : : }
5763 : :
5764 : : /*
5765 : : * Handle SET CONSTRAINTS ALL ...
5766 : : */
9278 bruce@momjian.us 5767 [ + + ]: 51 : if (stmt->constraints == NIL)
5768 : : {
5769 : : /*
5770 : : * Forget any previous SET CONSTRAINTS commands in this transaction.
5771 : : */
3971 rhaas@postgresql.org 5772 : 27 : afterTriggers.state->numstates = 0;
5773 : :
5774 : : /*
5775 : : * Set the per-transaction ALL state to known.
5776 : : */
5777 : 27 : afterTriggers.state->all_isset = true;
5778 : 27 : afterTriggers.state->all_isdeferred = stmt->deferred;
5779 : : }
5780 : : else
5781 : : {
5782 : : Relation conrel;
5783 : : Relation tgrel;
5711 tgl@sss.pgh.pa.us 5784 : 24 : List *conoidlist = NIL;
5785 : 24 : List *tgoidlist = NIL;
5786 : : ListCell *lc;
5787 : :
5788 : : /*
5789 : : * Handle SET CONSTRAINTS constraint-name [, ...]
5790 : : *
5791 : : * First, identify all the named constraints and make a list of their
5792 : : * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5793 : : * the same name within a schema, the specifications are not
5794 : : * necessarily unique. Our strategy is to target all matching
5795 : : * constraints within the first search-path schema that has any
5796 : : * matches, but disregard matches in schemas beyond the first match.
5797 : : * (This is a bit odd but it's the historical behavior.)
5798 : : *
5799 : : * A constraint in a partitioned table may have corresponding
5800 : : * constraints in the partitions. Grab those too.
5801 : : */
2420 andres@anarazel.de 5802 : 24 : conrel = table_open(ConstraintRelationId, AccessShareLock);
5803 : :
5711 tgl@sss.pgh.pa.us 5804 [ + - + + : 48 : foreach(lc, stmt->constraints)
+ + ]
5805 : : {
5806 : 24 : RangeVar *constraint = lfirst(lc);
5807 : : bool found;
5808 : : List *namespacelist;
5809 : : ListCell *nslc;
5810 : :
7072 bruce@momjian.us 5811 [ - + ]: 24 : if (constraint->catalogname)
5812 : : {
7072 bruce@momjian.us 5813 [ # # ]:UBC 0 : if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
5814 [ # # ]: 0 : ereport(ERROR,
5815 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5816 : : errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5817 : : constraint->catalogname, constraint->schemaname,
5818 : : constraint->relname)));
5819 : : }
5820 : :
5821 : : /*
5822 : : * If we're given the schema name with the constraint, look only
5823 : : * in that schema. If given a bare constraint name, use the
5824 : : * search path to find the first matching constraint.
5825 : : */
6912 bruce@momjian.us 5826 [ + + ]:CBC 24 : if (constraint->schemaname)
5827 : : {
4606 5828 : 6 : Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
5829 : : false);
5830 : :
5711 tgl@sss.pgh.pa.us 5831 : 6 : namespacelist = list_make1_oid(namespaceId);
5832 : : }
5833 : : else
5834 : : {
5835 : 18 : namespacelist = fetch_search_path(true);
5836 : : }
5837 : :
9474 JanWieck@Yahoo.com 5838 : 24 : found = false;
5711 tgl@sss.pgh.pa.us 5839 [ + - + - : 60 : foreach(nslc, namespacelist)
+ - ]
5840 : : {
5841 : 60 : Oid namespaceId = lfirst_oid(nslc);
5842 : : SysScanDesc conscan;
5843 : : ScanKeyData skey[2];
5844 : : HeapTuple tup;
5845 : :
5846 : 60 : ScanKeyInit(&skey[0],
5847 : : Anum_pg_constraint_conname,
5848 : : BTEqualStrategyNumber, F_NAMEEQ,
5849 : 60 : CStringGetDatum(constraint->relname));
5850 : 60 : ScanKeyInit(&skey[1],
5851 : : Anum_pg_constraint_connamespace,
5852 : : BTEqualStrategyNumber, F_OIDEQ,
5853 : : ObjectIdGetDatum(namespaceId));
5854 : :
5855 : 60 : conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
5856 : : true, NULL, 2, skey);
5857 : :
5858 [ + + ]: 108 : while (HeapTupleIsValid(tup = systable_getnext(conscan)))
5859 : : {
5860 : 48 : Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
5861 : :
5862 [ + - ]: 48 : if (con->condeferrable)
2482 andres@anarazel.de 5863 : 48 : conoidlist = lappend_oid(conoidlist, con->oid);
5711 tgl@sss.pgh.pa.us 5864 [ # # ]:UBC 0 : else if (stmt->deferred)
5865 [ # # ]: 0 : ereport(ERROR,
5866 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
5867 : : errmsg("constraint \"%s\" is not deferrable",
5868 : : constraint->relname)));
7072 bruce@momjian.us 5869 :CBC 48 : found = true;
5870 : : }
5871 : :
5711 tgl@sss.pgh.pa.us 5872 : 60 : systable_endscan(conscan);
5873 : :
5874 : : /*
5875 : : * Once we've found a matching constraint we do not search
5876 : : * later parts of the search path.
5877 : : */
7072 bruce@momjian.us 5878 [ + + ]: 60 : if (found)
5879 : 24 : break;
5880 : : }
5881 : :
5711 tgl@sss.pgh.pa.us 5882 : 24 : list_free(namespacelist);
5883 : :
5884 : : /*
5885 : : * Not found?
5886 : : */
8421 bruce@momjian.us 5887 [ - + ]: 24 : if (!found)
8084 tgl@sss.pgh.pa.us 5888 [ # # ]:UBC 0 : ereport(ERROR,
5889 : : (errcode(ERRCODE_UNDEFINED_OBJECT),
5890 : : errmsg("constraint \"%s\" does not exist",
5891 : : constraint->relname)));
5892 : : }
5893 : :
5894 : : /*
5895 : : * Scan for any possible descendants of the constraints. We append
5896 : : * whatever we find to the same list that we're scanning; this has the
5897 : : * effect that we create new scans for those, too, so if there are
5898 : : * further descendants, we'll also catch them.
5899 : : */
2724 alvherre@alvh.no-ip. 5900 [ + - + + :CBC 129 : foreach(lc, conoidlist)
+ + ]
5901 : : {
5902 : 105 : Oid parent = lfirst_oid(lc);
5903 : : ScanKeyData key;
5904 : : SysScanDesc scan;
5905 : : HeapTuple tuple;
5906 : :
5907 : 105 : ScanKeyInit(&key,
5908 : : Anum_pg_constraint_conparentid,
5909 : : BTEqualStrategyNumber, F_OIDEQ,
5910 : : ObjectIdGetDatum(parent));
5911 : :
5912 : 105 : scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);
5913 : :
5914 [ + + ]: 162 : while (HeapTupleIsValid(tuple = systable_getnext(scan)))
5915 : : {
2482 andres@anarazel.de 5916 : 57 : Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
5917 : :
5918 : 57 : conoidlist = lappend_oid(conoidlist, con->oid);
5919 : : }
5920 : :
2724 alvherre@alvh.no-ip. 5921 : 105 : systable_endscan(scan);
5922 : : }
5923 : :
2420 andres@anarazel.de 5924 : 24 : table_close(conrel, AccessShareLock);
5925 : :
5926 : : /*
5927 : : * Now, locate the trigger(s) implementing each of these constraints,
5928 : : * and make a list of their OIDs.
5929 : : */
5930 : 24 : tgrel = table_open(TriggerRelationId, AccessShareLock);
5931 : :
5711 tgl@sss.pgh.pa.us 5932 [ + - + + : 129 : foreach(lc, conoidlist)
+ + ]
5933 : : {
5934 : 105 : Oid conoid = lfirst_oid(lc);
5935 : : ScanKeyData skey;
5936 : : SysScanDesc tgscan;
5937 : : HeapTuple htup;
5938 : :
5939 : 105 : ScanKeyInit(&skey,
5940 : : Anum_pg_trigger_tgconstraint,
5941 : : BTEqualStrategyNumber, F_OIDEQ,
5942 : : ObjectIdGetDatum(conoid));
5943 : :
5944 : 105 : tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5945 : : NULL, 1, &skey);
5946 : :
5947 [ + + ]: 429 : while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5948 : : {
5949 : 219 : Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5950 : :
5951 : : /*
5952 : : * Silently skip triggers that are marked as non-deferrable in
5953 : : * pg_trigger. This is not an error condition, since a
5954 : : * deferrable RI constraint may have some non-deferrable
5955 : : * actions.
5956 : : */
5957 [ + - ]: 219 : if (pg_trigger->tgdeferrable)
2482 andres@anarazel.de 5958 : 219 : tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
5959 : : }
5960 : :
5711 tgl@sss.pgh.pa.us 5961 : 105 : systable_endscan(tgscan);
5962 : : }
5963 : :
2420 andres@anarazel.de 5964 : 24 : table_close(tgrel, AccessShareLock);
5965 : :
5966 : : /*
5967 : : * Now we can set the trigger states of individual triggers for this
5968 : : * xact.
5969 : : */
5711 tgl@sss.pgh.pa.us 5970 [ + - + + : 243 : foreach(lc, tgoidlist)
+ + ]
5971 : : {
5972 : 219 : Oid tgoid = lfirst_oid(lc);
3971 rhaas@postgresql.org 5973 : 219 : SetConstraintState state = afterTriggers.state;
7737 tgl@sss.pgh.pa.us 5974 : 219 : bool found = false;
5975 : : int i;
5976 : :
7668 5977 [ + + ]: 1224 : for (i = 0; i < state->numstates; i++)
5978 : : {
7666 5979 [ + + ]: 1053 : if (state->trigstates[i].sct_tgoid == tgoid)
5980 : : {
5981 : 48 : state->trigstates[i].sct_tgisdeferred = stmt->deferred;
9474 JanWieck@Yahoo.com 5982 : 48 : found = true;
5983 : 48 : break;
5984 : : }
5985 : : }
5986 [ + + ]: 219 : if (!found)
5987 : : {
3971 rhaas@postgresql.org 5988 : 171 : afterTriggers.state =
7666 tgl@sss.pgh.pa.us 5989 : 171 : SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5990 : : }
5991 : : }
5992 : : }
5993 : :
5994 : : /*
5995 : : * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5996 : : * checks against that constraint must be made when the SET CONSTRAINTS
5997 : : * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5998 : : * apply retroactively. We've updated the constraints state, so scan the
5999 : : * list of previously deferred events to fire any that have now become
6000 : : * immediate.
6001 : : *
6002 : : * Obviously, if this was SET ... DEFERRED then it can't have converted
6003 : : * any unfired events to immediate, so we need do nothing in that case.
6004 : : */
6005 [ + + ]: 51 : if (!stmt->deferred)
6006 : : {
3971 rhaas@postgresql.org 6007 : 17 : AfterTriggerEventList *events = &afterTriggers.events;
6111 tgl@sss.pgh.pa.us 6008 : 17 : bool snapshot_set = false;
6009 : :
6597 6010 [ + + ]: 17 : while (afterTriggerMarkEvents(events, NULL, true))
6011 : : {
3971 rhaas@postgresql.org 6012 : 8 : CommandId firing_id = afterTriggers.firing_counter++;
6013 : :
6014 : : /*
6015 : : * Make sure a snapshot has been established in case trigger
6016 : : * functions need one. Note that we avoid setting a snapshot if
6017 : : * we don't find at least one trigger that has to be fired now.
6018 : : * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
6019 : : * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
6020 : : * at the start of a transaction it's not possible for any trigger
6021 : : * events to be queued yet.)
6022 : : */
6111 tgl@sss.pgh.pa.us 6023 [ + - ]: 8 : if (!snapshot_set)
6024 : : {
6025 : 8 : PushActiveSnapshot(GetTransactionSnapshot());
6026 : 8 : snapshot_set = true;
6027 : : }
6028 : :
6029 : : /*
6030 : : * We can delete fired events if we are at top transaction level,
6031 : : * but we'd better not if inside a subtransaction, since the
6032 : : * subtransaction could later get rolled back.
6033 : : */
6161 tgl@sss.pgh.pa.us 6034 [ # # ]:UBC 0 : if (afterTriggerInvokeEvents(events, firing_id, NULL,
6161 tgl@sss.pgh.pa.us 6035 :CBC 8 : !IsSubTransaction()))
6161 tgl@sss.pgh.pa.us 6036 :UBC 0 : break; /* all fired */
6037 : : }
6038 : :
6111 tgl@sss.pgh.pa.us 6039 [ - + ]:CBC 9 : if (snapshot_set)
6111 tgl@sss.pgh.pa.us 6040 :UBC 0 : PopActiveSnapshot();
6041 : : }
9474 JanWieck@Yahoo.com 6042 :CBC 43 : }
6043 : :
6044 : : /* ----------
6045 : : * AfterTriggerPendingOnRel()
6046 : : * Test to see if there are any pending after-trigger events for rel.
6047 : : *
6048 : : * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
6049 : : * it is unsafe to perform major surgery on a relation. Note that only
6050 : : * local pending events are examined. We assume that having exclusive lock
6051 : : * on a rel guarantees there are no unserviced events in other backends ---
6052 : : * but having a lock does not prevent there being such events in our own.
6053 : : *
6054 : : * In some scenarios it'd be reasonable to remove pending events (more
6055 : : * specifically, mark them DONE by the current subxact) but without a lot
6056 : : * of knowledge of the trigger semantics we can't do this in general.
6057 : : * ----------
6058 : : */
6059 : : bool
6457 tgl@sss.pgh.pa.us 6060 : 66032 : AfterTriggerPendingOnRel(Oid relid)
6061 : : {
6062 : : AfterTriggerEvent event;
6063 : : AfterTriggerEventChunk *chunk;
6064 : : int depth;
6065 : :
6066 : : /* Scan queued events */
3971 rhaas@postgresql.org 6067 [ + - + - : 66050 : for_each_event_chunk(event, chunk, afterTriggers.events)
+ - + + +
+ ]
6068 : : {
6161 tgl@sss.pgh.pa.us 6069 : 18 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
6070 : :
6071 : : /*
6072 : : * We can ignore completed events. (Even if a DONE flag is rolled
6073 : : * back by subxact abort, it's OK because the effects of the TRUNCATE
6074 : : * or whatever must get rolled back too.)
6075 : : */
6076 [ - + ]: 18 : if (event->ate_flags & AFTER_TRIGGER_DONE)
6942 tgl@sss.pgh.pa.us 6077 :UBC 0 : continue;
6078 : :
6161 tgl@sss.pgh.pa.us 6079 [ + + ]:CBC 18 : if (evtshared->ats_relid == relid)
6457 6080 : 9 : return true;
6081 : : }
6082 : :
6083 : : /*
6084 : : * Also scan events queued by incomplete queries. This could only matter
6085 : : * if TRUNCATE/etc is executed by a function or trigger within an updating
6086 : : * query on the same relation, which is pretty perverse, but let's check.
6087 : : */
3953 rhaas@postgresql.org 6088 [ - + - - ]: 66023 : for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
6089 : : {
2912 tgl@sss.pgh.pa.us 6090 [ # # # # :UBC 0 : for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
# # # # #
# ]
6091 : : {
6161 6092 : 0 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
6093 : :
6094 [ # # ]: 0 : if (event->ate_flags & AFTER_TRIGGER_DONE)
6942 6095 : 0 : continue;
6096 : :
6161 6097 [ # # ]: 0 : if (evtshared->ats_relid == relid)
6457 6098 : 0 : return true;
6099 : : }
6100 : : }
6101 : :
6457 tgl@sss.pgh.pa.us 6102 :CBC 66023 : return false;
6103 : : }
6104 : :
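AfterTriggerPendingOnRel() is a straightforward two-level scan: walk the chunks of the event list, walk the events packed into each chunk, skip anything already marked DONE, and report a match on the relation OID. The sketch below mirrors that shape with a made-up chunk layout (the real chunks pack variable-size events and shared trigger data rather than a plain array):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_DONE	0x1

typedef struct DemoChunkEvent
{
	unsigned int flags;
	unsigned int relid;
} DemoChunkEvent;

typedef struct DemoChunk
{
	struct DemoChunk *next;
	int			nevents;
	DemoChunkEvent events[16];
} DemoChunk;

/* Chunks first, then events within each chunk, ignoring completed ones. */
static bool
demo_pending_on_rel(DemoChunk *head, unsigned int relid)
{
	for (DemoChunk *chunk = head; chunk != NULL; chunk = chunk->next)
	{
		for (int i = 0; i < chunk->nevents; i++)
		{
			DemoChunkEvent *event = &chunk->events[i];

			if (event->flags & DEMO_DONE)
				continue;
			if (event->relid == relid)
				return true;
		}
	}
	return false;
}

int
main(void)
{
	DemoChunk	c2 = {NULL, 1, {{0, 42}}};
	DemoChunk	c1 = {&c2, 2, {{DEMO_DONE, 42}, {0, 7}}};

	printf("pending on 42: %d\n", demo_pending_on_rel(&c1, 42));
	printf("pending on 99: %d\n", demo_pending_on_rel(&c1, 99));
	return 0;
}
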
6105 : : /* ----------
6106 : : * AfterTriggerSaveEvent()
6107 : : *
6108 : : * Called by ExecA[RS]...Triggers() to queue up the triggers that should
6109 : : * be fired for an event.
6110 : : *
6111 : : * NOTE: this is called whenever there are any triggers associated with
6112 : : * the event (even if they are disabled). This function decides which
6113 : : * triggers actually need to be queued. It is also called after each row,
6114 : : * even if there are no triggers for that event, if there are any AFTER
6115 : : * STATEMENT triggers for the statement which use transition tables, so that
6116 : : * the transition tuplestores can be built. Furthermore, if the transition
6117 : : * capture is happening for UPDATEd rows being moved to another partition due
6118 : : * to the partition-key being changed, then this function is called once when
6119 : : * the row is deleted (to capture OLD row), and once when the row is inserted
6120 : : * into another partition (to capture NEW row). This is done separately because
6121 : : * DELETE and INSERT happen on different tables.
6122 : : *
6123 : : * Transition tuplestores are built now, rather than when events are pulled
6124 : : * off of the queue because AFTER ROW triggers are allowed to select from the
6125 : : * transition tables for the statement.
6126 : : *
6127 : : * This contains special support to queue the update events for the case where
6128 : : * a partitioned table undergoing a cross-partition update may have foreign
6129 : : * keys pointing into it. Normally, a partitioned table's row triggers are
6130 : : * not fired because the leaf partition(s) which are modified as a result of
6131 : : * the operation on the partitioned table contain the same triggers which are
6132 : : * fired instead. But that general scheme can cause problematic behavior with
6133 : : * foreign key triggers during cross-partition updates, which are implemented
6134 : : * as DELETE on the source partition followed by INSERT into the destination
6135 : : * partition. Specifically, firing DELETE triggers would lead to the wrong
6136 : : * foreign key action to be enforced considering that the original command is
6137 : : * UPDATE; in this case, this function is called with relinfo as the
6138 : : * partitioned table, and src_partinfo and dst_partinfo referring to the
6139 : : * source and target leaf partitions, respectively.
6140 : : *
6141 : : * is_crosspart_update is true either when a DELETE event is fired on the
6142 : : * source partition (which is to be ignored) or an UPDATE event is fired on
6143 : : * the root partitioned table.
6144 : : * ----------
6145 : : */
6146 : : static void
5769 6147 : 38321 : AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
6148 : : ResultRelInfo *src_partinfo,
6149 : : ResultRelInfo *dst_partinfo,
6150 : : int event, bool row_trigger,
6151 : : TupleTableSlot *oldslot, TupleTableSlot *newslot,
6152 : : List *recheckIndexes, Bitmapset *modifiedCols,
6153 : : TransitionCaptureState *transition_capture,
6154 : : bool is_crosspart_update)
6155 : : {
8863 6156 : 38321 : Relation rel = relinfo->ri_RelationDesc;
6157 : 38321 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
6158 : : AfterTriggerEventData new_event;
6159 : : AfterTriggerSharedData new_shared;
2912 6160 : 38321 : char relkind = rel->rd_rel->relkind;
6161 : : int tgtype_event;
6162 : : int tgtype_level;
6163 : : int i;
4185 noah@leadboat.com 6164 : 38321 : Tuplestorestate *fdw_tuplestore = NULL;
6165 : :
6166 : : /*
6167 : : * Check state. We use a normal test not Assert because it is possible to
6168 : : * reach here in the wrong state given misconfigured RI triggers, in
6169 : : * particular deferring a cascade action trigger.
6170 : : */
3971 rhaas@postgresql.org 6171 [ - + ]: 38321 : if (afterTriggers.query_depth < 0)
5793 tgl@sss.pgh.pa.us 6172 [ # # ]:UBC 0 : elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
6173 : :
6174 : : /* Be sure we have enough space to record events at this query depth. */
3971 rhaas@postgresql.org 6175 [ + + ]:CBC 38321 : if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
6176 : 3076 : AfterTriggerEnlargeQueryState();
6177 : :
6178 : : /*
6179 : : * If the directly named relation has any triggers with transition tables,
6180 : : * then we need to capture transition tuples.
6181 : : */
2992 rhodiumtoad@postgres 6182 [ + + + + ]: 38321 : if (row_trigger && transition_capture != NULL)
6183 : : {
1258 alvherre@alvh.no-ip. 6184 : 32907 : TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;
6185 : :
6186 : : /*
6187 : : * Capture the old tuple in the appropriate transition table based on
6188 : : * the event.
6189 : : */
1275 6190 [ + + + + ]: 32907 : if (!TupIsNull(oldslot))
6191 : : {
6192 : : Tuplestorestate *old_tuplestore;
6193 : :
6194 : 2706 : old_tuplestore = GetAfterTriggersTransitionTable(event,
6195 : : oldslot,
6196 : : NULL,
6197 : : transition_capture);
6198 : 2706 : TransitionTableAddTuple(estate, transition_capture, relinfo,
6199 : : oldslot, NULL, old_tuplestore);
6200 : : }
6201 : :
6202 : : /*
6203 : : * Capture the new tuple in the appropriate transition table based on
6204 : : * the event.
6205 : : */
6206 [ + + + - ]: 32907 : if (!TupIsNull(newslot))
6207 : : {
6208 : : Tuplestorestate *new_tuplestore;
6209 : :
6210 : 30357 : new_tuplestore = GetAfterTriggersTransitionTable(event,
6211 : : NULL,
6212 : : newslot,
6213 : : transition_capture);
6214 : 30357 : TransitionTableAddTuple(estate, transition_capture, relinfo,
6215 : : newslot, original_insert_tuple, new_tuplestore);
6216 : : }
6217 : :
6218 : : /*
6219 : : * If transition tables are the only reason we're here, return. As
6220 : : * mentioned above, we can also be here during update tuple routing in
6221 : : * presence of transition tables, in which case this function is
6222 : : * called separately for OLD and NEW, so we expect exactly one of them
6223 : : * to be NULL.
6224 : : */
2992 rhodiumtoad@postgres 6225 [ + + + + ]: 32907 : if (trigdesc == NULL ||
6226 [ + + + + ]: 32787 : (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
2999 tgl@sss.pgh.pa.us 6227 [ + + + + ]: 30297 : (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
2787 rhaas@postgresql.org 6228 [ + + + + ]: 177 : (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
2384 andres@anarazel.de 6229 [ + - - + : 18 : (event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
+ - - + -
+ ]
3228 kgrittn@postgresql.o 6230 : 32850 : return;
6231 : : }
6232 : :
6233 : : /*
6234 : : * We normally don't see partitioned tables here for row level triggers
6235 : : * except in the special case of a cross-partition update. In that case,
6236 : : * nodeModifyTable.c:ExecCrossPartitionUpdateForeignKey() calls here to
6237 : : * queue an update event on the root target partitioned table, also
6238 : : * passing the source and destination partitions and their tuples.
6239 : : */
1266 alvherre@alvh.no-ip. 6240 [ + + + + : 5471 : Assert(!row_trigger ||
+ - + - +
- - + ]
6241 : : rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE ||
6242 : : (is_crosspart_update &&
6243 : : TRIGGER_FIRED_BY_UPDATE(event) &&
6244 : : src_partinfo != NULL && dst_partinfo != NULL));
6245 : :
6246 : : /*
6247 : : * Validate the event code and collect the associated tuple CTIDs.
6248 : : *
6249 : : * The event code will be used both as a bitmask and an array offset, so
6250 : : * validation is important to make sure we don't walk off the edge of our
6251 : : * arrays.
6252 : : *
6253 : : * Also, if we're considering statement-level triggers, check whether we
6254 : : * already queued a set of them for this event, and cancel the prior set
6255 : : * if so. This preserves the behavior that statement-level triggers fire
6256 : : * just once per statement and fire after row-level triggers.
6257 : : */
6161 tgl@sss.pgh.pa.us 6258 [ + + + + : 5471 : switch (event)
- ]
6259 : : {
6260 : 2885 : case TRIGGER_EVENT_INSERT:
5445 6261 : 2885 : tgtype_event = TRIGGER_TYPE_INSERT;
6161 6262 [ + + ]: 2885 : if (row_trigger)
6263 : : {
2384 andres@anarazel.de 6264 [ - + ]: 2664 : Assert(oldslot == NULL);
6265 [ - + ]: 2664 : Assert(newslot != NULL);
6266 : 2664 : ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
6161 tgl@sss.pgh.pa.us 6267 : 2664 : ItemPointerSetInvalid(&(new_event.ate_ctid2));
6268 : : }
6269 : : else
6270 : : {
2384 andres@anarazel.de 6271 [ - + ]: 221 : Assert(oldslot == NULL);
6272 [ - + ]: 221 : Assert(newslot == NULL);
6161 tgl@sss.pgh.pa.us 6273 : 221 : ItemPointerSetInvalid(&(new_event.ate_ctid1));
6274 : 221 : ItemPointerSetInvalid(&(new_event.ate_ctid2));
2912 6275 : 221 : cancel_prior_stmt_triggers(RelationGetRelid(rel),
6276 : : CMD_INSERT, event);
6277 : : }
6161 6278 : 2885 : break;
6279 : 703 : case TRIGGER_EVENT_DELETE:
5445 6280 : 703 : tgtype_event = TRIGGER_TYPE_DELETE;
6161 6281 [ + + ]: 703 : if (row_trigger)
6282 : : {
2384 andres@anarazel.de 6283 [ - + ]: 585 : Assert(oldslot != NULL);
6284 [ - + ]: 585 : Assert(newslot == NULL);
6285 : 585 : ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
6161 tgl@sss.pgh.pa.us 6286 : 585 : ItemPointerSetInvalid(&(new_event.ate_ctid2));
6287 : : }
6288 : : else
6289 : : {
2384 andres@anarazel.de 6290 [ - + ]: 118 : Assert(oldslot == NULL);
6291 [ - + ]: 118 : Assert(newslot == NULL);
6161 tgl@sss.pgh.pa.us 6292 : 118 : ItemPointerSetInvalid(&(new_event.ate_ctid1));
6293 : 118 : ItemPointerSetInvalid(&(new_event.ate_ctid2));
2912 6294 : 118 : cancel_prior_stmt_triggers(RelationGetRelid(rel),
6295 : : CMD_DELETE, event);
6296 : : }
6161 6297 : 703 : break;
6298 : 1879 : case TRIGGER_EVENT_UPDATE:
5445 6299 : 1879 : tgtype_event = TRIGGER_TYPE_UPDATE;
6161 6300 [ + + ]: 1879 : if (row_trigger)
6301 : : {
2384 andres@anarazel.de 6302 [ - + ]: 1675 : Assert(oldslot != NULL);
6303 [ - + ]: 1675 : Assert(newslot != NULL);
6304 : 1675 : ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
6305 : 1675 : ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2));
6306 : :
6307 : : /*
6308 : : * Also remember the OIDs of partitions to fetch these tuples
6309 : : * out of later in AfterTriggerExecute().
6310 : : */
1266 alvherre@alvh.no-ip. 6311 [ + + ]: 1675 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
6312 : : {
6313 [ + - - + ]: 141 : Assert(src_partinfo != NULL && dst_partinfo != NULL);
6314 : 141 : new_event.ate_src_part =
6315 : 141 : RelationGetRelid(src_partinfo->ri_RelationDesc);
6316 : 141 : new_event.ate_dst_part =
6317 : 141 : RelationGetRelid(dst_partinfo->ri_RelationDesc);
6318 : : }
6319 : : }
6320 : : else
6321 : : {
2384 andres@anarazel.de 6322 [ - + ]: 204 : Assert(oldslot == NULL);
6323 [ - + ]: 204 : Assert(newslot == NULL);
6161 tgl@sss.pgh.pa.us 6324 : 204 : ItemPointerSetInvalid(&(new_event.ate_ctid1));
6325 : 204 : ItemPointerSetInvalid(&(new_event.ate_ctid2));
2912 6326 : 204 : cancel_prior_stmt_triggers(RelationGetRelid(rel),
6327 : : CMD_UPDATE, event);
6328 : : }
6161 6329 : 1879 : break;
6330 : 4 : case TRIGGER_EVENT_TRUNCATE:
5445 6331 : 4 : tgtype_event = TRIGGER_TYPE_TRUNCATE;
2384 andres@anarazel.de 6332 [ - + ]: 4 : Assert(oldslot == NULL);
6333 [ - + ]: 4 : Assert(newslot == NULL);
6161 tgl@sss.pgh.pa.us 6334 : 4 : ItemPointerSetInvalid(&(new_event.ate_ctid1));
6335 : 4 : ItemPointerSetInvalid(&(new_event.ate_ctid2));
6336 : 4 : break;
6161 tgl@sss.pgh.pa.us 6337 :UBC 0 : default:
6338 [ # # ]: 0 : elog(ERROR, "invalid after-trigger event code: %d", event);
6339 : : tgtype_event = 0; /* keep compiler quiet */
6340 : : break;
6341 : : }
6342 : :
6343 : : /* Determine flags */
4185 noah@leadboat.com 6344 [ + + + + ]:CBC 5471 : if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
6345 : : {
1266 alvherre@alvh.no-ip. 6346 [ + + + + ]: 5443 : if (row_trigger && event == TRIGGER_EVENT_UPDATE)
6347 : : {
6348 [ + + ]: 1665 : if (relkind == RELKIND_PARTITIONED_TABLE)
6349 : 141 : new_event.ate_flags = AFTER_TRIGGER_CP_UPDATE;
6350 : : else
6351 : 1524 : new_event.ate_flags = AFTER_TRIGGER_2CTID;
6352 : : }
6353 : : else
6354 : 3778 : new_event.ate_flags = AFTER_TRIGGER_1CTID;
6355 : : }
6356 : :
6357 : : /* else, we'll initialize ate_flags for each trigger */
6358 : :
5445 tgl@sss.pgh.pa.us 6359 : 5471 : tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
6360 : :
6361 : : /*
6362 : : * Must convert/copy the source and destination partition tuples into the
6363 : : * root partitioned table's format/slot, because the processing in the
6364 : : * loop below expects both oldslot and newslot tuples to be in that form.
6365 : : */
1266 alvherre@alvh.no-ip. 6366 [ + + + + ]: 5471 : if (row_trigger && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
6367 : : {
6368 : : TupleTableSlot *rootslot;
6369 : : TupleConversionMap *map;
6370 : :
6371 : 141 : rootslot = ExecGetTriggerOldSlot(estate, relinfo);
6372 : 141 : map = ExecGetChildToRootMap(src_partinfo);
6373 [ + + ]: 141 : if (map)
6374 : 18 : oldslot = execute_attr_map_slot(map->attrMap,
6375 : : oldslot,
6376 : : rootslot);
6377 : : else
6378 : 123 : oldslot = ExecCopySlot(rootslot, oldslot);
6379 : :
6380 : 141 : rootslot = ExecGetTriggerNewSlot(estate, relinfo);
6381 : 141 : map = ExecGetChildToRootMap(dst_partinfo);
6382 [ + + ]: 141 : if (map)
6383 : 18 : newslot = execute_attr_map_slot(map->attrMap,
6384 : : newslot,
6385 : : rootslot);
6386 : : else
6387 : 123 : newslot = ExecCopySlot(rootslot, newslot);
6388 : : }
6389 : :
5445 tgl@sss.pgh.pa.us 6390 [ + + ]: 25086 : for (i = 0; i < trigdesc->numtriggers; i++)
6391 : : {
6392 : 19615 : Trigger *trigger = &trigdesc->triggers[i];
6393 : :
6394 [ + + ]: 19615 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
6395 : : tgtype_level,
6396 : : TRIGGER_TYPE_AFTER,
6397 : : tgtype_event))
6398 : 12636 : continue;
5769 6399 [ + + ]: 6979 : if (!TriggerEnabled(estate, relinfo, trigger, event,
6400 : : modifiedCols, oldslot, newslot))
5806 6401 : 211 : continue;
6402 : :
4185 noah@leadboat.com 6403 [ + + + + ]: 6768 : if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
6404 : : {
6405 [ + + ]: 29 : if (fdw_tuplestore == NULL)
6406 : : {
2912 tgl@sss.pgh.pa.us 6407 : 25 : fdw_tuplestore = GetCurrentFDWTuplestore();
4185 noah@leadboat.com 6408 : 25 : new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
6409 : : }
6410 : : else
6411 : : /* subsequent event for the same tuple */
6412 : 4 : new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
6413 : : }
6414 : :
6415 : : /*
6416 : : * If the trigger is a foreign key enforcement trigger, there are
6417 : : * certain cases where we can skip queueing the event because we can
6418 : : * tell by inspection that the FK constraint will still pass. There
6419 : : * are also some cases during cross-partition updates of a partitioned
6420 : : * table where queuing the event can be skipped.
6421 : : */
2606 peter_e@gmx.net 6422 [ + + + + ]: 6768 : if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event))
6423 : : {
7404 neilc@samurai.com 6424 [ + + + - ]: 3293 : switch (RI_FKey_trigger_type(trigger->tgfoid))
6425 : : {
6426 : 1289 : case RI_TRIGGER_PK:
6427 : :
6428 : : /*
6429 : : * For cross-partition updates of a partitioned PK table,
6430 : : * skip the event fired by the component delete on the
6431 : : * source leaf partition unless the constraint originates
6432 : : * in the partition itself (!tgisclone), because the
6433 : : * update event that will be fired on the root
6434 : : * (partitioned) target table will be used to perform the
6435 : : * necessary foreign key enforcement action.
6436 : : */
1266 alvherre@alvh.no-ip. 6437 [ + + ]: 1289 : if (is_crosspart_update &&
6438 [ + + ]: 249 : TRIGGER_FIRED_BY_DELETE(event) &&
6439 [ + + ]: 132 : trigger->tgisclone)
6440 : 123 : continue;
6441 : :
6442 : : /* Update or delete on trigger's PK table */
4827 tgl@sss.pgh.pa.us 6443 [ + + ]: 1166 : if (!RI_FKey_pk_upd_check_required(trigger, rel,
6444 : : oldslot, newslot))
6445 : : {
6446 : : /* skip queuing this event */
7404 neilc@samurai.com 6447 : 271 : continue;
6448 : : }
7666 tgl@sss.pgh.pa.us 6449 : 895 : break;
6450 : :
7404 neilc@samurai.com 6451 : 597 : case RI_TRIGGER_FK:
6452 : :
6453 : : /*
6454 : : * Update on trigger's FK table. We can skip the update
6455 : : * event fired on a partitioned table during a
6456 : : * cross-partition update of that table, because the insert event
6457 : : * that is fired on the destination leaf partition would
6458 : : * suffice to perform the necessary foreign key check.
6459 : : * Moreover, RI_FKey_fk_upd_check_required() expects to be
6460 : : * passed a tuple that contains system attributes, most of
6461 : : * which are not present in the virtual slot belonging to
6462 : : * a partitioned table.
6463 : : */
1266 alvherre@alvh.no-ip. 6464 [ + + ]: 597 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
6465 [ + + ]: 540 : !RI_FKey_fk_upd_check_required(trigger, rel,
6466 : : oldslot, newslot))
6467 : : {
6468 : : /* skip queuing this event */
7404 neilc@samurai.com 6469 : 364 : continue;
6470 : : }
7666 tgl@sss.pgh.pa.us 6471 : 233 : break;
6472 : :
7404 neilc@samurai.com 6473 : 1407 : case RI_TRIGGER_NONE:
6474 : :
6475 : : /*
6476 : : * Not an FK trigger. No need to queue the update event
6477 : : * fired during a cross-partitioned update of a
6478 : : * partitioned table, because the same row trigger must be
6479 : : * present in the leaf partition(s) that are affected as
6480 : : * part of this update and the events fired on them are
6481 : : * queued instead.
6482 : : */
1266 alvherre@alvh.no-ip. 6483 [ + + ]: 1407 : if (row_trigger &&
6484 [ + + ]: 1067 : rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
6485 : 15 : continue;
7404 neilc@samurai.com 6486 : 1392 : break;
6487 : : }
6488 : : }
6489 : :
6490 : : /*
6491 : : * If the trigger is a deferred unique constraint check trigger, only
6492 : : * queue it if the unique constraint was potentially violated, which
6493 : : * we know from index insertion time.
6494 : : */
5883 tgl@sss.pgh.pa.us 6495 [ + + ]: 5995 : if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
6496 : : {
6497 [ + + ]: 105 : if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
6498 : 44 : continue; /* Uniqueness definitely not violated */
6499 : : }
6500 : :
6501 : : /*
6502 : : * Fill in event structure and add it to the current query's queue.
6503 : : * Note we set ats_table to NULL whenever this trigger doesn't use
6504 : : * transition tables, to improve sharability of the shared event data.
6505 : : */
6161 6506 : 5951 : new_shared.ats_event =
7666 6507 : 11902 : (event & TRIGGER_EVENT_OPMASK) |
6508 [ + + ]: 5951 : (row_trigger ? TRIGGER_EVENT_ROW : 0) |
6509 [ + + ]: 5951 : (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
6510 [ + + ]: 5951 : (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
6161 6511 : 5951 : new_shared.ats_tgoid = trigger->tgoid;
6512 : 5951 : new_shared.ats_relid = RelationGetRelid(rel);
226 6513 : 5951 : new_shared.ats_rolid = GetUserId();
6161 6514 : 5951 : new_shared.ats_firing_id = 0;
2912 6515 [ + + + + : 5951 : if ((trigger->tgoldtable || trigger->tgnewtable) &&
+ - ]
6516 : : transition_capture != NULL)
6517 : 318 : new_shared.ats_table = transition_capture->tcs_private;
6518 : : else
6519 : 5633 : new_shared.ats_table = NULL;
227 6520 : 5951 : new_shared.ats_modifiedcols = modifiedCols;
6521 : :
2912 6522 : 5951 : afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events,
6523 : : &new_event, &new_shared);
6524 : : }
6525 : :
6526 : : /*
6527 : : * Finally, spool any foreign tuple(s). The tuplestore squashes them to
6528 : : * minimal tuples, so this loses any system columns. The executor lost
6529 : : * those columns before us, for an unrelated reason, so this is fine.
6530 : : */
4185 noah@leadboat.com 6531 [ + + ]: 5471 : if (fdw_tuplestore)
6532 : : {
2384 andres@anarazel.de 6533 [ + + ]: 25 : if (oldslot != NULL)
6534 : 16 : tuplestore_puttupleslot(fdw_tuplestore, oldslot);
6535 [ + + ]: 25 : if (newslot != NULL)
6536 : 18 : tuplestore_puttupleslot(fdw_tuplestore, newslot);
6537 : : }
6538 : : }
6539 : :
6540 : : /*
6541 : : * Detect whether we already queued BEFORE STATEMENT triggers for the given
6542 : : * relation + operation, and set the flag so the next call will report "true".
6543 : : */
6544 : : static bool
2911 tgl@sss.pgh.pa.us 6545 : 255 : before_stmt_triggers_fired(Oid relid, CmdType cmdType)
6546 : : {
6547 : : bool result;
6548 : : AfterTriggersTableData *table;
6549 : :
6550 : : /* Check state, like AfterTriggerSaveEvent. */
6551 [ - + ]: 255 : if (afterTriggers.query_depth < 0)
2911 tgl@sss.pgh.pa.us 6552 [ # # ]:UBC 0 : elog(ERROR, "before_stmt_triggers_fired() called outside of query");
6553 : :
6554 : : /* Be sure we have enough space to record events at this query depth. */
2911 tgl@sss.pgh.pa.us 6555 [ + + ]:CBC 255 : if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
6556 : 168 : AfterTriggerEnlargeQueryState();
6557 : :
6558 : : /*
6559 : : * We keep this state in the AfterTriggersTableData that also holds
6560 : : * transition tables for the relation + operation. In this way, if we are
6561 : : * forced to make a new set of transition tables because more tuples get
6562 : : * entered after we've already fired triggers, we will allow a new set of
6563 : : * statement triggers to get queued.
6564 : : */
6565 : 255 : table = GetAfterTriggersTableData(relid, cmdType);
6566 : 255 : result = table->before_trig_done;
6567 : 255 : table->before_trig_done = true;
6568 : 255 : return result;
6569 : : }
6570 : :
6571 : : /*
6572 : : * If we previously queued a set of AFTER STATEMENT triggers for the given
6573 : : * relation + operation, and they've not been fired yet, cancel them. The
6574 : : * caller will queue a fresh set that's after any row-level triggers that may
6575 : : * have been queued by the current sub-statement, preserving (as much as
6576 : : * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT
6577 : : * triggers, and that the latter only fire once. This deals with the
6578 : : * situation where several FK enforcement triggers sequentially queue triggers
6579 : : * for the same table into the same trigger query level. We can't fully
6580 : : * prevent odd behavior though: if there are AFTER ROW triggers taking
6581 : : * transition tables, we don't want to change the transition tables once the
6582 : : * first such trigger has seen them. In such a case, any additional events
6583 : : * will result in creating new transition tables and allowing new firings of
6584 : : * statement triggers.
6585 : : *
6586 : : * This also saves the current event list location so that a later invocation
6587 : : * of this function can cheaply find the triggers we're about to queue and
6588 : : * cancel them.
6589 : : */
6590 : : static void
2912 6591 : 543 : cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent)
6592 : : {
6593 : : AfterTriggersTableData *table;
6594 : 543 : AfterTriggersQueryData *qs = &afterTriggers.query_stack[afterTriggers.query_depth];
6595 : :
6596 : : /*
6597 : : * We keep this state in the AfterTriggersTableData that also holds
6598 : : * transition tables for the relation + operation. In this way, if we are
6599 : : * forced to make a new set of transition tables because more tuples get
6600 : : * entered after we've already fired triggers, we will allow a new set of
6601 : : * statement triggers to get queued without canceling the old ones.
6602 : : */
6603 : 543 : table = GetAfterTriggersTableData(relid, cmdType);
6604 : :
2911 6605 [ + + ]: 543 : if (table->after_trig_done)
6606 : : {
6607 : : /*
6608 : : * We want to start scanning from the tail location that existed just
6609 : : * before we inserted any statement triggers. But the events list
6610 : : * might've been entirely empty then, in which case scan from the
6611 : : * current head.
6612 : : */
6613 : : AfterTriggerEvent event;
6614 : : AfterTriggerEventChunk *chunk;
6615 : :
6616 [ + + ]: 33 : if (table->after_trig_events.tail)
6617 : : {
6618 : 30 : chunk = table->after_trig_events.tail;
6619 : 30 : event = (AfterTriggerEvent) table->after_trig_events.tailfree;
6620 : : }
6621 : : else
6622 : : {
2912 6623 : 3 : chunk = qs->events.head;
6624 : 3 : event = NULL;
6625 : : }
6626 : :
6627 [ + + ]: 48 : for_each_chunk_from(chunk)
6628 : : {
6629 [ + + ]: 33 : if (event == NULL)
6630 : 3 : event = (AfterTriggerEvent) CHUNK_DATA_START(chunk);
6631 [ + - + - + + ]: 69 : for_each_event_from(event, chunk)
6632 : : {
6633 : 54 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
6634 : :
6635 : : /*
6636 : : * Exit loop when we reach events that aren't AFTER STATEMENT triggers for
6637 : : * the target relation.
6638 : : */
6639 [ - + ]: 54 : if (evtshared->ats_relid != relid)
2912 tgl@sss.pgh.pa.us 6640 :UBC 0 : goto done;
2912 tgl@sss.pgh.pa.us 6641 [ - + ]:CBC 54 : if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) != tgevent)
2912 tgl@sss.pgh.pa.us 6642 :UBC 0 : goto done;
2912 tgl@sss.pgh.pa.us 6643 [ + + ]:CBC 54 : if (!TRIGGER_FIRED_FOR_STATEMENT(evtshared->ats_event))
6644 : 18 : goto done;
6645 [ - + ]: 36 : if (!TRIGGER_FIRED_AFTER(evtshared->ats_event))
2912 tgl@sss.pgh.pa.us 6646 :UBC 0 : goto done;
6647 : : /* OK, mark it DONE */
2912 tgl@sss.pgh.pa.us 6648 :CBC 36 : event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
6649 [ + - ]: 36 : event->ate_flags |= AFTER_TRIGGER_DONE;
6650 : : }
6651 : : /* signal we must reinitialize event ptr for next chunk */
6652 : 15 : event = NULL;
6653 : : }
6654 : : }
6655 : 525 : done:
6656 : :
6657 : : /* In any case, save current insertion point for next time */
2911 6658 : 543 : table->after_trig_done = true;
6659 : 543 : table->after_trig_events = qs->events;
2912 6660 : 543 : }
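
Note that cancellation above never removes anything from the event list; each matching statement-level event is simply flipped from AFTER_TRIGGER_IN_PROGRESS to AFTER_TRIGGER_DONE so that later processing skips it. The sketch below shows that flag-flipping pattern in isolation. The DemoEvent struct and demo_* names are invented, and unlike the real code the sketch scans a whole array rather than resuming from the saved tail and stopping at the first non-matching event.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified status flags, modeled on AFTER_TRIGGER_IN_PROGRESS / _DONE. */
#define DEMO_IN_PROGRESS 0x1
#define DEMO_DONE        0x2

/* Invented stand-in for a queued after-trigger event. */
typedef struct DemoEvent
{
    uint32_t    flags;
    int         relid;          /* relation the event was queued for */
    bool        is_stmt_level;  /* statement-level vs. row-level */
} DemoEvent;

/*
 * Mark previously queued statement-level events for 'relid' as DONE so the
 * event loop will skip them; a fresh set can then be queued afterward.
 */
static void
demo_cancel_stmt_events(DemoEvent *events, int nevents, int relid)
{
    for (int i = 0; i < nevents; i++)
    {
        DemoEvent  *ev = &events[i];

        if (ev->relid != relid || !ev->is_stmt_level)
            continue;
        ev->flags &= ~DEMO_IN_PROGRESS;
        ev->flags |= DEMO_DONE;
    }
}

int
main(void)
{
    DemoEvent   events[] = {
        {DEMO_IN_PROGRESS, 1000, true},     /* canceled */
        {DEMO_IN_PROGRESS, 1000, false},    /* row-level: untouched */
        {DEMO_IN_PROGRESS, 2000, true},     /* other relation: untouched */
    };

    demo_cancel_stmt_events(events, 3, 1000);
    for (int i = 0; i < 3; i++)
        printf("event %d: flags=0x%x\n", i, (unsigned) events[i].flags);
    return 0;
}
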
6661 : :
6662 : : /*
6663 : : * GUC assign_hook for session_replication_role
6664 : : */
6665 : : void
1089 6666 : 1610 : assign_session_replication_role(int newval, void *extra)
6667 : : {
6668 : : /*
6669 : : * Must flush the plan cache when changing replication role; but don't
6670 : : * flush unnecessarily.
6671 : : */
6672 [ + + ]: 1610 : if (SessionReplicationRole != newval)
6673 : 543 : ResetPlanCache();
6674 : 1610 : }
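
The assign hook illustrates a common GUC pattern: compare the incoming value with the current setting and only pay for invalidation when something actually changes, so re-issuing SET session_replication_role with the same value stays cheap. Below is a standalone sketch of that change-detection pattern; the demo_* names are invented, and unlike the real hook (where the GUC machinery stores the new value afterward) the sketch also performs the assignment itself.

#include <stdio.h>

/* Invented global setting and cache-reset counter (illustration only). */
static int  demo_replication_role = 0;
static int  demo_cache_resets = 0;

static void
demo_reset_plan_cache(void)
{
    demo_cache_resets++;
}

/*
 * Assign-hook-style setter: only flush the (pretend) plan cache when the
 * value actually changes, as assign_session_replication_role() does.
 */
static void
demo_assign_replication_role(int newval)
{
    if (demo_replication_role != newval)
        demo_reset_plan_cache();
    demo_replication_role = newval;     /* the real hook leaves this to the GUC machinery */
}

int
main(void)
{
    demo_assign_replication_role(1);    /* value changes: one reset */
    demo_assign_replication_role(1);    /* same value: no reset */
    demo_assign_replication_role(0);    /* value changes: second reset */
    printf("cache resets: %d\n", demo_cache_resets);
    return 0;
}
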
6675 : :
6676 : : /*
6677 : : * SQL function pg_trigger_depth()
6678 : : */
6679 : : Datum
4973 alvherre@alvh.no-ip. 6680 : 45 : pg_trigger_depth(PG_FUNCTION_ARGS)
6681 : : {
6682 : 45 : PG_RETURN_INT32(MyTriggerDepth);
6683 : : }
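
pg_trigger_depth() simply exposes the static MyTriggerDepth counter, which is presumably incremented before each trigger function is invoked and decremented afterward elsewhere in this file, so nested (cascading) trigger calls observe increasing depths. Below is a standalone sketch of that bookkeeping with invented demo_* names; the real code must also keep the counter balanced when the trigger function raises an error, which this sketch ignores.

#include <stdio.h>

/* Invented depth counter, modeled on MyTriggerDepth. */
static int  demo_trigger_depth = 0;

/* Stand-in for a trigger function; a real one would run user code. */
typedef void (*demo_trigger_fn) (void);

static void
demo_call_trigger(demo_trigger_fn fn)
{
    demo_trigger_depth++;
    fn();                       /* a cascaded trigger re-enters here */
    demo_trigger_depth--;
}

static void
demo_inner_trigger(void)
{
    printf("inner depth: %d\n", demo_trigger_depth);    /* prints 2 */
}

static void
demo_outer_trigger(void)
{
    printf("outer depth: %d\n", demo_trigger_depth);    /* prints 1 */
    demo_call_trigger(demo_inner_trigger);              /* simulate a cascaded trigger */
}

int
main(void)
{
    demo_call_trigger(demo_outer_trigger);
    printf("depth after: %d\n", demo_trigger_depth);    /* back to 0 */
    return 0;
}
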
6684 : :
6685 : : /*
6686 : : * Check whether a trigger modified a virtual generated column and replace the
6687 : : * value with null if so.
6688 : : *
6689 : : * We need to check this so that we don't end up storing a non-null value in a
6690 : : * virtual generated column.
6691 : : *
6692 : : * We don't need to check for stored generated columns, since those will be
6693 : : * overwritten later anyway.
6694 : : */
6695 : : static HeapTuple
211 peter@eisentraut.org 6696 : 1024 : check_modified_virtual_generated(TupleDesc tupdesc, HeapTuple tuple)
6697 : : {
6698 [ + + + + ]: 1024 : if (!(tupdesc->constr && tupdesc->constr->has_generated_virtual))
6699 : 1015 : return tuple;
6700 : :
6701 [ + + ]: 33 : for (int i = 0; i < tupdesc->natts; i++)
6702 : : {
6703 [ + + ]: 24 : if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
6704 : : {
6705 [ + + ]: 9 : if (!heap_attisnull(tuple, i + 1, tupdesc))
6706 : : {
6707 : 6 : int replCol = i + 1;
6708 : 6 : Datum replValue = 0;
6709 : 6 : bool replIsnull = true;
6710 : :
6711 : 6 : tuple = heap_modify_tuple_by_cols(tuple, tupdesc, 1, &replCol, &replValue, &replIsnull);
6712 : : }
6713 : : }
6714 : : }
6715 : :
6716 : 9 : return tuple;
6717 : : }