/src/postgres/src/backend/commands/trigger.c
Line | Count | Source |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * trigger.c |
4 | | * PostgreSQL TRIGGERs support code. |
5 | | * |
6 | | * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group |
7 | | * Portions Copyright (c) 1994, Regents of the University of California |
8 | | * |
9 | | * IDENTIFICATION |
10 | | * src/backend/commands/trigger.c |
11 | | * |
12 | | *------------------------------------------------------------------------- |
13 | | */ |
14 | | #include "postgres.h" |
15 | | |
16 | | #include "access/genam.h" |
17 | | #include "access/htup_details.h" |
18 | | #include "access/relation.h" |
19 | | #include "access/sysattr.h" |
20 | | #include "access/table.h" |
21 | | #include "access/tableam.h" |
22 | | #include "access/xact.h" |
23 | | #include "catalog/catalog.h" |
24 | | #include "catalog/dependency.h" |
25 | | #include "catalog/indexing.h" |
26 | | #include "catalog/objectaccess.h" |
27 | | #include "catalog/partition.h" |
28 | | #include "catalog/pg_constraint.h" |
29 | | #include "catalog/pg_inherits.h" |
30 | | #include "catalog/pg_proc.h" |
31 | | #include "catalog/pg_trigger.h" |
32 | | #include "catalog/pg_type.h" |
33 | | #include "commands/dbcommands.h" |
34 | | #include "commands/trigger.h" |
35 | | #include "executor/executor.h" |
36 | | #include "miscadmin.h" |
37 | | #include "nodes/bitmapset.h" |
38 | | #include "nodes/makefuncs.h" |
39 | | #include "optimizer/optimizer.h" |
40 | | #include "parser/parse_clause.h" |
41 | | #include "parser/parse_collate.h" |
42 | | #include "parser/parse_func.h" |
43 | | #include "parser/parse_relation.h" |
44 | | #include "partitioning/partdesc.h" |
45 | | #include "pgstat.h" |
46 | | #include "rewrite/rewriteHandler.h" |
47 | | #include "rewrite/rewriteManip.h" |
48 | | #include "storage/lmgr.h" |
49 | | #include "utils/acl.h" |
50 | | #include "utils/builtins.h" |
51 | | #include "utils/fmgroids.h" |
52 | | #include "utils/guc_hooks.h" |
53 | | #include "utils/inval.h" |
54 | | #include "utils/lsyscache.h" |
55 | | #include "utils/memutils.h" |
56 | | #include "utils/plancache.h" |
57 | | #include "utils/rel.h" |
58 | | #include "utils/snapmgr.h" |
59 | | #include "utils/syscache.h" |
60 | | #include "utils/tuplestore.h" |
61 | | |
62 | | |
63 | | /* GUC variables */ |
64 | | int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN; |
65 | | |
66 | | /* How many levels deep into trigger execution are we? */ |
67 | | static int MyTriggerDepth = 0; |
68 | | |
69 | | /* Local function prototypes */ |
70 | | static void renametrig_internal(Relation tgrel, Relation targetrel, |
71 | | HeapTuple trigtup, const char *newname, |
72 | | const char *expected_name); |
73 | | static void renametrig_partition(Relation tgrel, Oid partitionId, |
74 | | Oid parentTriggerOid, const char *newname, |
75 | | const char *expected_name); |
76 | | static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger); |
77 | | static bool GetTupleForTrigger(EState *estate, |
78 | | EPQState *epqstate, |
79 | | ResultRelInfo *relinfo, |
80 | | ItemPointer tid, |
81 | | LockTupleMode lockmode, |
82 | | TupleTableSlot *oldslot, |
83 | | TupleTableSlot **epqslot, |
84 | | TM_Result *tmresultp, |
85 | | TM_FailureData *tmfdp); |
86 | | static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo, |
87 | | Trigger *trigger, TriggerEvent event, |
88 | | Bitmapset *modifiedCols, |
89 | | TupleTableSlot *oldslot, TupleTableSlot *newslot); |
90 | | static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata, |
91 | | int tgindx, |
92 | | FmgrInfo *finfo, |
93 | | Instrumentation *instr, |
94 | | MemoryContext per_tuple_context); |
95 | | static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, |
96 | | ResultRelInfo *src_partinfo, |
97 | | ResultRelInfo *dst_partinfo, |
98 | | int event, bool row_trigger, |
99 | | TupleTableSlot *oldslot, TupleTableSlot *newslot, |
100 | | List *recheckIndexes, Bitmapset *modifiedCols, |
101 | | TransitionCaptureState *transition_capture, |
102 | | bool is_crosspart_update); |
103 | | static void AfterTriggerEnlargeQueryState(void); |
104 | | static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType); |
105 | | static HeapTuple check_modified_virtual_generated(TupleDesc tupdesc, HeapTuple tuple); |
106 | | |
107 | | |
108 | | /* |
109 | | * Create a trigger. Returns the address of the created trigger. |
110 | | * |
111 | | * queryString is the source text of the CREATE TRIGGER command. |
112 | | * This must be supplied if a whenClause is specified, else it can be NULL. |
113 | | * |
114 | | * relOid, if nonzero, is the relation on which the trigger should be |
115 | | * created. If zero, the name provided in the statement will be looked up. |
116 | | * |
117 | | * refRelOid, if nonzero, is the relation to which the constraint trigger |
118 | | * refers. If zero, the constraint relation name provided in the statement |
119 | | * will be looked up as needed. |
120 | | * |
121 | | * constraintOid, if nonzero, says that this trigger is being created |
122 | | * internally to implement that constraint. A suitable pg_depend entry will |
123 | | * be made to link the trigger to that constraint. constraintOid is zero when |
124 | | * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT |
125 | | * TRIGGER, we build a pg_constraint entry internally.) |
126 | | * |
127 | | * indexOid, if nonzero, is the OID of an index associated with the constraint. |
128 | | * We do nothing with this except store it into pg_trigger.tgconstrindid; |
129 | | * but when creating a trigger for a deferrable unique constraint on a |
130 | | * partitioned table, its children are looked up. Note we don't cope with |
131 | | * invalid indexes in that case. |
132 | | * |
133 | | * funcoid, if nonzero, is the OID of the function to invoke. When this is |
134 | | * given, stmt->funcname is ignored. |
135 | | * |
136 | | * parentTriggerOid, if nonzero, is a trigger that begets this one; so that |
137 | | * if that trigger is dropped, this one should be too. There are two cases |
138 | | * when a nonzero value is passed for this: 1) when this function recurses to |
139 | | * create the trigger on partitions, 2) when creating child foreign key |
140 | | * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers(). |
141 | | * |
142 | | * If whenClause is passed, it is an already-transformed expression for |
143 | | * WHEN. In this case, we ignore any that may come in stmt->whenClause. |
144 | | * |
145 | | * If isInternal is true then this is an internally-generated trigger. |
146 | | * This argument sets the tgisinternal field of the pg_trigger entry, and |
147 | | * if true causes us to modify the given trigger name to ensure uniqueness. |
148 | | * |
149 | | * When isInternal is not true we require ACL_TRIGGER permissions on the |
150 | | * relation, as well as ACL_EXECUTE on the trigger function. For internal |
151 | | * triggers the caller must apply any required permission checks. |
152 | | * |
153 | | * When called on partitioned tables, this function recurses to create the |
154 | | * trigger on all the partitions, except if isInternal is true, in which |
155 | | * case the caller is expected to handle the recursion itself. in_partition |
156 | | * indicates such a recursive call; outside callers should pass "false" |
157 | | * (but see CloneRowTriggersToPartition). |
158 | | */ |
159 | | ObjectAddress |
160 | | CreateTrigger(CreateTrigStmt *stmt, const char *queryString, |
161 | | Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid, |
162 | | Oid funcoid, Oid parentTriggerOid, Node *whenClause, |
163 | | bool isInternal, bool in_partition) |
164 | 0 | { |
165 | 0 | return |
166 | 0 | CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid, |
167 | 0 | constraintOid, indexOid, funcoid, |
168 | 0 | parentTriggerOid, whenClause, isInternal, |
169 | 0 | in_partition, TRIGGER_FIRES_ON_ORIGIN); |
170 | 0 | } |
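
The firing condition defaulted here, TRIGGER_FIRES_ON_ORIGIN, is stored into pg_trigger.tgenabled by CreateTriggerFiringOn() below and is what the session_replication_role setting (the SessionReplicationRole GUC above) is compared against when triggers fire. A minimal standalone sketch of that interaction, assuming the character codes mirror the TRIGGER_FIRES_*/TRIGGER_DISABLED constants in pg_trigger.h and the role values mirror trigger.h; all names prefixed "sketch_" are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* assumed to mirror pg_trigger.h */
#define SKETCH_FIRES_ON_ORIGIN  'O'
#define SKETCH_FIRES_ALWAYS     'A'
#define SKETCH_FIRES_ON_REPLICA 'R'
#define SKETCH_DISABLED         'D'

/* assumed to mirror SESSION_REPLICATION_ROLE_* in trigger.h */
enum sketch_role { SKETCH_ROLE_ORIGIN = 0, SKETCH_ROLE_REPLICA = 1, SKETCH_ROLE_LOCAL = 2 };

/* Does a trigger with this tgenabled setting fire under the given role? */
static bool
sketch_trigger_fires(char tgenabled, enum sketch_role role)
{
    if (tgenabled == SKETCH_DISABLED)
        return false;
    if (role == SKETCH_ROLE_REPLICA)
        return tgenabled == SKETCH_FIRES_ON_REPLICA || tgenabled == SKETCH_FIRES_ALWAYS;
    /* origin and local sessions behave alike */
    return tgenabled == SKETCH_FIRES_ON_ORIGIN || tgenabled == SKETCH_FIRES_ALWAYS;
}

int
main(void)
{
    /* the CreateTrigger() default: fires on origin/local sessions, not on replicas */
    printf("%d %d\n",
           sketch_trigger_fires(SKETCH_FIRES_ON_ORIGIN, SKETCH_ROLE_ORIGIN),   /* 1 */
           sketch_trigger_fires(SKETCH_FIRES_ON_ORIGIN, SKETCH_ROLE_REPLICA)); /* 0 */
    return 0;
}
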
171 | | |
172 | | /* |
173 | | * Like the above; additionally the firing condition |
174 | | * (always/origin/replica/disabled) can be specified. |
175 | | */ |
176 | | ObjectAddress |
177 | | CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString, |
178 | | Oid relOid, Oid refRelOid, Oid constraintOid, |
179 | | Oid indexOid, Oid funcoid, Oid parentTriggerOid, |
180 | | Node *whenClause, bool isInternal, bool in_partition, |
181 | | char trigger_fires_when) |
182 | 0 | { |
183 | 0 | int16 tgtype; |
184 | 0 | int ncolumns; |
185 | 0 | int16 *columns; |
186 | 0 | int2vector *tgattr; |
187 | 0 | List *whenRtable; |
188 | 0 | char *qual; |
189 | 0 | Datum values[Natts_pg_trigger]; |
190 | 0 | bool nulls[Natts_pg_trigger]; |
191 | 0 | Relation rel; |
192 | 0 | AclResult aclresult; |
193 | 0 | Relation tgrel; |
194 | 0 | Relation pgrel; |
195 | 0 | HeapTuple tuple = NULL; |
196 | 0 | Oid funcrettype; |
197 | 0 | Oid trigoid = InvalidOid; |
198 | 0 | char internaltrigname[NAMEDATALEN]; |
199 | 0 | char *trigname; |
200 | 0 | Oid constrrelid = InvalidOid; |
201 | 0 | ObjectAddress myself, |
202 | 0 | referenced; |
203 | 0 | char *oldtablename = NULL; |
204 | 0 | char *newtablename = NULL; |
205 | 0 | bool partition_recurse; |
206 | 0 | bool trigger_exists = false; |
207 | 0 | Oid existing_constraint_oid = InvalidOid; |
208 | 0 | bool existing_isInternal = false; |
209 | 0 | bool existing_isClone = false; |
210 | |
|
211 | 0 | if (OidIsValid(relOid)) |
212 | 0 | rel = table_open(relOid, ShareRowExclusiveLock); |
213 | 0 | else |
214 | 0 | rel = table_openrv(stmt->relation, ShareRowExclusiveLock); |
215 | | |
216 | | /* |
217 | | * Triggers must be on tables or views, and there are additional |
218 | | * relation-type-specific restrictions. |
219 | | */ |
220 | 0 | if (rel->rd_rel->relkind == RELKIND_RELATION) |
221 | 0 | { |
222 | | /* Tables can't have INSTEAD OF triggers */ |
223 | 0 | if (stmt->timing != TRIGGER_TYPE_BEFORE && |
224 | 0 | stmt->timing != TRIGGER_TYPE_AFTER) |
225 | 0 | ereport(ERROR, |
226 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
227 | 0 | errmsg("\"%s\" is a table", |
228 | 0 | RelationGetRelationName(rel)), |
229 | 0 | errdetail("Tables cannot have INSTEAD OF triggers."))); |
230 | 0 | } |
231 | 0 | else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) |
232 | 0 | { |
233 | | /* Partitioned tables can't have INSTEAD OF triggers */ |
234 | 0 | if (stmt->timing != TRIGGER_TYPE_BEFORE && |
235 | 0 | stmt->timing != TRIGGER_TYPE_AFTER) |
236 | 0 | ereport(ERROR, |
237 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
238 | 0 | errmsg("\"%s\" is a table", |
239 | 0 | RelationGetRelationName(rel)), |
240 | 0 | errdetail("Tables cannot have INSTEAD OF triggers."))); |
241 | | |
242 | | /* |
243 | | * FOR EACH ROW triggers have further restrictions |
244 | | */ |
245 | 0 | if (stmt->row) |
246 | 0 | { |
247 | | /* |
248 | | * Disallow use of transition tables. |
249 | | * |
250 | | * Note that we have another restriction about transition tables |
251 | | * in partitions; search for 'has_superclass' below for an |
252 | | * explanation. The check here is just to protect from the fact |
253 | | * that if we allowed it here, the creation would succeed for a |
254 | | * partitioned table with no partitions, but would be blocked by |
255 | | * the other restriction when the first partition was created, |
256 | | * which is very unfriendly behavior. |
257 | | */ |
258 | 0 | if (stmt->transitionRels != NIL) |
259 | 0 | ereport(ERROR, |
260 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
261 | 0 | errmsg("\"%s\" is a partitioned table", |
262 | 0 | RelationGetRelationName(rel)), |
263 | 0 | errdetail("ROW triggers with transition tables are not supported on partitioned tables."))); |
264 | 0 | } |
265 | 0 | } |
266 | 0 | else if (rel->rd_rel->relkind == RELKIND_VIEW) |
267 | 0 | { |
268 | | /* |
269 | | * Views can have INSTEAD OF triggers (which we check below are |
270 | | * row-level), or statement-level BEFORE/AFTER triggers. |
271 | | */ |
272 | 0 | if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row) |
273 | 0 | ereport(ERROR, |
274 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
275 | 0 | errmsg("\"%s\" is a view", |
276 | 0 | RelationGetRelationName(rel)), |
277 | 0 | errdetail("Views cannot have row-level BEFORE or AFTER triggers."))); |
278 | | /* Disallow TRUNCATE triggers on VIEWs */ |
279 | 0 | if (TRIGGER_FOR_TRUNCATE(stmt->events)) |
280 | 0 | ereport(ERROR, |
281 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
282 | 0 | errmsg("\"%s\" is a view", |
283 | 0 | RelationGetRelationName(rel)), |
284 | 0 | errdetail("Views cannot have TRUNCATE triggers."))); |
285 | 0 | } |
286 | 0 | else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) |
287 | 0 | { |
288 | 0 | if (stmt->timing != TRIGGER_TYPE_BEFORE && |
289 | 0 | stmt->timing != TRIGGER_TYPE_AFTER) |
290 | 0 | ereport(ERROR, |
291 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
292 | 0 | errmsg("\"%s\" is a foreign table", |
293 | 0 | RelationGetRelationName(rel)), |
294 | 0 | errdetail("Foreign tables cannot have INSTEAD OF triggers."))); |
295 | | |
296 | | /* |
297 | | * We disallow constraint triggers to protect the assumption that |
298 | | * triggers on FKs can't be deferred. See notes with AfterTriggers |
299 | | * data structures, below. |
300 | | */ |
301 | 0 | if (stmt->isconstraint) |
302 | 0 | ereport(ERROR, |
303 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
304 | 0 | errmsg("\"%s\" is a foreign table", |
305 | 0 | RelationGetRelationName(rel)), |
306 | 0 | errdetail("Foreign tables cannot have constraint triggers."))); |
307 | 0 | } |
308 | 0 | else |
309 | 0 | ereport(ERROR, |
310 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
311 | 0 | errmsg("relation \"%s\" cannot have triggers", |
312 | 0 | RelationGetRelationName(rel)), |
313 | 0 | errdetail_relkind_not_supported(rel->rd_rel->relkind))); |
314 | | |
315 | 0 | if (!allowSystemTableMods && IsSystemRelation(rel)) |
316 | 0 | ereport(ERROR, |
317 | 0 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
318 | 0 | errmsg("permission denied: \"%s\" is a system catalog", |
319 | 0 | RelationGetRelationName(rel)))); |
320 | | |
321 | 0 | if (stmt->isconstraint) |
322 | 0 | { |
323 | | /* |
324 | | * We must take a lock on the target relation to protect against |
325 | | * concurrent drop. It's not clear that AccessShareLock is strong |
326 | | * enough, but we certainly need at least that much... otherwise, we |
327 | | * might end up creating a pg_constraint entry referencing a |
328 | | * nonexistent table. |
329 | | */ |
330 | 0 | if (OidIsValid(refRelOid)) |
331 | 0 | { |
332 | 0 | LockRelationOid(refRelOid, AccessShareLock); |
333 | 0 | constrrelid = refRelOid; |
334 | 0 | } |
335 | 0 | else if (stmt->constrrel != NULL) |
336 | 0 | constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock, |
337 | 0 | false); |
338 | 0 | } |
339 | | |
340 | | /* permission checks */ |
341 | 0 | if (!isInternal) |
342 | 0 | { |
343 | 0 | aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), |
344 | 0 | ACL_TRIGGER); |
345 | 0 | if (aclresult != ACLCHECK_OK) |
346 | 0 | aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), |
347 | 0 | RelationGetRelationName(rel)); |
348 | |
|
349 | 0 | if (OidIsValid(constrrelid)) |
350 | 0 | { |
351 | 0 | aclresult = pg_class_aclcheck(constrrelid, GetUserId(), |
352 | 0 | ACL_TRIGGER); |
353 | 0 | if (aclresult != ACLCHECK_OK) |
354 | 0 | aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)), |
355 | 0 | get_rel_name(constrrelid)); |
356 | 0 | } |
357 | 0 | } |
358 | | |
359 | | /* |
360 | | * When called on a partitioned table to create a FOR EACH ROW trigger |
361 | | * that's not internal, we create one trigger for each partition, too. |
362 | | * |
363 | | * For that, we'd better hold lock on all of them ahead of time. |
364 | | */ |
365 | 0 | partition_recurse = !isInternal && stmt->row && |
366 | 0 | rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE; |
367 | 0 | if (partition_recurse) |
368 | 0 | list_free(find_all_inheritors(RelationGetRelid(rel), |
369 | 0 | ShareRowExclusiveLock, NULL)); |
370 | | |
371 | | /* Compute tgtype */ |
372 | 0 | TRIGGER_CLEAR_TYPE(tgtype); |
373 | 0 | if (stmt->row) |
374 | 0 | TRIGGER_SETT_ROW(tgtype); |
375 | 0 | tgtype |= stmt->timing; |
376 | 0 | tgtype |= stmt->events; |
377 | | |
378 | | /* Disallow ROW-level TRUNCATE triggers */ |
379 | 0 | if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype)) |
380 | 0 | ereport(ERROR, |
381 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
382 | 0 | errmsg("TRUNCATE FOR EACH ROW triggers are not supported"))); |
383 | | |
384 | | /* INSTEAD triggers must be row-level, and can't have WHEN or columns */ |
385 | 0 | if (TRIGGER_FOR_INSTEAD(tgtype)) |
386 | 0 | { |
387 | 0 | if (!TRIGGER_FOR_ROW(tgtype)) |
388 | 0 | ereport(ERROR, |
389 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
390 | 0 | errmsg("INSTEAD OF triggers must be FOR EACH ROW"))); |
391 | 0 | if (stmt->whenClause) |
392 | 0 | ereport(ERROR, |
393 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
394 | 0 | errmsg("INSTEAD OF triggers cannot have WHEN conditions"))); |
395 | 0 | if (stmt->columns != NIL) |
396 | 0 | ereport(ERROR, |
397 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
398 | 0 | errmsg("INSTEAD OF triggers cannot have column lists"))); |
399 | 0 | } |
400 | | |
401 | | /* |
402 | | * We don't yet support naming ROW transition variables, but the parser |
403 | | * recognizes the syntax so we can give a nicer message here. |
404 | | * |
405 | | * Per standard, REFERENCING TABLE names are only allowed on AFTER |
406 | | * triggers. Per standard, REFERENCING ROW names are not allowed with FOR |
407 | | * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is |
408 | | * only allowed once. Per standard, OLD may not be specified when |
409 | | * creating a trigger only for INSERT, and NEW may not be specified when |
410 | | * creating a trigger only for DELETE. |
411 | | * |
412 | | * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to |
413 | | * reference both ROW and TABLE transition data. |
414 | | */ |
415 | 0 | if (stmt->transitionRels != NIL) |
416 | 0 | { |
417 | 0 | List *varList = stmt->transitionRels; |
418 | 0 | ListCell *lc; |
419 | |
|
420 | 0 | foreach(lc, varList) |
421 | 0 | { |
422 | 0 | TriggerTransition *tt = lfirst_node(TriggerTransition, lc); |
423 | |
|
424 | 0 | if (!(tt->isTable)) |
425 | 0 | ereport(ERROR, |
426 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
427 | 0 | errmsg("ROW variable naming in the REFERENCING clause is not supported"), |
428 | 0 | errhint("Use OLD TABLE or NEW TABLE for naming transition tables."))); |
429 | | |
430 | | /* |
431 | | * Because of the above test, we omit further ROW-related testing |
432 | | * below. If we later allow naming OLD and NEW ROW variables, |
433 | | * adjustments will be needed below. |
434 | | */ |
435 | | |
436 | 0 | if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) |
437 | 0 | ereport(ERROR, |
438 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
439 | 0 | errmsg("\"%s\" is a foreign table", |
440 | 0 | RelationGetRelationName(rel)), |
441 | 0 | errdetail("Triggers on foreign tables cannot have transition tables."))); |
442 | | |
443 | 0 | if (rel->rd_rel->relkind == RELKIND_VIEW) |
444 | 0 | ereport(ERROR, |
445 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
446 | 0 | errmsg("\"%s\" is a view", |
447 | 0 | RelationGetRelationName(rel)), |
448 | 0 | errdetail("Triggers on views cannot have transition tables."))); |
449 | | |
450 | | /* |
451 | | * We currently don't allow row-level triggers with transition |
452 | | * tables on partition or inheritance children. Such triggers |
453 | | * would somehow need to see tuples converted to the format of the |
454 | | * table they're attached to, and it's not clear which subset of |
455 | | * tuples each child should see. See also the prohibitions in |
456 | | * ATExecAttachPartition() and ATExecAddInherit(). |
457 | | */ |
458 | 0 | if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id)) |
459 | 0 | { |
460 | | /* Use appropriate error message. */ |
461 | 0 | if (rel->rd_rel->relispartition) |
462 | 0 | ereport(ERROR, |
463 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
464 | 0 | errmsg("ROW triggers with transition tables are not supported on partitions"))); |
465 | 0 | else |
466 | 0 | ereport(ERROR, |
467 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
468 | 0 | errmsg("ROW triggers with transition tables are not supported on inheritance children"))); |
469 | 0 | } |
470 | | |
471 | 0 | if (stmt->timing != TRIGGER_TYPE_AFTER) |
472 | 0 | ereport(ERROR, |
473 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
474 | 0 | errmsg("transition table name can only be specified for an AFTER trigger"))); |
475 | | |
476 | 0 | if (TRIGGER_FOR_TRUNCATE(tgtype)) |
477 | 0 | ereport(ERROR, |
478 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
479 | 0 | errmsg("TRUNCATE triggers with transition tables are not supported"))); |
480 | | |
481 | | /* |
482 | | * We currently don't allow multi-event triggers ("INSERT OR |
483 | | * UPDATE") with transition tables, because it's not clear how to |
484 | | * handle INSERT ... ON CONFLICT statements which can fire both |
485 | | * INSERT and UPDATE triggers. We show the inserted tuples to |
486 | | * INSERT triggers and the updated tuples to UPDATE triggers, but |
487 | | * it's not yet clear what INSERT OR UPDATE trigger should see. |
488 | | * This restriction could be lifted if we can decide on the right |
489 | | * semantics in a later release. |
490 | | */ |
491 | 0 | if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) + |
492 | 0 | (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) + |
493 | 0 | (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1) |
494 | 0 | ereport(ERROR, |
495 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
496 | 0 | errmsg("transition tables cannot be specified for triggers with more than one event"))); |
497 | | |
498 | | /* |
499 | | * We currently don't allow column-specific triggers with |
500 | | * transition tables. Per spec, that seems to require |
501 | | * accumulating separate transition tables for each combination of |
502 | | * columns, which is a lot of work for a rather marginal feature. |
503 | | */ |
504 | 0 | if (stmt->columns != NIL) |
505 | 0 | ereport(ERROR, |
506 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
507 | 0 | errmsg("transition tables cannot be specified for triggers with column lists"))); |
508 | | |
509 | | /* |
510 | | * We disallow constraint triggers with transition tables, to |
511 | | * protect the assumption that such triggers can't be deferred. |
512 | | * See notes with AfterTriggers data structures, below. |
513 | | * |
514 | | * Currently this is enforced by the grammar, so just Assert here. |
515 | | */ |
516 | 0 | Assert(!stmt->isconstraint); |
517 | |
|
518 | 0 | if (tt->isNew) |
519 | 0 | { |
520 | 0 | if (!(TRIGGER_FOR_INSERT(tgtype) || |
521 | 0 | TRIGGER_FOR_UPDATE(tgtype))) |
522 | 0 | ereport(ERROR, |
523 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
524 | 0 | errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger"))); |
525 | | |
526 | 0 | if (newtablename != NULL) |
527 | 0 | ereport(ERROR, |
528 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
529 | 0 | errmsg("NEW TABLE cannot be specified multiple times"))); |
530 | | |
531 | 0 | newtablename = tt->name; |
532 | 0 | } |
533 | 0 | else |
534 | 0 | { |
535 | 0 | if (!(TRIGGER_FOR_DELETE(tgtype) || |
536 | 0 | TRIGGER_FOR_UPDATE(tgtype))) |
537 | 0 | ereport(ERROR, |
538 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
539 | 0 | errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger"))); |
540 | | |
541 | 0 | if (oldtablename != NULL) |
542 | 0 | ereport(ERROR, |
543 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
544 | 0 | errmsg("OLD TABLE cannot be specified multiple times"))); |
545 | | |
546 | 0 | oldtablename = tt->name; |
547 | 0 | } |
548 | 0 | } |
549 | | |
550 | 0 | if (newtablename != NULL && oldtablename != NULL && |
551 | 0 | strcmp(newtablename, oldtablename) == 0) |
552 | 0 | ereport(ERROR, |
553 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
554 | 0 | errmsg("OLD TABLE name and NEW TABLE name cannot be the same"))); |
555 | 0 | } |
556 | | |
557 | | /* |
558 | | * Parse the WHEN clause, if there is one and we were not passed an |
559 | | * already-transformed expression. |
560 | | * |
561 | | * Note that as a side effect, we fill whenRtable when parsing. If we got |
562 | | * an already parsed clause, this does not occur, which is what we want -- |
563 | | * no point in adding redundant dependencies below. |
564 | | */ |
565 | 0 | if (!whenClause && stmt->whenClause) |
566 | 0 | { |
567 | 0 | ParseState *pstate; |
568 | 0 | ParseNamespaceItem *nsitem; |
569 | 0 | List *varList; |
570 | 0 | ListCell *lc; |
571 | | |
572 | | /* Set up a pstate to parse with */ |
573 | 0 | pstate = make_parsestate(NULL); |
574 | 0 | pstate->p_sourcetext = queryString; |
575 | | |
576 | | /* |
577 | | * Set up nsitems for OLD and NEW references. |
578 | | * |
579 | | * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2. |
580 | | */ |
581 | 0 | nsitem = addRangeTableEntryForRelation(pstate, rel, |
582 | 0 | AccessShareLock, |
583 | 0 | makeAlias("old", NIL), |
584 | 0 | false, false); |
585 | 0 | addNSItemToQuery(pstate, nsitem, false, true, true); |
586 | 0 | nsitem = addRangeTableEntryForRelation(pstate, rel, |
587 | 0 | AccessShareLock, |
588 | 0 | makeAlias("new", NIL), |
589 | 0 | false, false); |
590 | 0 | addNSItemToQuery(pstate, nsitem, false, true, true); |
591 | | |
592 | | /* Transform expression. Copy to be sure we don't modify original */ |
593 | 0 | whenClause = transformWhereClause(pstate, |
594 | 0 | copyObject(stmt->whenClause), |
595 | 0 | EXPR_KIND_TRIGGER_WHEN, |
596 | 0 | "WHEN"); |
597 | | /* we have to fix its collations too */ |
598 | 0 | assign_expr_collations(pstate, whenClause); |
599 | | |
600 | | /* |
601 | | * Check for disallowed references to OLD/NEW. |
602 | | * |
603 | | * NB: pull_var_clause is okay here only because we don't allow |
604 | | * subselects in WHEN clauses; it would fail to examine the contents |
605 | | * of subselects. |
606 | | */ |
607 | 0 | varList = pull_var_clause(whenClause, 0); |
608 | 0 | foreach(lc, varList) |
609 | 0 | { |
610 | 0 | Var *var = (Var *) lfirst(lc); |
611 | |
|
612 | 0 | switch (var->varno) |
613 | 0 | { |
614 | 0 | case PRS2_OLD_VARNO: |
615 | 0 | if (!TRIGGER_FOR_ROW(tgtype)) |
616 | 0 | ereport(ERROR, |
617 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
618 | 0 | errmsg("statement trigger's WHEN condition cannot reference column values"), |
619 | 0 | parser_errposition(pstate, var->location))); |
620 | 0 | if (TRIGGER_FOR_INSERT(tgtype)) |
621 | 0 | ereport(ERROR, |
622 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
623 | 0 | errmsg("INSERT trigger's WHEN condition cannot reference OLD values"), |
624 | 0 | parser_errposition(pstate, var->location))); |
625 | | /* system columns are okay here */ |
626 | 0 | break; |
627 | 0 | case PRS2_NEW_VARNO: |
628 | 0 | if (!TRIGGER_FOR_ROW(tgtype)) |
629 | 0 | ereport(ERROR, |
630 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
631 | 0 | errmsg("statement trigger's WHEN condition cannot reference column values"), |
632 | 0 | parser_errposition(pstate, var->location))); |
633 | 0 | if (TRIGGER_FOR_DELETE(tgtype)) |
634 | 0 | ereport(ERROR, |
635 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
636 | 0 | errmsg("DELETE trigger's WHEN condition cannot reference NEW values"), |
637 | 0 | parser_errposition(pstate, var->location))); |
638 | 0 | if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype)) |
639 | 0 | ereport(ERROR, |
640 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
641 | 0 | errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"), |
642 | 0 | parser_errposition(pstate, var->location))); |
643 | 0 | if (TRIGGER_FOR_BEFORE(tgtype) && |
644 | 0 | var->varattno == 0 && |
645 | 0 | RelationGetDescr(rel)->constr && |
646 | 0 | (RelationGetDescr(rel)->constr->has_generated_stored || |
647 | 0 | RelationGetDescr(rel)->constr->has_generated_virtual)) |
648 | 0 | ereport(ERROR, |
649 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
650 | 0 | errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"), |
651 | 0 | errdetail("A whole-row reference is used and the table contains generated columns."), |
652 | 0 | parser_errposition(pstate, var->location))); |
653 | 0 | if (TRIGGER_FOR_BEFORE(tgtype) && |
654 | 0 | var->varattno > 0 && |
655 | 0 | TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated) |
656 | 0 | ereport(ERROR, |
657 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
658 | 0 | errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"), |
659 | 0 | errdetail("Column \"%s\" is a generated column.", |
660 | 0 | NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)), |
661 | 0 | parser_errposition(pstate, var->location))); |
662 | 0 | break; |
663 | 0 | default: |
664 | | /* can't happen without add_missing_from, so just elog */ |
665 | 0 | elog(ERROR, "trigger WHEN condition cannot contain references to other relations"); |
666 | 0 | break; |
667 | 0 | } |
668 | 0 | } |
669 | | |
670 | | /* we'll need the rtable for recordDependencyOnExpr */ |
671 | 0 | whenRtable = pstate->p_rtable; |
672 | |
|
673 | 0 | qual = nodeToString(whenClause); |
674 | |
|
675 | 0 | free_parsestate(pstate); |
676 | 0 | } |
677 | 0 | else if (!whenClause) |
678 | 0 | { |
679 | 0 | whenClause = NULL; |
680 | 0 | whenRtable = NIL; |
681 | 0 | qual = NULL; |
682 | 0 | } |
683 | 0 | else |
684 | 0 | { |
685 | 0 | qual = nodeToString(whenClause); |
686 | 0 | whenRtable = NIL; |
687 | 0 | } |
688 | | |
689 | | /* |
690 | | * Find and validate the trigger function. |
691 | | */ |
692 | 0 | if (!OidIsValid(funcoid)) |
693 | 0 | funcoid = LookupFuncName(stmt->funcname, 0, NULL, false); |
694 | 0 | if (!isInternal) |
695 | 0 | { |
696 | 0 | aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE); |
697 | 0 | if (aclresult != ACLCHECK_OK) |
698 | 0 | aclcheck_error(aclresult, OBJECT_FUNCTION, |
699 | 0 | NameListToString(stmt->funcname)); |
700 | 0 | } |
701 | 0 | funcrettype = get_func_rettype(funcoid); |
702 | 0 | if (funcrettype != TRIGGEROID) |
703 | 0 | ereport(ERROR, |
704 | 0 | (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), |
705 | 0 | errmsg("function %s must return type %s", |
706 | 0 | NameListToString(stmt->funcname), "trigger"))); |
707 | | |
708 | | /* |
709 | | * Scan pg_trigger to see if there is already a trigger of the same name. |
710 | | * Skip this for internally generated triggers, since we'll modify the |
711 | | * name to be unique below. |
712 | | * |
713 | | * NOTE that this is cool only because we have ShareRowExclusiveLock on |
714 | | * the relation, so the trigger set won't be changing underneath us. |
715 | | */ |
716 | 0 | tgrel = table_open(TriggerRelationId, RowExclusiveLock); |
717 | 0 | if (!isInternal) |
718 | 0 | { |
719 | 0 | ScanKeyData skeys[2]; |
720 | 0 | SysScanDesc tgscan; |
721 | |
|
722 | 0 | ScanKeyInit(&skeys[0], |
723 | 0 | Anum_pg_trigger_tgrelid, |
724 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
725 | 0 | ObjectIdGetDatum(RelationGetRelid(rel))); |
726 | |
|
727 | 0 | ScanKeyInit(&skeys[1], |
728 | 0 | Anum_pg_trigger_tgname, |
729 | 0 | BTEqualStrategyNumber, F_NAMEEQ, |
730 | 0 | CStringGetDatum(stmt->trigname)); |
731 | |
|
732 | 0 | tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, |
733 | 0 | NULL, 2, skeys); |
734 | | |
735 | | /* There should be at most one matching tuple */ |
736 | 0 | if (HeapTupleIsValid(tuple = systable_getnext(tgscan))) |
737 | 0 | { |
738 | 0 | Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple); |
739 | |
|
740 | 0 | trigoid = oldtrigger->oid; |
741 | 0 | existing_constraint_oid = oldtrigger->tgconstraint; |
742 | 0 | existing_isInternal = oldtrigger->tgisinternal; |
743 | 0 | existing_isClone = OidIsValid(oldtrigger->tgparentid); |
744 | 0 | trigger_exists = true; |
745 | | /* copy the tuple to use in CatalogTupleUpdate() */ |
746 | 0 | tuple = heap_copytuple(tuple); |
747 | 0 | } |
748 | 0 | systable_endscan(tgscan); |
749 | 0 | } |
750 | |
|
751 | 0 | if (!trigger_exists) |
752 | 0 | { |
753 | | /* Generate the OID for the new trigger. */ |
754 | 0 | trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId, |
755 | 0 | Anum_pg_trigger_oid); |
756 | 0 | } |
757 | 0 | else |
758 | 0 | { |
759 | | /* |
760 | | * If OR REPLACE was specified, we'll replace the old trigger; |
761 | | * otherwise complain about the duplicate name. |
762 | | */ |
763 | 0 | if (!stmt->replace) |
764 | 0 | ereport(ERROR, |
765 | 0 | (errcode(ERRCODE_DUPLICATE_OBJECT), |
766 | 0 | errmsg("trigger \"%s\" for relation \"%s\" already exists", |
767 | 0 | stmt->trigname, RelationGetRelationName(rel)))); |
768 | | |
769 | | /* |
770 | | * An internal trigger or a child trigger (isClone) cannot be replaced |
771 | | * by a user-defined trigger. However, skip this test when |
772 | | * in_partition, because then we're recursing from a partitioned table |
773 | | * and the check was made at the parent level. |
774 | | */ |
775 | 0 | if ((existing_isInternal || existing_isClone) && |
776 | 0 | !isInternal && !in_partition) |
777 | 0 | ereport(ERROR, |
778 | 0 | (errcode(ERRCODE_DUPLICATE_OBJECT), |
779 | 0 | errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger", |
780 | 0 | stmt->trigname, RelationGetRelationName(rel)))); |
781 | | |
782 | | /* |
783 | | * It is not allowed to replace with a constraint trigger; gram.y |
784 | | * should have enforced this already. |
785 | | */ |
786 | 0 | Assert(!stmt->isconstraint); |
787 | | |
788 | | /* |
789 | | * It is not allowed to replace an existing constraint trigger, |
790 | | * either. (The reason for these restrictions is partly that it seems |
791 | | * difficult to deal with pending trigger events in such cases, and |
792 | | * partly that the command might imply changing the constraint's |
793 | | * properties as well, which doesn't seem nice.) |
794 | | */ |
795 | 0 | if (OidIsValid(existing_constraint_oid)) |
796 | 0 | ereport(ERROR, |
797 | 0 | (errcode(ERRCODE_DUPLICATE_OBJECT), |
798 | 0 | errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger", |
799 | 0 | stmt->trigname, RelationGetRelationName(rel)))); |
800 | 0 | } |
801 | | |
802 | | /* |
803 | | * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a |
804 | | * corresponding pg_constraint entry. |
805 | | */ |
806 | 0 | if (stmt->isconstraint && !OidIsValid(constraintOid)) |
807 | 0 | { |
808 | | /* Internal callers should have made their own constraints */ |
809 | 0 | Assert(!isInternal); |
810 | 0 | constraintOid = CreateConstraintEntry(stmt->trigname, |
811 | 0 | RelationGetNamespace(rel), |
812 | 0 | CONSTRAINT_TRIGGER, |
813 | 0 | stmt->deferrable, |
814 | 0 | stmt->initdeferred, |
815 | 0 | true, /* Is Enforced */ |
816 | 0 | true, |
817 | 0 | InvalidOid, /* no parent */ |
818 | 0 | RelationGetRelid(rel), |
819 | 0 | NULL, /* no conkey */ |
820 | 0 | 0, |
821 | 0 | 0, |
822 | 0 | InvalidOid, /* no domain */ |
823 | 0 | InvalidOid, /* no index */ |
824 | 0 | InvalidOid, /* no foreign key */ |
825 | 0 | NULL, |
826 | 0 | NULL, |
827 | 0 | NULL, |
828 | 0 | NULL, |
829 | 0 | 0, |
830 | 0 | ' ', |
831 | 0 | ' ', |
832 | 0 | NULL, |
833 | 0 | 0, |
834 | 0 | ' ', |
835 | 0 | NULL, /* no exclusion */ |
836 | 0 | NULL, /* no check constraint */ |
837 | 0 | NULL, |
838 | 0 | true, /* islocal */ |
839 | 0 | 0, /* inhcount */ |
840 | 0 | true, /* noinherit */ |
841 | 0 | false, /* conperiod */ |
842 | 0 | isInternal); /* is_internal */ |
843 | 0 | } |
844 | | |
845 | | /* |
846 | | * If trigger is internally generated, modify the provided trigger name to |
847 | | * ensure uniqueness by appending the trigger OID. (Callers will usually |
848 | | * supply a simple constant trigger name in these cases.) |
849 | | */ |
850 | 0 | if (isInternal) |
851 | 0 | { |
852 | 0 | snprintf(internaltrigname, sizeof(internaltrigname), |
853 | 0 | "%s_%u", stmt->trigname, trigoid); |
854 | 0 | trigname = internaltrigname; |
855 | 0 | } |
856 | 0 | else |
857 | 0 | { |
858 | | /* user-defined trigger; use the specified trigger name as-is */ |
859 | 0 | trigname = stmt->trigname; |
860 | 0 | } |
861 | | |
862 | | /* |
863 | | * Build the new pg_trigger tuple. |
864 | | */ |
865 | 0 | memset(nulls, false, sizeof(nulls)); |
866 | |
|
867 | 0 | values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid); |
868 | 0 | values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel)); |
869 | 0 | values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid); |
870 | 0 | values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein, |
871 | 0 | CStringGetDatum(trigname)); |
872 | 0 | values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid); |
873 | 0 | values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype); |
874 | 0 | values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when; |
875 | 0 | values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal); |
876 | 0 | values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid); |
877 | 0 | values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid); |
878 | 0 | values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid); |
879 | 0 | values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable); |
880 | 0 | values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred); |
881 | |
|
882 | 0 | if (stmt->args) |
883 | 0 | { |
884 | 0 | ListCell *le; |
885 | 0 | char *args; |
886 | 0 | int16 nargs = list_length(stmt->args); |
887 | 0 | int len = 0; |
888 | |
|
889 | 0 | foreach(le, stmt->args) |
890 | 0 | { |
891 | 0 | char *ar = strVal(lfirst(le)); |
892 | |
|
893 | 0 | len += strlen(ar) + 4; |
894 | 0 | for (; *ar; ar++) |
895 | 0 | { |
896 | 0 | if (*ar == '\\') |
897 | 0 | len++; |
898 | 0 | } |
899 | 0 | } |
900 | 0 | args = (char *) palloc(len + 1); |
901 | 0 | args[0] = '\0'; |
902 | 0 | foreach(le, stmt->args) |
903 | 0 | { |
904 | 0 | char *s = strVal(lfirst(le)); |
905 | 0 | char *d = args + strlen(args); |
906 | |
|
907 | 0 | while (*s) |
908 | 0 | { |
909 | 0 | if (*s == '\\') |
910 | 0 | *d++ = '\\'; |
911 | 0 | *d++ = *s++; |
912 | 0 | } |
913 | 0 | strcpy(d, "\\000"); |
914 | 0 | } |
915 | 0 | values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs); |
916 | 0 | values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain, |
917 | 0 | CStringGetDatum(args)); |
918 | 0 | } |
919 | 0 | else |
920 | 0 | { |
921 | 0 | values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0); |
922 | 0 | values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain, |
923 | 0 | CStringGetDatum("")); |
924 | 0 | } |
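
The if/else block above flattens the trigger arguments into one C string for byteain: backslashes are doubled and each argument is terminated by the four characters \000, which byteain decodes into a NUL separator inside tgargs. A small standalone sketch of just that escaping, using a hypothetical helper name and a fixed-size buffer instead of the exact palloc sizing above:

#include <stdio.h>
#include <string.h>

/* Append one trigger argument to buf using the same escaping as the loop above. */
static void
sketch_append_tgarg(char *buf, const char *arg)
{
    char *d = buf + strlen(buf);

    while (*arg)
    {
        if (*arg == '\\')
            *d++ = '\\';        /* double any backslash */
        *d++ = *arg++;
    }
    strcpy(d, "\\000");         /* literal backslash-zero-zero-zero terminator */
}

int
main(void)
{
    char buf[64] = "";

    sketch_append_tgarg(buf, "a\\b");   /* an argument containing a backslash */
    sketch_append_tgarg(buf, "xyz");
    printf("%s\n", buf);                /* prints: a\\b\000xyz\000 */
    return 0;
}
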
925 | | |
926 | | /* build column number array if it's a column-specific trigger */ |
927 | 0 | ncolumns = list_length(stmt->columns); |
928 | 0 | if (ncolumns == 0) |
929 | 0 | columns = NULL; |
930 | 0 | else |
931 | 0 | { |
932 | 0 | ListCell *cell; |
933 | 0 | int i = 0; |
934 | |
|
935 | 0 | columns = (int16 *) palloc(ncolumns * sizeof(int16)); |
936 | 0 | foreach(cell, stmt->columns) |
937 | 0 | { |
938 | 0 | char *name = strVal(lfirst(cell)); |
939 | 0 | int16 attnum; |
940 | 0 | int j; |
941 | | |
942 | | /* Lookup column name. System columns are not allowed */ |
943 | 0 | attnum = attnameAttNum(rel, name, false); |
944 | 0 | if (attnum == InvalidAttrNumber) |
945 | 0 | ereport(ERROR, |
946 | 0 | (errcode(ERRCODE_UNDEFINED_COLUMN), |
947 | 0 | errmsg("column \"%s\" of relation \"%s\" does not exist", |
948 | 0 | name, RelationGetRelationName(rel)))); |
949 | | |
950 | | /* Check for duplicates */ |
951 | 0 | for (j = i - 1; j >= 0; j--) |
952 | 0 | { |
953 | 0 | if (columns[j] == attnum) |
954 | 0 | ereport(ERROR, |
955 | 0 | (errcode(ERRCODE_DUPLICATE_COLUMN), |
956 | 0 | errmsg("column \"%s\" specified more than once", |
957 | 0 | name))); |
958 | 0 | } |
959 | | |
960 | 0 | columns[i++] = attnum; |
961 | 0 | } |
962 | 0 | } |
963 | 0 | tgattr = buildint2vector(columns, ncolumns); |
964 | 0 | values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr); |
965 | | |
966 | | /* set tgqual if trigger has WHEN clause */ |
967 | 0 | if (qual) |
968 | 0 | values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual); |
969 | 0 | else |
970 | 0 | nulls[Anum_pg_trigger_tgqual - 1] = true; |
971 | |
|
972 | 0 | if (oldtablename) |
973 | 0 | values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein, |
974 | 0 | CStringGetDatum(oldtablename)); |
975 | 0 | else |
976 | 0 | nulls[Anum_pg_trigger_tgoldtable - 1] = true; |
977 | 0 | if (newtablename) |
978 | 0 | values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein, |
979 | 0 | CStringGetDatum(newtablename)); |
980 | 0 | else |
981 | 0 | nulls[Anum_pg_trigger_tgnewtable - 1] = true; |
982 | | |
983 | | /* |
984 | | * Insert or replace tuple in pg_trigger. |
985 | | */ |
986 | 0 | if (!trigger_exists) |
987 | 0 | { |
988 | 0 | tuple = heap_form_tuple(tgrel->rd_att, values, nulls); |
989 | 0 | CatalogTupleInsert(tgrel, tuple); |
990 | 0 | } |
991 | 0 | else |
992 | 0 | { |
993 | 0 | HeapTuple newtup; |
994 | |
|
995 | 0 | newtup = heap_form_tuple(tgrel->rd_att, values, nulls); |
996 | 0 | CatalogTupleUpdate(tgrel, &tuple->t_self, newtup); |
997 | 0 | heap_freetuple(newtup); |
998 | 0 | } |
999 | |
|
1000 | 0 | heap_freetuple(tuple); /* free either original or new tuple */ |
1001 | 0 | table_close(tgrel, RowExclusiveLock); |
1002 | |
|
1003 | 0 | pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1])); |
1004 | 0 | pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1])); |
1005 | 0 | pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1])); |
1006 | 0 | if (oldtablename) |
1007 | 0 | pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1])); |
1008 | 0 | if (newtablename) |
1009 | 0 | pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1])); |
1010 | | |
1011 | | /* |
1012 | | * Update the relation's pg_class entry if necessary; if not, send an SI |
1013 | | * message to make other backends (and this one) rebuild relcache entries. |
1014 | | */ |
1015 | 0 | pgrel = table_open(RelationRelationId, RowExclusiveLock); |
1016 | 0 | tuple = SearchSysCacheCopy1(RELOID, |
1017 | 0 | ObjectIdGetDatum(RelationGetRelid(rel))); |
1018 | 0 | if (!HeapTupleIsValid(tuple)) |
1019 | 0 | elog(ERROR, "cache lookup failed for relation %u", |
1020 | 0 | RelationGetRelid(rel)); |
1021 | 0 | if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers) |
1022 | 0 | { |
1023 | 0 | ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true; |
1024 | |
|
1025 | 0 | CatalogTupleUpdate(pgrel, &tuple->t_self, tuple); |
1026 | |
|
1027 | 0 | CommandCounterIncrement(); |
1028 | 0 | } |
1029 | 0 | else |
1030 | 0 | CacheInvalidateRelcacheByTuple(tuple); |
1031 | |
|
1032 | 0 | heap_freetuple(tuple); |
1033 | 0 | table_close(pgrel, RowExclusiveLock); |
1034 | | |
1035 | | /* |
1036 | | * If we're replacing a trigger, flush all the old dependencies before |
1037 | | * recording new ones. |
1038 | | */ |
1039 | 0 | if (trigger_exists) |
1040 | 0 | deleteDependencyRecordsFor(TriggerRelationId, trigoid, true); |
1041 | | |
1042 | | /* |
1043 | | * Record dependencies for trigger. Always place a normal dependency on |
1044 | | * the function. |
1045 | | */ |
1046 | 0 | myself.classId = TriggerRelationId; |
1047 | 0 | myself.objectId = trigoid; |
1048 | 0 | myself.objectSubId = 0; |
1049 | |
|
1050 | 0 | referenced.classId = ProcedureRelationId; |
1051 | 0 | referenced.objectId = funcoid; |
1052 | 0 | referenced.objectSubId = 0; |
1053 | 0 | recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); |
1054 | |
|
1055 | 0 | if (isInternal && OidIsValid(constraintOid)) |
1056 | 0 | { |
1057 | | /* |
1058 | | * Internally-generated trigger for a constraint, so make it an |
1059 | | * internal dependency of the constraint. We can skip depending on |
1060 | | * the relation(s), as there'll be an indirect dependency via the |
1061 | | * constraint. |
1062 | | */ |
1063 | 0 | referenced.classId = ConstraintRelationId; |
1064 | 0 | referenced.objectId = constraintOid; |
1065 | 0 | referenced.objectSubId = 0; |
1066 | 0 | recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL); |
1067 | 0 | } |
1068 | 0 | else |
1069 | 0 | { |
1070 | | /* |
1071 | | * User CREATE TRIGGER, so place dependencies. We make trigger be |
1072 | | * auto-dropped if its relation is dropped or if the FK relation is |
1073 | | * dropped. (Auto drop is compatible with our pre-7.3 behavior.) |
1074 | | */ |
1075 | 0 | referenced.classId = RelationRelationId; |
1076 | 0 | referenced.objectId = RelationGetRelid(rel); |
1077 | 0 | referenced.objectSubId = 0; |
1078 | 0 | recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO); |
1079 | |
|
1080 | 0 | if (OidIsValid(constrrelid)) |
1081 | 0 | { |
1082 | 0 | referenced.classId = RelationRelationId; |
1083 | 0 | referenced.objectId = constrrelid; |
1084 | 0 | referenced.objectSubId = 0; |
1085 | 0 | recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO); |
1086 | 0 | } |
1087 | | /* Not possible to have an index dependency in this case */ |
1088 | 0 | Assert(!OidIsValid(indexOid)); |
1089 | | |
1090 | | /* |
1091 | | * If it's a user-specified constraint trigger, make the constraint |
1092 | | * internally dependent on the trigger instead of vice versa. |
1093 | | */ |
1094 | 0 | if (OidIsValid(constraintOid)) |
1095 | 0 | { |
1096 | 0 | referenced.classId = ConstraintRelationId; |
1097 | 0 | referenced.objectId = constraintOid; |
1098 | 0 | referenced.objectSubId = 0; |
1099 | 0 | recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL); |
1100 | 0 | } |
1101 | | |
1102 | | /* |
1103 | | * If it's a partition trigger, create the partition dependencies. |
1104 | | */ |
1105 | 0 | if (OidIsValid(parentTriggerOid)) |
1106 | 0 | { |
1107 | 0 | ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid); |
1108 | 0 | recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI); |
1109 | 0 | ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel)); |
1110 | 0 | recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC); |
1111 | 0 | } |
1112 | 0 | } |
1113 | | |
1114 | | /* If column-specific trigger, add normal dependencies on columns */ |
1115 | 0 | if (columns != NULL) |
1116 | 0 | { |
1117 | 0 | int i; |
1118 | |
|
1119 | 0 | referenced.classId = RelationRelationId; |
1120 | 0 | referenced.objectId = RelationGetRelid(rel); |
1121 | 0 | for (i = 0; i < ncolumns; i++) |
1122 | 0 | { |
1123 | 0 | referenced.objectSubId = columns[i]; |
1124 | 0 | recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); |
1125 | 0 | } |
1126 | 0 | } |
1127 | | |
1128 | | /* |
1129 | | * If it has a WHEN clause, add dependencies on objects mentioned in the |
1130 | | * expression (eg, functions, as well as any columns used). |
1131 | | */ |
1132 | 0 | if (whenRtable != NIL) |
1133 | 0 | recordDependencyOnExpr(&myself, whenClause, whenRtable, |
1134 | 0 | DEPENDENCY_NORMAL); |
1135 | | |
1136 | | /* Post creation hook for new trigger */ |
1137 | 0 | InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0, |
1138 | 0 | isInternal); |
1139 | | |
1140 | | /* |
1141 | | * Lastly, create the trigger on child relations, if needed. |
1142 | | */ |
1143 | 0 | if (partition_recurse) |
1144 | 0 | { |
1145 | 0 | PartitionDesc partdesc = RelationGetPartitionDesc(rel, true); |
1146 | 0 | int i; |
1147 | 0 | MemoryContext oldcxt, |
1148 | 0 | perChildCxt; |
1149 | |
|
1150 | 0 | perChildCxt = AllocSetContextCreate(CurrentMemoryContext, |
1151 | 0 | "part trig clone", |
1152 | 0 | ALLOCSET_SMALL_SIZES); |
1153 | | |
1154 | | /* |
1155 | | * We don't currently expect to be called with a valid indexOid. If |
1156 | | * that ever changes then we'll need to write code here to find the |
1157 | | * corresponding child index. |
1158 | | */ |
1159 | 0 | Assert(!OidIsValid(indexOid)); |
1160 | |
|
1161 | 0 | oldcxt = MemoryContextSwitchTo(perChildCxt); |
1162 | | |
1163 | | /* Iterate to create the trigger on each existing partition */ |
1164 | 0 | for (i = 0; i < partdesc->nparts; i++) |
1165 | 0 | { |
1166 | 0 | CreateTrigStmt *childStmt; |
1167 | 0 | Relation childTbl; |
1168 | 0 | Node *qual; |
1169 | |
|
1170 | 0 | childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock); |
1171 | | |
1172 | | /* |
1173 | | * Initialize our fabricated parse node by copying the original |
1174 | | * one, then resetting fields that we pass separately. |
1175 | | */ |
1176 | 0 | childStmt = copyObject(stmt); |
1177 | 0 | childStmt->funcname = NIL; |
1178 | 0 | childStmt->whenClause = NULL; |
1179 | | |
1180 | | /* If there is a WHEN clause, create a modified copy of it */ |
1181 | 0 | qual = copyObject(whenClause); |
1182 | 0 | qual = (Node *) |
1183 | 0 | map_partition_varattnos((List *) qual, PRS2_OLD_VARNO, |
1184 | 0 | childTbl, rel); |
1185 | 0 | qual = (Node *) |
1186 | 0 | map_partition_varattnos((List *) qual, PRS2_NEW_VARNO, |
1187 | 0 | childTbl, rel); |
1188 | |
|
1189 | 0 | CreateTriggerFiringOn(childStmt, queryString, |
1190 | 0 | partdesc->oids[i], refRelOid, |
1191 | 0 | InvalidOid, InvalidOid, |
1192 | 0 | funcoid, trigoid, qual, |
1193 | 0 | isInternal, true, trigger_fires_when); |
1194 | |
|
1195 | 0 | table_close(childTbl, NoLock); |
1196 | |
|
1197 | 0 | MemoryContextReset(perChildCxt); |
1198 | 0 | } |
1199 | |
|
1200 | 0 | MemoryContextSwitchTo(oldcxt); |
1201 | 0 | MemoryContextDelete(perChildCxt); |
1202 | 0 | } |
1203 | | |
1204 | | /* Keep lock on target rel until end of xact */ |
1205 | 0 | table_close(rel, NoLock); |
1206 | |
|
1207 | 0 | return myself; |
1208 | 0 | } |
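
For reference, the "Compute tgtype" step earlier in CreateTriggerFiringOn() ORs the FOR EACH ROW flag, the timing, and the event set into the int16 stored as pg_trigger.tgtype. A standalone sketch of that packing; the bit values are assumptions intended to mirror the TRIGGER_TYPE_* constants in catalog/pg_trigger.h (AFTER is simply the absence of the BEFORE/INSTEAD bits):

#include <stdint.h>
#include <stdio.h>

/* assumed to mirror catalog/pg_trigger.h */
#define SKETCH_TYPE_ROW      (1 << 0)
#define SKETCH_TYPE_BEFORE   (1 << 1)
#define SKETCH_TYPE_INSERT   (1 << 2)
#define SKETCH_TYPE_DELETE   (1 << 3)
#define SKETCH_TYPE_UPDATE   (1 << 4)
#define SKETCH_TYPE_TRUNCATE (1 << 5)
#define SKETCH_TYPE_INSTEAD  (1 << 6)

int
main(void)
{
    /* e.g. CREATE TRIGGER ... BEFORE INSERT OR UPDATE ... FOR EACH ROW */
    int16_t tgtype = 0;

    tgtype |= SKETCH_TYPE_ROW;                          /* stmt->row */
    tgtype |= SKETCH_TYPE_BEFORE;                       /* stmt->timing */
    tgtype |= SKETCH_TYPE_INSERT | SKETCH_TYPE_UPDATE;  /* stmt->events */

    printf("tgtype = 0x%02x\n", (unsigned) tgtype);     /* 0x17 with these assumed bits */
    return 0;
}
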
1209 | | |
1210 | | /* |
1211 | | * TriggerSetParentTrigger |
1212 | | * Set a partition's trigger as child of its parent trigger, |
1213 | | * or remove the linkage if parentTrigId is InvalidOid. |
1214 | | * |
1215 | | * This updates the trigger's pg_trigger row to show it as inherited, and |
1216 | | * adds PARTITION dependencies to prevent the trigger from being deleted |
1217 | | * on its own. Alternatively, reverse that. |
1218 | | */ |
1219 | | void |
1220 | | TriggerSetParentTrigger(Relation trigRel, |
1221 | | Oid childTrigId, |
1222 | | Oid parentTrigId, |
1223 | | Oid childTableId) |
1224 | 0 | { |
1225 | 0 | SysScanDesc tgscan; |
1226 | 0 | ScanKeyData skey[1]; |
1227 | 0 | Form_pg_trigger trigForm; |
1228 | 0 | HeapTuple tuple, |
1229 | 0 | newtup; |
1230 | 0 | ObjectAddress depender; |
1231 | 0 | ObjectAddress referenced; |
1232 | | |
1233 | | /* |
1234 | | * Find the child trigger to update. |
1235 | | */ |
1236 | 0 | ScanKeyInit(&skey[0], |
1237 | 0 | Anum_pg_trigger_oid, |
1238 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
1239 | 0 | ObjectIdGetDatum(childTrigId)); |
1240 | |
|
1241 | 0 | tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true, |
1242 | 0 | NULL, 1, skey); |
1243 | |
|
1244 | 0 | tuple = systable_getnext(tgscan); |
1245 | 0 | if (!HeapTupleIsValid(tuple)) |
1246 | 0 | elog(ERROR, "could not find tuple for trigger %u", childTrigId); |
1247 | 0 | newtup = heap_copytuple(tuple); |
1248 | 0 | trigForm = (Form_pg_trigger) GETSTRUCT(newtup); |
1249 | 0 | if (OidIsValid(parentTrigId)) |
1250 | 0 | { |
1251 | | /* don't allow setting parent for a trigger that already has one */ |
1252 | 0 | if (OidIsValid(trigForm->tgparentid)) |
1253 | 0 | elog(ERROR, "trigger %u already has a parent trigger", |
1254 | 0 | childTrigId); |
1255 | | |
1256 | 0 | trigForm->tgparentid = parentTrigId; |
1257 | |
|
1258 | 0 | CatalogTupleUpdate(trigRel, &tuple->t_self, newtup); |
1259 | |
|
1260 | 0 | ObjectAddressSet(depender, TriggerRelationId, childTrigId); |
1261 | |
|
1262 | 0 | ObjectAddressSet(referenced, TriggerRelationId, parentTrigId); |
1263 | 0 | recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI); |
1264 | |
|
1265 | 0 | ObjectAddressSet(referenced, RelationRelationId, childTableId); |
1266 | 0 | recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC); |
1267 | 0 | } |
1268 | 0 | else |
1269 | 0 | { |
1270 | 0 | trigForm->tgparentid = InvalidOid; |
1271 | |
|
1272 | 0 | CatalogTupleUpdate(trigRel, &tuple->t_self, newtup); |
1273 | |
|
1274 | 0 | deleteDependencyRecordsForClass(TriggerRelationId, childTrigId, |
1275 | 0 | TriggerRelationId, |
1276 | 0 | DEPENDENCY_PARTITION_PRI); |
1277 | 0 | deleteDependencyRecordsForClass(TriggerRelationId, childTrigId, |
1278 | 0 | RelationRelationId, |
1279 | 0 | DEPENDENCY_PARTITION_SEC); |
1280 | 0 | } |
1281 | | |
1282 | 0 | heap_freetuple(newtup); |
1283 | 0 | systable_endscan(tgscan); |
1284 | 0 | } |
1285 | | |
1286 | | |
1287 | | /* |
1288 | | * Guts of trigger deletion. |
1289 | | */ |
1290 | | void |
1291 | | RemoveTriggerById(Oid trigOid) |
1292 | 0 | { |
1293 | 0 | Relation tgrel; |
1294 | 0 | SysScanDesc tgscan; |
1295 | 0 | ScanKeyData skey[1]; |
1296 | 0 | HeapTuple tup; |
1297 | 0 | Oid relid; |
1298 | 0 | Relation rel; |
1299 | |
|
1300 | 0 | tgrel = table_open(TriggerRelationId, RowExclusiveLock); |
1301 | | |
1302 | | /* |
1303 | | * Find the trigger to delete. |
1304 | | */ |
1305 | 0 | ScanKeyInit(&skey[0], |
1306 | 0 | Anum_pg_trigger_oid, |
1307 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
1308 | 0 | ObjectIdGetDatum(trigOid)); |
1309 | |
|
1310 | 0 | tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true, |
1311 | 0 | NULL, 1, skey); |
1312 | |
|
1313 | 0 | tup = systable_getnext(tgscan); |
1314 | 0 | if (!HeapTupleIsValid(tup)) |
1315 | 0 | elog(ERROR, "could not find tuple for trigger %u", trigOid); |
1316 | | |
1317 | | /* |
1318 | | * Open and exclusive-lock the relation the trigger belongs to. |
1319 | | */ |
1320 | 0 | relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid; |
1321 | |
|
1322 | 0 | rel = table_open(relid, AccessExclusiveLock); |
1323 | |
|
1324 | 0 | if (rel->rd_rel->relkind != RELKIND_RELATION && |
1325 | 0 | rel->rd_rel->relkind != RELKIND_VIEW && |
1326 | 0 | rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && |
1327 | 0 | rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) |
1328 | 0 | ereport(ERROR, |
1329 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
1330 | 0 | errmsg("relation \"%s\" cannot have triggers", |
1331 | 0 | RelationGetRelationName(rel)), |
1332 | 0 | errdetail_relkind_not_supported(rel->rd_rel->relkind))); |
1333 | | |
1334 | 0 | if (!allowSystemTableMods && IsSystemRelation(rel)) |
1335 | 0 | ereport(ERROR, |
1336 | 0 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
1337 | 0 | errmsg("permission denied: \"%s\" is a system catalog", |
1338 | 0 | RelationGetRelationName(rel)))); |
1339 | | |
1340 | | /* |
1341 | | * Delete the pg_trigger tuple. |
1342 | | */ |
1343 | 0 | CatalogTupleDelete(tgrel, &tup->t_self); |
1344 | |
|
1345 | 0 | systable_endscan(tgscan); |
1346 | 0 | table_close(tgrel, RowExclusiveLock); |
1347 | | |
1348 | | /* |
1349 | | * We do not bother to try to determine whether any other triggers remain, |
1350 | | * which would be needed in order to decide whether it's safe to clear the |
1351 | | * relation's relhastriggers. (In any case, there might be a concurrent |
1352 | | * process adding new triggers.) Instead, just force a relcache inval to |
1353 | | * make other backends (and this one too!) rebuild their relcache entries. |
1354 | | * There's no great harm in leaving relhastriggers true even if there are |
1355 | | * no triggers left. |
1356 | | */ |
1357 | 0 | CacheInvalidateRelcache(rel); |
1358 | | |
1359 | | /* Keep lock on trigger's rel until end of xact */ |
1360 | 0 | table_close(rel, NoLock); |
1361 | 0 | } |
1362 | | |
1363 | | /* |
1364 | | * get_trigger_oid - Look up a trigger by name to find its OID. |
1365 | | * |
1366 | | * If missing_ok is false, throw an error if trigger not found. If |
1367 | | * true, just return InvalidOid. |
1368 | | */ |
1369 | | Oid |
1370 | | get_trigger_oid(Oid relid, const char *trigname, bool missing_ok) |
1371 | 0 | { |
1372 | 0 | Relation tgrel; |
1373 | 0 | ScanKeyData skey[2]; |
1374 | 0 | SysScanDesc tgscan; |
1375 | 0 | HeapTuple tup; |
1376 | 0 | Oid oid; |
1377 | | |
1378 | | /* |
1379 | | * Find the trigger, verify permissions, set up object address |
1380 | | */ |
1381 | 0 | tgrel = table_open(TriggerRelationId, AccessShareLock); |
1382 | |
|
1383 | 0 | ScanKeyInit(&skey[0], |
1384 | 0 | Anum_pg_trigger_tgrelid, |
1385 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
1386 | 0 | ObjectIdGetDatum(relid)); |
1387 | 0 | ScanKeyInit(&skey[1], |
1388 | 0 | Anum_pg_trigger_tgname, |
1389 | 0 | BTEqualStrategyNumber, F_NAMEEQ, |
1390 | 0 | CStringGetDatum(trigname)); |
1391 |  |
1392 | 0 | tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, |
1393 | 0 | NULL, 2, skey); |
1394 |  |
1395 | 0 | tup = systable_getnext(tgscan); |
1396 |  |
1397 | 0 | if (!HeapTupleIsValid(tup)) |
1398 | 0 | { |
1399 | 0 | if (!missing_ok) |
1400 | 0 | ereport(ERROR, |
1401 | 0 | (errcode(ERRCODE_UNDEFINED_OBJECT), |
1402 | 0 | errmsg("trigger \"%s\" for table \"%s\" does not exist", |
1403 | 0 | trigname, get_rel_name(relid)))); |
1404 | 0 | oid = InvalidOid; |
1405 | 0 | } |
1406 | 0 | else |
1407 | 0 | { |
1408 | 0 | oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid; |
1409 | 0 | } |
1410 | | |
1411 | 0 | systable_endscan(tgscan); |
1412 | 0 | table_close(tgrel, AccessShareLock); |
1413 | 0 | return oid; |
1414 | 0 | } |
1415 | | |
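The missing_ok contract above is easiest to see from a caller's perspective. Below is a minimal sketch of hypothetical extension code (the helper name is invented); it relies only on the get_trigger_oid() declaration from commands/trigger.h.

    #include "postgres.h"
    #include "commands/trigger.h"    /* declares get_trigger_oid() */

    /*
     * Hypothetical helper: does relation "relid" have a trigger named
     * "tgname"?  With missing_ok = true, a missing trigger comes back as
     * InvalidOid instead of raising an error, so this never throws merely
     * because the trigger is absent.
     */
    static bool
    relation_has_trigger(Oid relid, const char *tgname)
    {
        return OidIsValid(get_trigger_oid(relid, tgname, true));
    }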
1416 | | /* |
1417 | | * Perform permissions and integrity checks before acquiring a relation lock. |
1418 | | */ |
1419 | | static void |
1420 | | RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid, |
1421 | | void *arg) |
1422 | 0 | { |
1423 | 0 | HeapTuple tuple; |
1424 | 0 | Form_pg_class form; |
1425 |  |
1426 | 0 | tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); |
1427 | 0 | if (!HeapTupleIsValid(tuple)) |
1428 | 0 | return; /* concurrently dropped */ |
1429 | 0 | form = (Form_pg_class) GETSTRUCT(tuple); |
1430 | | |
1431 | | /* only tables, views, foreign tables and partitioned tables can have triggers */ |
1432 | 0 | if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW && |
1433 | 0 | form->relkind != RELKIND_FOREIGN_TABLE && |
1434 | 0 | form->relkind != RELKIND_PARTITIONED_TABLE) |
1435 | 0 | ereport(ERROR, |
1436 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
1437 | 0 | errmsg("relation \"%s\" cannot have triggers", |
1438 | 0 | rv->relname), |
1439 | 0 | errdetail_relkind_not_supported(form->relkind))); |
1440 | | |
1441 | | /* you must own the table to rename one of its triggers */ |
1442 | 0 | if (!object_ownercheck(RelationRelationId, relid, GetUserId())) |
1443 | 0 | aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname); |
1444 | 0 | if (!allowSystemTableMods && IsSystemClass(relid, form)) |
1445 | 0 | ereport(ERROR, |
1446 | 0 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
1447 | 0 | errmsg("permission denied: \"%s\" is a system catalog", |
1448 | 0 | rv->relname))); |
1449 | | |
1450 | 0 | ReleaseSysCache(tuple); |
1451 | 0 | } |
1452 | | |
1453 | | /* |
1454 | | * renametrig - changes the name of a trigger on a relation |
1455 | | * |
1456 | | * The trigger name is changed in the trigger catalog. |
1457 | | * No record of the previous name is kept. |
1458 | | * |
1459 | | * get proper relrelation from relation catalog (if not arg) |
1460 | | * scan trigger catalog |
1461 | | * for name conflict (within rel) |
1462 | | * for original trigger (if not arg) |
1463 | | * modify tgname in trigger tuple |
1464 | | * update row in catalog |
1465 | | */ |
1466 | | ObjectAddress |
1467 | | renametrig(RenameStmt *stmt) |
1468 | 0 | { |
1469 | 0 | Oid tgoid; |
1470 | 0 | Relation targetrel; |
1471 | 0 | Relation tgrel; |
1472 | 0 | HeapTuple tuple; |
1473 | 0 | SysScanDesc tgscan; |
1474 | 0 | ScanKeyData key[2]; |
1475 | 0 | Oid relid; |
1476 | 0 | ObjectAddress address; |
1477 | | |
1478 | | /* |
1479 | | * Look up name, check permissions, and acquire lock (which we will NOT |
1480 | | * release until end of transaction). |
1481 | | */ |
1482 | 0 | relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, |
1483 | 0 | 0, |
1484 | 0 | RangeVarCallbackForRenameTrigger, |
1485 | 0 | NULL); |
1486 | | |
1487 | | /* Have lock already, so just need to build relcache entry. */ |
1488 | 0 | targetrel = relation_open(relid, NoLock); |
1489 | | |
1490 | | /* |
1491 | | * On partitioned tables, this operation recurses to partitions. Lock all |
1492 | | * tables upfront. |
1493 | | */ |
1494 | 0 | if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) |
1495 | 0 | (void) find_all_inheritors(relid, AccessExclusiveLock, NULL); |
1496 |  |
1497 | 0 | tgrel = table_open(TriggerRelationId, RowExclusiveLock); |
1498 | | |
1499 | | /* |
1500 | | * Search for the trigger to modify. |
1501 | | */ |
1502 | 0 | ScanKeyInit(&key[0], |
1503 | 0 | Anum_pg_trigger_tgrelid, |
1504 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
1505 | 0 | ObjectIdGetDatum(relid)); |
1506 | 0 | ScanKeyInit(&key[1], |
1507 | 0 | Anum_pg_trigger_tgname, |
1508 | 0 | BTEqualStrategyNumber, F_NAMEEQ, |
1509 | 0 | PointerGetDatum(stmt->subname)); |
1510 | 0 | tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, |
1511 | 0 | NULL, 2, key); |
1512 | 0 | if (HeapTupleIsValid(tuple = systable_getnext(tgscan))) |
1513 | 0 | { |
1514 | 0 | Form_pg_trigger trigform; |
1515 |  |
1516 | 0 | trigform = (Form_pg_trigger) GETSTRUCT(tuple); |
1517 | 0 | tgoid = trigform->oid; |
1518 | | |
1519 | | /* |
1520 | | * If the trigger descends from a trigger on a parent partitioned |
1521 | | * table, reject the rename. We don't allow a trigger in a partition |
1522 | | * to differ in name from that of its parent: that would lead to an |
1523 | | * inconsistency that pg_dump would not reproduce. |
1524 | | */ |
1525 | 0 | if (OidIsValid(trigform->tgparentid)) |
1526 | 0 | ereport(ERROR, |
1527 | 0 | errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
1528 | 0 | errmsg("cannot rename trigger \"%s\" on table \"%s\"", |
1529 | 0 | stmt->subname, RelationGetRelationName(targetrel)), |
1530 | 0 | errhint("Rename the trigger on the partitioned table \"%s\" instead.", |
1531 | 0 | get_rel_name(get_partition_parent(relid, false)))); |
1532 | | |
1533 | | |
1534 | | /* Rename the trigger on this relation ... */ |
1535 | 0 | renametrig_internal(tgrel, targetrel, tuple, stmt->newname, |
1536 | 0 | stmt->subname); |
1537 | | |
1538 | | /* ... and if it is partitioned, recurse to its partitions */ |
1539 | 0 | if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) |
1540 | 0 | { |
1541 | 0 | PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true); |
1542 |  |
1543 | 0 | for (int i = 0; i < partdesc->nparts; i++) |
1544 | 0 | { |
1545 | 0 | Oid partitionId = partdesc->oids[i]; |
1546 |  |
1547 | 0 | renametrig_partition(tgrel, partitionId, trigform->oid, |
1548 | 0 | stmt->newname, stmt->subname); |
1549 | 0 | } |
1550 | 0 | } |
1551 | 0 | } |
1552 | 0 | else |
1553 | 0 | { |
1554 | 0 | ereport(ERROR, |
1555 | 0 | (errcode(ERRCODE_UNDEFINED_OBJECT), |
1556 | 0 | errmsg("trigger \"%s\" for table \"%s\" does not exist", |
1557 | 0 | stmt->subname, RelationGetRelationName(targetrel)))); |
1558 | 0 | } |
1559 | | |
1560 | 0 | ObjectAddressSet(address, TriggerRelationId, tgoid); |
1561 |  |
1562 | 0 | systable_endscan(tgscan); |
1563 |  |
1564 | 0 | table_close(tgrel, RowExclusiveLock); |
1565 | | |
1566 | | /* |
1567 | | * Close rel, but keep exclusive lock! |
1568 | | */ |
1569 | 0 | relation_close(targetrel, NoLock); |
1570 |  |
1571 | 0 | return address; |
1572 | 0 | } |
1573 | | |
1574 | | /* |
1575 | | * Subroutine for renametrig -- perform the actual work of renaming one |
1576 | | * trigger on one table. |
1577 | | * |
1578 | | * If the trigger has a name different from the expected one, raise a |
1579 | | * NOTICE about it. |
1580 | | */ |
1581 | | static void |
1582 | | renametrig_internal(Relation tgrel, Relation targetrel, HeapTuple trigtup, |
1583 | | const char *newname, const char *expected_name) |
1584 | 0 | { |
1585 | 0 | HeapTuple tuple; |
1586 | 0 | Form_pg_trigger tgform; |
1587 | 0 | ScanKeyData key[2]; |
1588 | 0 | SysScanDesc tgscan; |
1589 | | |
1590 | | /* If the trigger already has the new name, nothing to do. */ |
1591 | 0 | tgform = (Form_pg_trigger) GETSTRUCT(trigtup); |
1592 | 0 | if (strcmp(NameStr(tgform->tgname), newname) == 0) |
1593 | 0 | return; |
1594 | | |
1595 | | /* |
1596 | | * Before actually trying the rename, search for triggers with the same |
1597 | | * name. The update would fail with an ugly message in that case, and it |
1598 | | * is better to throw a nicer error. |
1599 | | */ |
1600 | 0 | ScanKeyInit(&key[0], |
1601 | 0 | Anum_pg_trigger_tgrelid, |
1602 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
1603 | 0 | ObjectIdGetDatum(RelationGetRelid(targetrel))); |
1604 | 0 | ScanKeyInit(&key[1], |
1605 | 0 | Anum_pg_trigger_tgname, |
1606 | 0 | BTEqualStrategyNumber, F_NAMEEQ, |
1607 | 0 | PointerGetDatum(newname)); |
1608 | 0 | tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, |
1609 | 0 | NULL, 2, key); |
1610 | 0 | if (HeapTupleIsValid(tuple = systable_getnext(tgscan))) |
1611 | 0 | ereport(ERROR, |
1612 | 0 | (errcode(ERRCODE_DUPLICATE_OBJECT), |
1613 | 0 | errmsg("trigger \"%s\" for relation \"%s\" already exists", |
1614 | 0 | newname, RelationGetRelationName(targetrel)))); |
1615 | 0 | systable_endscan(tgscan); |
1616 | | |
1617 | | /* |
1618 | | * The target name is free; update the existing pg_trigger tuple with it. |
1619 | | */ |
1620 | 0 | tuple = heap_copytuple(trigtup); /* need a modifiable copy */ |
1621 | 0 | tgform = (Form_pg_trigger) GETSTRUCT(tuple); |
1622 | | |
1623 | | /* |
1624 | | * If the trigger has a name different from what we expected, let the user |
1625 | | * know. (We can proceed anyway, since we must have reached here following |
1626 | | * a tgparentid link.) |
1627 | | */ |
1628 | 0 | if (strcmp(NameStr(tgform->tgname), expected_name) != 0) |
1629 | 0 | ereport(NOTICE, |
1630 | 0 | errmsg("renamed trigger \"%s\" on relation \"%s\"", |
1631 | 0 | NameStr(tgform->tgname), |
1632 | 0 | RelationGetRelationName(targetrel))); |
1633 | | |
1634 | 0 | namestrcpy(&tgform->tgname, newname); |
1635 |  |
1636 | 0 | CatalogTupleUpdate(tgrel, &tuple->t_self, tuple); |
1637 |  |
1638 | 0 | InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0); |
1639 | | |
1640 | | /* |
1641 | | * Invalidate relation's relcache entry so that other backends (and this |
1642 | | * one too!) are sent SI message to make them rebuild relcache entries. |
1643 | | * (Ideally this should happen automatically...) |
1644 | | */ |
1645 | 0 | CacheInvalidateRelcache(targetrel); |
1646 | 0 | } |
1647 | | |
1648 | | /* |
1649 | | * Subroutine for renametrig -- Helper for recursing to partitions when |
1650 | | * renaming triggers on a partitioned table. |
1651 | | */ |
1652 | | static void |
1653 | | renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid, |
1654 | | const char *newname, const char *expected_name) |
1655 | 0 | { |
1656 | 0 | SysScanDesc tgscan; |
1657 | 0 | ScanKeyData key; |
1658 | 0 | HeapTuple tuple; |
1659 | | |
1660 | | /* |
1661 | | * Given a relation and the OID of a trigger on parent relation, find the |
1662 | | * corresponding trigger in the child and rename that trigger to the given |
1663 | | * name. |
1664 | | */ |
1665 | 0 | ScanKeyInit(&key, |
1666 | 0 | Anum_pg_trigger_tgrelid, |
1667 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
1668 | 0 | ObjectIdGetDatum(partitionId)); |
1669 | 0 | tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, |
1670 | 0 | NULL, 1, &key); |
1671 | 0 | while (HeapTupleIsValid(tuple = systable_getnext(tgscan))) |
1672 | 0 | { |
1673 | 0 | Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple); |
1674 | 0 | Relation partitionRel; |
1675 |  |
1676 | 0 | if (tgform->tgparentid != parentTriggerOid) |
1677 | 0 | continue; /* not our trigger */ |
1678 | | |
1679 | 0 | partitionRel = table_open(partitionId, NoLock); |
1680 | | |
1681 | | /* Rename the trigger on this partition */ |
1682 | 0 | renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name); |
1683 | | |
1684 | | /* And if this relation is partitioned, recurse to its partitions */ |
1685 | 0 | if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) |
1686 | 0 | { |
1687 | 0 | PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel, |
1688 | 0 | true); |
1689 |  |
1690 | 0 | for (int i = 0; i < partdesc->nparts; i++) |
1691 | 0 | { |
1692 | 0 | Oid partoid = partdesc->oids[i]; |
1693 |  |
1694 | 0 | renametrig_partition(tgrel, partoid, tgform->oid, newname, |
1695 | 0 | NameStr(tgform->tgname)); |
1696 | 0 | } |
1697 | 0 | } |
1698 | 0 | table_close(partitionRel, NoLock); |
1699 | | |
1700 | | /* There should be at most one matching tuple */ |
1701 | 0 | break; |
1702 | 0 | } |
1703 | 0 | systable_endscan(tgscan); |
1704 | 0 | } |
1705 | | |
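The design choice driving the recursion above is that child triggers are matched by their tgparentid link, never by name; that is also why renametrig_internal() only issues a NOTICE (rather than failing) when a child's current name differs from the expected one. The following self-contained toy model, with invented types and data, shows the same traversal pattern; it is an illustration, not PostgreSQL code.

    #include <stdio.h>

    /* Invented toy types: only the traversal pattern mirrors the code above. */
    typedef struct ToyTrigger
    {
        unsigned int oid;          /* trigger identity */
        unsigned int parentoid;    /* 0 when not cloned from a parent trigger */
        const char  *name;
    } ToyTrigger;

    typedef struct ToyRel
    {
        const char    *relname;
        ToyTrigger     trig;       /* one trigger per relation, for brevity */
        struct ToyRel *parts[4];
        int            nparts;
    } ToyRel;

    /* Rename the child triggers descending from parentoid, then recurse. */
    static void
    toy_rename_partition(ToyRel *rel, unsigned int parentoid, const char *newname)
    {
        for (int i = 0; i < rel->nparts; i++)
        {
            ToyRel *part = rel->parts[i];

            if (part->trig.parentoid != parentoid)
                continue;          /* not our trigger */
            part->trig.name = newname;
            /* follow this child's own OID as the parent link one level down */
            toy_rename_partition(part, part->trig.oid, newname);
        }
    }

    int
    main(void)
    {
        ToyRel leaf = {"measurement_y2024", {12, 11, "trg_old"}, {0}, 0};
        ToyRel mid  = {"measurement_2024",  {11, 10, "trg_old"}, {&leaf}, 1};
        ToyRel root = {"measurement",       {10,  0, "trg_old"}, {&mid},  1};

        root.trig.name = "trg_new";
        toy_rename_partition(&root, root.trig.oid, "trg_new");

        printf("%s: %s, %s: %s, %s: %s\n",
               root.relname, root.trig.name,
               mid.relname, mid.trig.name,
               leaf.relname, leaf.trig.name);
        return 0;
    }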
1706 | | /* |
1707 | | * EnableDisableTrigger() |
1708 | | * |
1709 | | * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER |
1710 | | * to change 'tgenabled' field for the specified trigger(s) |
1711 | | * |
1712 | | * rel: relation to process (caller must hold suitable lock on it) |
1713 | | * tgname: name of trigger to process, or NULL to scan all triggers |
1714 | | * tgparent: if not zero, process only triggers with this tgparentid |
1715 | | * fires_when: new value for tgenabled field. In addition to generic |
1716 | | * enablement/disablement, this also defines when the trigger |
1717 | | * should be fired in session replication roles. |
1718 | | * skip_system: if true, skip "system" triggers (constraint triggers) |
1719 | | * recurse: if true, recurse to partitions |
1720 | | * |
1721 | | * Caller should have checked permissions for the table; here we also |
1722 | | * enforce that superuser privilege is required to alter the state of |
1723 | | * system triggers |
1724 | | */ |
1725 | | void |
1726 | | EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent, |
1727 | | char fires_when, bool skip_system, bool recurse, |
1728 | | LOCKMODE lockmode) |
1729 | 0 | { |
1730 | 0 | Relation tgrel; |
1731 | 0 | int nkeys; |
1732 | 0 | ScanKeyData keys[2]; |
1733 | 0 | SysScanDesc tgscan; |
1734 | 0 | HeapTuple tuple; |
1735 | 0 | bool found; |
1736 | 0 | bool changed; |
1737 | | |
1738 | | /* Scan the relevant entries in pg_trigger */ |
1739 | 0 | tgrel = table_open(TriggerRelationId, RowExclusiveLock); |
1740 |  |
1741 | 0 | ScanKeyInit(&keys[0], |
1742 | 0 | Anum_pg_trigger_tgrelid, |
1743 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
1744 | 0 | ObjectIdGetDatum(RelationGetRelid(rel))); |
1745 | 0 | if (tgname) |
1746 | 0 | { |
1747 | 0 | ScanKeyInit(&keys[1], |
1748 | 0 | Anum_pg_trigger_tgname, |
1749 | 0 | BTEqualStrategyNumber, F_NAMEEQ, |
1750 | 0 | CStringGetDatum(tgname)); |
1751 | 0 | nkeys = 2; |
1752 | 0 | } |
1753 | 0 | else |
1754 | 0 | nkeys = 1; |
1755 |  |
1756 | 0 | tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, |
1757 | 0 | NULL, nkeys, keys); |
1758 |  |
1759 | 0 | found = changed = false; |
1760 |  |
1761 | 0 | while (HeapTupleIsValid(tuple = systable_getnext(tgscan))) |
1762 | 0 | { |
1763 | 0 | Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple); |
1764 |  |
1765 | 0 | if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid) |
1766 | 0 | continue; |
1767 | | |
1768 | 0 | if (oldtrig->tgisinternal) |
1769 | 0 | { |
1770 | | /* system trigger ... ok to process? */ |
1771 | 0 | if (skip_system) |
1772 | 0 | continue; |
1773 | 0 | if (!superuser()) |
1774 | 0 | ereport(ERROR, |
1775 | 0 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
1776 | 0 | errmsg("permission denied: \"%s\" is a system trigger", |
1777 | 0 | NameStr(oldtrig->tgname)))); |
1778 | 0 | } |
1779 | | |
1780 | 0 | found = true; |
1781 |  |
1782 | 0 | if (oldtrig->tgenabled != fires_when) |
1783 | 0 | { |
1784 | | /* need to change this one ... make a copy to scribble on */ |
1785 | 0 | HeapTuple newtup = heap_copytuple(tuple); |
1786 | 0 | Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup); |
1787 |  |
1788 | 0 | newtrig->tgenabled = fires_when; |
1789 |  |
1790 | 0 | CatalogTupleUpdate(tgrel, &newtup->t_self, newtup); |
1791 |  |
1792 | 0 | heap_freetuple(newtup); |
1793 |  |
1794 | 0 | changed = true; |
1795 | 0 | } |
1796 | | |
1797 | | /* |
1798 | | * When altering FOR EACH ROW triggers on a partitioned table, do the |
1799 | | * same on the partitions as well, unless ONLY is specified. |
1800 | | * |
1801 | | * Note that we recurse even if we didn't change the trigger above, |
1802 | | * because the partitions' copy of the trigger may have a different |
1803 | | * value of tgenabled than the parent's trigger and thus might need to |
1804 | | * be changed. |
1805 | | */ |
1806 | 0 | if (recurse && |
1807 | 0 | rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && |
1808 | 0 | (TRIGGER_FOR_ROW(oldtrig->tgtype))) |
1809 | 0 | { |
1810 | 0 | PartitionDesc partdesc = RelationGetPartitionDesc(rel, true); |
1811 | 0 | int i; |
1812 |  |
1813 | 0 | for (i = 0; i < partdesc->nparts; i++) |
1814 | 0 | { |
1815 | 0 | Relation part; |
1816 |  |
1817 | 0 | part = relation_open(partdesc->oids[i], lockmode); |
1818 | | /* Match on child triggers' tgparentid, not their name */ |
1819 | 0 | EnableDisableTrigger(part, NULL, oldtrig->oid, |
1820 | 0 | fires_when, skip_system, recurse, |
1821 | 0 | lockmode); |
1822 | 0 | table_close(part, NoLock); /* keep lock till commit */ |
1823 | 0 | } |
1824 | 0 | } |
1825 |  |
1826 | 0 | InvokeObjectPostAlterHook(TriggerRelationId, |
1827 | 0 | oldtrig->oid, 0); |
1828 | 0 | } |
1829 | | |
1830 | 0 | systable_endscan(tgscan); |
1831 |  |
1832 | 0 | table_close(tgrel, RowExclusiveLock); |
1833 |  |
1834 | 0 | if (tgname && !found) |
1835 | 0 | ereport(ERROR, |
1836 | 0 | (errcode(ERRCODE_UNDEFINED_OBJECT), |
1837 | 0 | errmsg("trigger \"%s\" for table \"%s\" does not exist", |
1838 | 0 | tgname, RelationGetRelationName(rel)))); |
1839 | | |
1840 | | /* |
1841 | | * If we changed anything, broadcast a SI inval message to force each |
1842 | | * backend (including our own!) to rebuild relation's relcache entry. |
1843 | | * Otherwise they will fail to apply the change promptly. |
1844 | | */ |
1845 | 0 | if (changed) |
1846 | 0 | CacheInvalidateRelcache(rel); |
1847 | 0 | } |
1848 | | |
1849 | | |
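Whether a trigger whose tgenabled is set here actually fires later depends on the session_replication_role GUC; that check is made at firing time (in TriggerEnabled, outside this excerpt). Below is a simplified, self-contained model of that decision. The enum and function are illustrative stand-ins, but the character codes match pg_trigger.tgenabled: 'O' origin (the default), 'D' disabled, 'R' replica, 'A' always.

    #include <stdbool.h>

    /* Illustrative stand-in for the backend's session_replication_role values. */
    typedef enum
    {
        TOY_ROLE_ORIGIN,
        TOY_ROLE_REPLICA,
        TOY_ROLE_LOCAL
    } ToyReplicationRole;

    /* Would a trigger with this tgenabled setting fire under the given role? */
    static bool
    toy_trigger_fires(char tgenabled, ToyReplicationRole role)
    {
        if (role == TOY_ROLE_REPLICA)
            return tgenabled == 'R' || tgenabled == 'A';

        /* origin and local sessions behave alike for this purpose */
        return tgenabled == 'O' || tgenabled == 'A';
    }

In short: ENABLE REPLICA triggers fire only when session_replication_role = replica, ENABLE ALWAYS triggers fire in every role, and disabled triggers never fire.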
1850 | | /* |
1851 | | * Build trigger data to attach to the given relcache entry. |
1852 | | * |
1853 | | * Note that trigger data attached to a relcache entry must be stored in |
1854 | | * CacheMemoryContext to ensure it survives as long as the relcache entry. |
1855 | | * But we should be running in a less long-lived working context. To avoid |
1856 | | * leaking cache memory if this routine fails partway through, we build a |
1857 | | * temporary TriggerDesc in working memory and then copy the completed |
1858 | | * structure into cache memory. |
1859 | | */ |
1860 | | void |
1861 | | RelationBuildTriggers(Relation relation) |
1862 | 0 | { |
1863 | 0 | TriggerDesc *trigdesc; |
1864 | 0 | int numtrigs; |
1865 | 0 | int maxtrigs; |
1866 | 0 | Trigger *triggers; |
1867 | 0 | Relation tgrel; |
1868 | 0 | ScanKeyData skey; |
1869 | 0 | SysScanDesc tgscan; |
1870 | 0 | HeapTuple htup; |
1871 | 0 | MemoryContext oldContext; |
1872 | 0 | int i; |
1873 | | |
1874 | | /* |
1875 | | * Allocate a working array to hold the triggers (the array is extended if |
1876 | | * necessary) |
1877 | | */ |
1878 | 0 | maxtrigs = 16; |
1879 | 0 | triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger)); |
1880 | 0 | numtrigs = 0; |
1881 | | |
1882 | | /* |
1883 | | * Note: since we scan the triggers using TriggerRelidNameIndexId, we will |
1884 | | * be reading the triggers in name order, except possibly during |
1885 | | * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn |
1886 | | * ensures that triggers will be fired in name order. |
1887 | | */ |
1888 | 0 | ScanKeyInit(&skey, |
1889 | 0 | Anum_pg_trigger_tgrelid, |
1890 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
1891 | 0 | ObjectIdGetDatum(RelationGetRelid(relation))); |
1892 |  |
1893 | 0 | tgrel = table_open(TriggerRelationId, AccessShareLock); |
1894 | 0 | tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, |
1895 | 0 | NULL, 1, &skey); |
1896 |  |
1897 | 0 | while (HeapTupleIsValid(htup = systable_getnext(tgscan))) |
1898 | 0 | { |
1899 | 0 | Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup); |
1900 | 0 | Trigger *build; |
1901 | 0 | Datum datum; |
1902 | 0 | bool isnull; |
1903 |  |
1904 | 0 | if (numtrigs >= maxtrigs) |
1905 | 0 | { |
1906 | 0 | maxtrigs *= 2; |
1907 | 0 | triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger)); |
1908 | 0 | } |
1909 | 0 | build = &(triggers[numtrigs]); |
1910 |  |
1911 | 0 | build->tgoid = pg_trigger->oid; |
1912 | 0 | build->tgname = DatumGetCString(DirectFunctionCall1(nameout, |
1913 | 0 | NameGetDatum(&pg_trigger->tgname))); |
1914 | 0 | build->tgfoid = pg_trigger->tgfoid; |
1915 | 0 | build->tgtype = pg_trigger->tgtype; |
1916 | 0 | build->tgenabled = pg_trigger->tgenabled; |
1917 | 0 | build->tgisinternal = pg_trigger->tgisinternal; |
1918 | 0 | build->tgisclone = OidIsValid(pg_trigger->tgparentid); |
1919 | 0 | build->tgconstrrelid = pg_trigger->tgconstrrelid; |
1920 | 0 | build->tgconstrindid = pg_trigger->tgconstrindid; |
1921 | 0 | build->tgconstraint = pg_trigger->tgconstraint; |
1922 | 0 | build->tgdeferrable = pg_trigger->tgdeferrable; |
1923 | 0 | build->tginitdeferred = pg_trigger->tginitdeferred; |
1924 | 0 | build->tgnargs = pg_trigger->tgnargs; |
1925 | | /* tgattr is first var-width field, so OK to access directly */ |
1926 | 0 | build->tgnattr = pg_trigger->tgattr.dim1; |
1927 | 0 | if (build->tgnattr > 0) |
1928 | 0 | { |
1929 | 0 | build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16)); |
1930 | 0 | memcpy(build->tgattr, &(pg_trigger->tgattr.values), |
1931 | 0 | build->tgnattr * sizeof(int16)); |
1932 | 0 | } |
1933 | 0 | else |
1934 | 0 | build->tgattr = NULL; |
1935 | 0 | if (build->tgnargs > 0) |
1936 | 0 | { |
1937 | 0 | bytea *val; |
1938 | 0 | char *p; |
1939 |  |
1940 | 0 | val = DatumGetByteaPP(fastgetattr(htup, |
1941 | 0 | Anum_pg_trigger_tgargs, |
1942 | 0 | tgrel->rd_att, &isnull)); |
1943 | 0 | if (isnull) |
1944 | 0 | elog(ERROR, "tgargs is null in trigger for relation \"%s\"", |
1945 | 0 | RelationGetRelationName(relation)); |
1946 | 0 | p = (char *) VARDATA_ANY(val); |
1947 | 0 | build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *)); |
1948 | 0 | for (i = 0; i < build->tgnargs; i++) |
1949 | 0 | { |
1950 | 0 | build->tgargs[i] = pstrdup(p); |
1951 | 0 | p += strlen(p) + 1; |
1952 | 0 | } |
1953 | 0 | } |
1954 | 0 | else |
1955 | 0 | build->tgargs = NULL; |
1956 | | |
1957 | 0 | datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable, |
1958 | 0 | tgrel->rd_att, &isnull); |
1959 | 0 | if (!isnull) |
1960 | 0 | build->tgoldtable = |
1961 | 0 | DatumGetCString(DirectFunctionCall1(nameout, datum)); |
1962 | 0 | else |
1963 | 0 | build->tgoldtable = NULL; |
1964 |  |
1965 | 0 | datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable, |
1966 | 0 | tgrel->rd_att, &isnull); |
1967 | 0 | if (!isnull) |
1968 | 0 | build->tgnewtable = |
1969 | 0 | DatumGetCString(DirectFunctionCall1(nameout, datum)); |
1970 | 0 | else |
1971 | 0 | build->tgnewtable = NULL; |
1972 |  |
1973 | 0 | datum = fastgetattr(htup, Anum_pg_trigger_tgqual, |
1974 | 0 | tgrel->rd_att, &isnull); |
1975 | 0 | if (!isnull) |
1976 | 0 | build->tgqual = TextDatumGetCString(datum); |
1977 | 0 | else |
1978 | 0 | build->tgqual = NULL; |
1979 |  |
1980 | 0 | numtrigs++; |
1981 | 0 | } |
1982 | | |
1983 | 0 | systable_endscan(tgscan); |
1984 | 0 | table_close(tgrel, AccessShareLock); |
1985 | | |
1986 | | /* There might not be any triggers */ |
1987 | 0 | if (numtrigs == 0) |
1988 | 0 | { |
1989 | 0 | pfree(triggers); |
1990 | 0 | return; |
1991 | 0 | } |
1992 | | |
1993 | | /* Build trigdesc */ |
1994 | 0 | trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc)); |
1995 | 0 | trigdesc->triggers = triggers; |
1996 | 0 | trigdesc->numtriggers = numtrigs; |
1997 | 0 | for (i = 0; i < numtrigs; i++) |
1998 | 0 | SetTriggerFlags(trigdesc, &(triggers[i])); |
1999 | | |
2000 | | /* Copy completed trigdesc into cache storage */ |
2001 | 0 | oldContext = MemoryContextSwitchTo(CacheMemoryContext); |
2002 | 0 | relation->trigdesc = CopyTriggerDesc(trigdesc); |
2003 | 0 | MemoryContextSwitchTo(oldContext); |
2004 | | |
2005 | | /* Release working memory */ |
2006 | 0 | FreeTriggerDesc(trigdesc); |
2007 | 0 | } |
2008 | | |
2009 | | /* |
2010 | | * Update the TriggerDesc's hint flags to include the specified trigger |
2011 | | */ |
2012 | | static void |
2013 | | SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger) |
2014 | 0 | { |
2015 | 0 | int16 tgtype = trigger->tgtype; |
2016 |  |
2017 | 0 | trigdesc->trig_insert_before_row |= |
2018 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2019 | 0 | TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT); |
2020 | 0 | trigdesc->trig_insert_after_row |= |
2021 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2022 | 0 | TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT); |
2023 | 0 | trigdesc->trig_insert_instead_row |= |
2024 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2025 | 0 | TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT); |
2026 | 0 | trigdesc->trig_insert_before_statement |= |
2027 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT, |
2028 | 0 | TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT); |
2029 | 0 | trigdesc->trig_insert_after_statement |= |
2030 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT, |
2031 | 0 | TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT); |
2032 | 0 | trigdesc->trig_update_before_row |= |
2033 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2034 | 0 | TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE); |
2035 | 0 | trigdesc->trig_update_after_row |= |
2036 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2037 | 0 | TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE); |
2038 | 0 | trigdesc->trig_update_instead_row |= |
2039 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2040 | 0 | TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE); |
2041 | 0 | trigdesc->trig_update_before_statement |= |
2042 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT, |
2043 | 0 | TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE); |
2044 | 0 | trigdesc->trig_update_after_statement |= |
2045 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT, |
2046 | 0 | TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE); |
2047 | 0 | trigdesc->trig_delete_before_row |= |
2048 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2049 | 0 | TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE); |
2050 | 0 | trigdesc->trig_delete_after_row |= |
2051 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2052 | 0 | TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE); |
2053 | 0 | trigdesc->trig_delete_instead_row |= |
2054 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW, |
2055 | 0 | TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE); |
2056 | 0 | trigdesc->trig_delete_before_statement |= |
2057 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT, |
2058 | 0 | TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE); |
2059 | 0 | trigdesc->trig_delete_after_statement |= |
2060 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT, |
2061 | 0 | TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE); |
2062 | | /* there are no row-level truncate triggers */ |
2063 | 0 | trigdesc->trig_truncate_before_statement |= |
2064 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT, |
2065 | 0 | TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE); |
2066 | 0 | trigdesc->trig_truncate_after_statement |= |
2067 | 0 | TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT, |
2068 | 0 | TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE); |
2069 |  |
2070 | 0 | trigdesc->trig_insert_new_table |= |
2071 | 0 | (TRIGGER_FOR_INSERT(tgtype) && |
2072 | 0 | TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable)); |
2073 | 0 | trigdesc->trig_update_old_table |= |
2074 | 0 | (TRIGGER_FOR_UPDATE(tgtype) && |
2075 | 0 | TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable)); |
2076 | 0 | trigdesc->trig_update_new_table |= |
2077 | 0 | (TRIGGER_FOR_UPDATE(tgtype) && |
2078 | 0 | TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable)); |
2079 | 0 | trigdesc->trig_delete_old_table |= |
2080 | 0 | (TRIGGER_FOR_DELETE(tgtype) && |
2081 | 0 | TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable)); |
2082 | 0 | } |
2083 | | |
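These hint flags let later code (the ExecBS*/ExecBR* routines further down) skip whole classes of events with one boolean test instead of rescanning the trigger array. The sketch below models the idea with an invented bit layout and a simplified matching macro; it is not the pg_trigger.h encoding, which additionally masks the level and timing fields so that, for example, statement-level triggers can be distinguished from row-level ones.

    #include <stdbool.h>
    #include <stdint.h>

    /* Invented bit layout: level, timing and event packed into one word. */
    #define TOY_LEVEL_ROW      (1 << 0)
    #define TOY_TIMING_BEFORE  (1 << 1)
    #define TOY_TIMING_AFTER   (1 << 2)
    #define TOY_EVENT_INSERT   (1 << 3)
    #define TOY_EVENT_UPDATE   (1 << 4)

    /* True when the type word carries this level, timing and event together. */
    #define TOY_TYPE_MATCHES(type, level, timing, event) \
        (((type) & ((level) | (timing) | (event))) == ((level) | (timing) | (event)))

    typedef struct ToyTriggerDesc
    {
        bool    trig_insert_before_row;
        bool    trig_insert_after_row;
    } ToyTriggerDesc;

    /* Accumulate one trigger's packed type bits into descriptor-wide hints. */
    static void
    toy_set_flags(ToyTriggerDesc *desc, uint16_t tgtype)
    {
        desc->trig_insert_before_row |=
            TOY_TYPE_MATCHES(tgtype, TOY_LEVEL_ROW, TOY_TIMING_BEFORE, TOY_EVENT_INSERT);
        desc->trig_insert_after_row |=
            TOY_TYPE_MATCHES(tgtype, TOY_LEVEL_ROW, TOY_TIMING_AFTER, TOY_EVENT_INSERT);
    }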
2084 | | /* |
2085 | | * Copy a TriggerDesc data structure. |
2086 | | * |
2087 | | * The copy is allocated in the current memory context. |
2088 | | */ |
2089 | | TriggerDesc * |
2090 | | CopyTriggerDesc(TriggerDesc *trigdesc) |
2091 | 0 | { |
2092 | 0 | TriggerDesc *newdesc; |
2093 | 0 | Trigger *trigger; |
2094 | 0 | int i; |
2095 |  |
2096 | 0 | if (trigdesc == NULL || trigdesc->numtriggers <= 0) |
2097 | 0 | return NULL; |
2098 | | |
2099 | 0 | newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc)); |
2100 | 0 | memcpy(newdesc, trigdesc, sizeof(TriggerDesc)); |
2101 |  |
2102 | 0 | trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger)); |
2103 | 0 | memcpy(trigger, trigdesc->triggers, |
2104 | 0 | trigdesc->numtriggers * sizeof(Trigger)); |
2105 | 0 | newdesc->triggers = trigger; |
2106 |  |
2107 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2108 | 0 | { |
2109 | 0 | trigger->tgname = pstrdup(trigger->tgname); |
2110 | 0 | if (trigger->tgnattr > 0) |
2111 | 0 | { |
2112 | 0 | int16 *newattr; |
2113 |  |
2114 | 0 | newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16)); |
2115 | 0 | memcpy(newattr, trigger->tgattr, |
2116 | 0 | trigger->tgnattr * sizeof(int16)); |
2117 | 0 | trigger->tgattr = newattr; |
2118 | 0 | } |
2119 | 0 | if (trigger->tgnargs > 0) |
2120 | 0 | { |
2121 | 0 | char **newargs; |
2122 | 0 | int16 j; |
2123 |  |
2124 | 0 | newargs = (char **) palloc(trigger->tgnargs * sizeof(char *)); |
2125 | 0 | for (j = 0; j < trigger->tgnargs; j++) |
2126 | 0 | newargs[j] = pstrdup(trigger->tgargs[j]); |
2127 | 0 | trigger->tgargs = newargs; |
2128 | 0 | } |
2129 | 0 | if (trigger->tgqual) |
2130 | 0 | trigger->tgqual = pstrdup(trigger->tgqual); |
2131 | 0 | if (trigger->tgoldtable) |
2132 | 0 | trigger->tgoldtable = pstrdup(trigger->tgoldtable); |
2133 | 0 | if (trigger->tgnewtable) |
2134 | 0 | trigger->tgnewtable = pstrdup(trigger->tgnewtable); |
2135 | 0 | trigger++; |
2136 | 0 | } |
2137 |  |
2138 | 0 | return newdesc; |
2139 | 0 | } |
2140 | | |
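CopyTriggerDesc is the usual two-step deep copy: memcpy the flat structs first, then re-point every pointer member at freshly allocated duplicates so the copy owns all of its memory (which is what allows RelationBuildTriggers to move the result into CacheMemoryContext). A generic sketch of the same idiom with an invented struct; error handling is omitted and strdup() is POSIX.

    #include <stdlib.h>
    #include <string.h>

    typedef struct ToyTrigger
    {
        char   *name;      /* separately allocated */
        int     nargs;
        char  **args;      /* array of separately allocated strings */
    } ToyTrigger;

    /* Deep-copy an array of ToyTrigger; free with a matching routine. */
    static ToyTrigger *
    toy_copy_triggers(const ToyTrigger *src, int n)
    {
        ToyTrigger *dst = malloc(n * sizeof(ToyTrigger));

        memcpy(dst, src, n * sizeof(ToyTrigger));   /* flat fields first */
        for (int i = 0; i < n; i++)
        {
            dst[i].name = strdup(src[i].name);
            dst[i].args = malloc(src[i].nargs * sizeof(char *));
            for (int j = 0; j < src[i].nargs; j++)
                dst[i].args[j] = strdup(src[i].args[j]);
        }
        return dst;
    }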
2141 | | /* |
2142 | | * Free a TriggerDesc data structure. |
2143 | | */ |
2144 | | void |
2145 | | FreeTriggerDesc(TriggerDesc *trigdesc) |
2146 | 0 | { |
2147 | 0 | Trigger *trigger; |
2148 | 0 | int i; |
2149 |  |
2150 | 0 | if (trigdesc == NULL) |
2151 | 0 | return; |
2152 | | |
2153 | 0 | trigger = trigdesc->triggers; |
2154 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2155 | 0 | { |
2156 | 0 | pfree(trigger->tgname); |
2157 | 0 | if (trigger->tgnattr > 0) |
2158 | 0 | pfree(trigger->tgattr); |
2159 | 0 | if (trigger->tgnargs > 0) |
2160 | 0 | { |
2161 | 0 | while (--(trigger->tgnargs) >= 0) |
2162 | 0 | pfree(trigger->tgargs[trigger->tgnargs]); |
2163 | 0 | pfree(trigger->tgargs); |
2164 | 0 | } |
2165 | 0 | if (trigger->tgqual) |
2166 | 0 | pfree(trigger->tgqual); |
2167 | 0 | if (trigger->tgoldtable) |
2168 | 0 | pfree(trigger->tgoldtable); |
2169 | 0 | if (trigger->tgnewtable) |
2170 | 0 | pfree(trigger->tgnewtable); |
2171 | 0 | trigger++; |
2172 | 0 | } |
2173 | 0 | pfree(trigdesc->triggers); |
2174 | 0 | pfree(trigdesc); |
2175 | 0 | } |
2176 | | |
2177 | | /* |
2178 | | * Compare two TriggerDesc structures for logical equality. |
2179 | | */ |
2180 | | #ifdef NOT_USED |
2181 | | bool |
2182 | | equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2) |
2183 | | { |
2184 | | int i, |
2185 | | j; |
2186 | | |
2187 | | /* |
2188 | | * We need not examine the hint flags, just the trigger array itself; if |
2189 | | * we have the same triggers with the same types, the flags should match. |
2190 | | * |
2191 | | * As of 7.3 we assume trigger set ordering is significant in the |
2192 | | * comparison; so we just compare corresponding slots of the two sets. |
2193 | | * |
2194 | | * Note: comparing the stringToNode forms of the WHEN clauses means that |
2195 | | * parse column locations will affect the result. This is okay as long as |
2196 | | * this function is only used for detecting exact equality, as for example |
2197 | | * in checking for staleness of a cache entry. |
2198 | | */ |
2199 | | if (trigdesc1 != NULL) |
2200 | | { |
2201 | | if (trigdesc2 == NULL) |
2202 | | return false; |
2203 | | if (trigdesc1->numtriggers != trigdesc2->numtriggers) |
2204 | | return false; |
2205 | | for (i = 0; i < trigdesc1->numtriggers; i++) |
2206 | | { |
2207 | | Trigger *trig1 = trigdesc1->triggers + i; |
2208 | | Trigger *trig2 = trigdesc2->triggers + i; |
2209 | | |
2210 | | if (trig1->tgoid != trig2->tgoid) |
2211 | | return false; |
2212 | | if (strcmp(trig1->tgname, trig2->tgname) != 0) |
2213 | | return false; |
2214 | | if (trig1->tgfoid != trig2->tgfoid) |
2215 | | return false; |
2216 | | if (trig1->tgtype != trig2->tgtype) |
2217 | | return false; |
2218 | | if (trig1->tgenabled != trig2->tgenabled) |
2219 | | return false; |
2220 | | if (trig1->tgisinternal != trig2->tgisinternal) |
2221 | | return false; |
2222 | | if (trig1->tgisclone != trig2->tgisclone) |
2223 | | return false; |
2224 | | if (trig1->tgconstrrelid != trig2->tgconstrrelid) |
2225 | | return false; |
2226 | | if (trig1->tgconstrindid != trig2->tgconstrindid) |
2227 | | return false; |
2228 | | if (trig1->tgconstraint != trig2->tgconstraint) |
2229 | | return false; |
2230 | | if (trig1->tgdeferrable != trig2->tgdeferrable) |
2231 | | return false; |
2232 | | if (trig1->tginitdeferred != trig2->tginitdeferred) |
2233 | | return false; |
2234 | | if (trig1->tgnargs != trig2->tgnargs) |
2235 | | return false; |
2236 | | if (trig1->tgnattr != trig2->tgnattr) |
2237 | | return false; |
2238 | | if (trig1->tgnattr > 0 && |
2239 | | memcmp(trig1->tgattr, trig2->tgattr, |
2240 | | trig1->tgnattr * sizeof(int16)) != 0) |
2241 | | return false; |
2242 | | for (j = 0; j < trig1->tgnargs; j++) |
2243 | | if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0) |
2244 | | return false; |
2245 | | if (trig1->tgqual == NULL && trig2->tgqual == NULL) |
2246 | | /* ok */ ; |
2247 | | else if (trig1->tgqual == NULL || trig2->tgqual == NULL) |
2248 | | return false; |
2249 | | else if (strcmp(trig1->tgqual, trig2->tgqual) != 0) |
2250 | | return false; |
2251 | | if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL) |
2252 | | /* ok */ ; |
2253 | | else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL) |
2254 | | return false; |
2255 | | else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0) |
2256 | | return false; |
2257 | | if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL) |
2258 | | /* ok */ ; |
2259 | | else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL) |
2260 | | return false; |
2261 | | else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0) |
2262 | | return false; |
2263 | | } |
2264 | | } |
2265 | | else if (trigdesc2 != NULL) |
2266 | | return false; |
2267 | | return true; |
2268 | | } |
2269 | | #endif /* NOT_USED */ |
2270 | | |
2271 | | /* |
2272 | | * Check if there is a row-level trigger with transition tables that prevents |
2273 | | * a table from becoming an inheritance child or partition. Return the name |
2274 | | * of the first such incompatible trigger, or NULL if there is none. |
2275 | | */ |
2276 | | const char * |
2277 | | FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc) |
2278 | 0 | { |
2279 | 0 | if (trigdesc != NULL) |
2280 | 0 | { |
2281 | 0 | int i; |
2282 |  |
2283 | 0 | for (i = 0; i < trigdesc->numtriggers; ++i) |
2284 | 0 | { |
2285 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
2286 |  |
2287 | 0 | if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL) |
2288 | 0 | return trigger->tgname; |
2289 | 0 | } |
2290 | 0 | } |
2291 | | |
2292 | 0 | return NULL; |
2293 | 0 | } |
2294 | | |
2295 | | /* |
2296 | | * Call a trigger function. |
2297 | | * |
2298 | | * trigdata: trigger descriptor. |
2299 | | * tgindx: trigger's index in finfo and instr arrays. |
2300 | | * finfo: array of cached trigger function call information. |
2301 | | * instr: optional array of EXPLAIN ANALYZE instrumentation state. |
2302 | | * per_tuple_context: memory context to execute the function in. |
2303 | | * |
2304 | | * Returns the tuple (or NULL) as returned by the function. |
2305 | | */ |
2306 | | static HeapTuple |
2307 | | ExecCallTriggerFunc(TriggerData *trigdata, |
2308 | | int tgindx, |
2309 | | FmgrInfo *finfo, |
2310 | | Instrumentation *instr, |
2311 | | MemoryContext per_tuple_context) |
2312 | 0 | { |
2313 | 0 | LOCAL_FCINFO(fcinfo, 0); |
2314 | 0 | PgStat_FunctionCallUsage fcusage; |
2315 | 0 | Datum result; |
2316 | 0 | MemoryContext oldContext; |
2317 | | |
2318 | | /* |
2319 | | * Protect against code paths that may fail to initialize transition table |
2320 | | * info. |
2321 | | */ |
2322 | 0 | Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) || |
2323 | 0 | TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) || |
2324 | 0 | TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) && |
2325 | 0 | TRIGGER_FIRED_AFTER(trigdata->tg_event) && |
2326 | 0 | !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) && |
2327 | 0 | !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) || |
2328 | 0 | (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL)); |
2329 |  |
2330 | 0 | finfo += tgindx; |
2331 | | |
2332 | | /* |
2333 | | * We cache fmgr lookup info, to avoid making the lookup again on each |
2334 | | * call. |
2335 | | */ |
2336 | 0 | if (finfo->fn_oid == InvalidOid) |
2337 | 0 | fmgr_info(trigdata->tg_trigger->tgfoid, finfo); |
2338 |  |
2339 | 0 | Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid); |
2340 | | |
2341 | | /* |
2342 | | * If doing EXPLAIN ANALYZE, start charging time to this trigger. |
2343 | | */ |
2344 | 0 | if (instr) |
2345 | 0 | InstrStartNode(instr + tgindx); |
2346 | | |
2347 | | /* |
2348 | | * Do the function evaluation in the per-tuple memory context, so that |
2349 | | * leaked memory will be reclaimed once per tuple. Note in particular that |
2350 | | * any new tuple created by the trigger function will live till the end of |
2351 | | * the tuple cycle. |
2352 | | */ |
2353 | 0 | oldContext = MemoryContextSwitchTo(per_tuple_context); |
2354 | | |
2355 | | /* |
2356 | | * Call the function, passing no arguments but setting a context. |
2357 | | */ |
2358 | 0 | InitFunctionCallInfoData(*fcinfo, finfo, 0, |
2359 | 0 | InvalidOid, (Node *) trigdata, NULL); |
2360 |  |
2361 | 0 | pgstat_init_function_usage(fcinfo, &fcusage); |
2362 |  |
2363 | 0 | MyTriggerDepth++; |
2364 | 0 | PG_TRY(); |
2365 | 0 | { |
2366 | 0 | result = FunctionCallInvoke(fcinfo); |
2367 | 0 | } |
2368 | 0 | PG_FINALLY(); |
2369 | 0 | { |
2370 | 0 | MyTriggerDepth--; |
2371 | 0 | } |
2372 | 0 | PG_END_TRY(); |
2373 |  |
2374 | 0 | pgstat_end_function_usage(&fcusage, true); |
2375 |  |
2376 | 0 | MemoryContextSwitchTo(oldContext); |
2377 | | |
2378 | | /* |
2379 | | * Trigger protocol allows function to return a null pointer, but NOT to |
2380 | | * set the isnull result flag. |
2381 | | */ |
2382 | 0 | if (fcinfo->isnull) |
2383 | 0 | ereport(ERROR, |
2384 | 0 | (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), |
2385 | 0 | errmsg("trigger function %u returned null value", |
2386 | 0 | fcinfo->flinfo->fn_oid))); |
2387 | | |
2388 | | /* |
2389 | | * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count |
2390 | | * one "tuple returned" (really the number of firings). |
2391 | | */ |
2392 | 0 | if (instr) |
2393 | 0 | InstrStopNode(instr + tgindx, 1); |
2394 |  |
2395 | 0 | return (HeapTuple) DatumGetPointer(result); |
2396 | 0 | } |
2397 | | |
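ExecCallTriggerFunc is the executor's half of the call; the trigger function receives the same TriggerData pointer through fcinfo->context. For reference, here is a minimal sketch of a C-language trigger function in the documented extension style (the function name is hypothetical); it simply passes the row through unchanged.

    #include "postgres.h"
    #include "commands/trigger.h"
    #include "fmgr.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(noop_row_trigger);

    Datum
    noop_row_trigger(PG_FUNCTION_ARGS)
    {
        TriggerData *trigdata = (TriggerData *) fcinfo->context;

        /* Make sure the trigger manager, not plain SQL, invoked us. */
        if (!CALLED_AS_TRIGGER(fcinfo))
            elog(ERROR, "noop_row_trigger: not called by trigger manager");

        /*
         * For UPDATE the candidate new row is tg_newtuple; for INSERT and
         * DELETE the row of interest is tg_trigtuple.  Returning it
         * unchanged lets the operation proceed as-is.
         */
        if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
            return PointerGetDatum(trigdata->tg_newtuple);

        return PointerGetDatum(trigdata->tg_trigtuple);
    }

Returning NULL instead, or returning a different tuple, exercises the suppression and replacement paths handled by the ExecBR* routines below.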
2398 | | void |
2399 | | ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo) |
2400 | 0 | { |
2401 | 0 | TriggerDesc *trigdesc; |
2402 | 0 | int i; |
2403 | 0 | TriggerData LocTriggerData = {0}; |
2404 |  |
2405 | 0 | trigdesc = relinfo->ri_TrigDesc; |
2406 |  |
2407 | 0 | if (trigdesc == NULL) |
2408 | 0 | return; |
2409 | 0 | if (!trigdesc->trig_insert_before_statement) |
2410 | 0 | return; |
2411 | | |
2412 | | /* no-op if we already fired BS triggers in this context */ |
2413 | 0 | if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc), |
2414 | 0 | CMD_INSERT)) |
2415 | 0 | return; |
2416 | | |
2417 | 0 | LocTriggerData.type = T_TriggerData; |
2418 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_INSERT | |
2419 | 0 | TRIGGER_EVENT_BEFORE; |
2420 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
2421 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2422 | 0 | { |
2423 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
2424 | 0 | HeapTuple newtuple; |
2425 |  |
2426 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
2427 | 0 | TRIGGER_TYPE_STATEMENT, |
2428 | 0 | TRIGGER_TYPE_BEFORE, |
2429 | 0 | TRIGGER_TYPE_INSERT)) |
2430 | 0 | continue; |
2431 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
2432 | 0 | NULL, NULL, NULL)) |
2433 | 0 | continue; |
2434 | | |
2435 | 0 | LocTriggerData.tg_trigger = trigger; |
2436 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
2437 | 0 | i, |
2438 | 0 | relinfo->ri_TrigFunctions, |
2439 | 0 | relinfo->ri_TrigInstrument, |
2440 | 0 | GetPerTupleMemoryContext(estate)); |
2441 |  |
2442 | 0 | if (newtuple) |
2443 | 0 | ereport(ERROR, |
2444 | 0 | (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), |
2445 | 0 | errmsg("BEFORE STATEMENT trigger cannot return a value"))); |
2446 | 0 | } |
2447 | 0 | } |
2448 | | |
2449 | | void |
2450 | | ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo, |
2451 | | TransitionCaptureState *transition_capture) |
2452 | 0 | { |
2453 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2454 |  |
2455 | 0 | if (trigdesc && trigdesc->trig_insert_after_statement) |
2456 | 0 | AfterTriggerSaveEvent(estate, relinfo, NULL, NULL, |
2457 | 0 | TRIGGER_EVENT_INSERT, |
2458 | 0 | false, NULL, NULL, NIL, NULL, transition_capture, |
2459 | 0 | false); |
2460 | 0 | } |
2461 | | |
2462 | | bool |
2463 | | ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo, |
2464 | | TupleTableSlot *slot) |
2465 | 0 | { |
2466 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2467 | 0 | HeapTuple newtuple = NULL; |
2468 | 0 | bool should_free; |
2469 | 0 | TriggerData LocTriggerData = {0}; |
2470 | 0 | int i; |
2471 | |
|
2472 | 0 | LocTriggerData.type = T_TriggerData; |
2473 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_INSERT | |
2474 | 0 | TRIGGER_EVENT_ROW | |
2475 | 0 | TRIGGER_EVENT_BEFORE; |
2476 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
2477 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2478 | 0 | { |
2479 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
2480 | 0 | HeapTuple oldtuple; |
2481 |  |
2482 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
2483 | 0 | TRIGGER_TYPE_ROW, |
2484 | 0 | TRIGGER_TYPE_BEFORE, |
2485 | 0 | TRIGGER_TYPE_INSERT)) |
2486 | 0 | continue; |
2487 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
2488 | 0 | NULL, NULL, slot)) |
2489 | 0 | continue; |
2490 | | |
2491 | 0 | if (!newtuple) |
2492 | 0 | newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free); |
2493 |  |
2494 | 0 | LocTriggerData.tg_trigslot = slot; |
2495 | 0 | LocTriggerData.tg_trigtuple = oldtuple = newtuple; |
2496 | 0 | LocTriggerData.tg_trigger = trigger; |
2497 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
2498 | 0 | i, |
2499 | 0 | relinfo->ri_TrigFunctions, |
2500 | 0 | relinfo->ri_TrigInstrument, |
2501 | 0 | GetPerTupleMemoryContext(estate)); |
2502 | 0 | if (newtuple == NULL) |
2503 | 0 | { |
2504 | 0 | if (should_free) |
2505 | 0 | heap_freetuple(oldtuple); |
2506 | 0 | return false; /* "do nothing" */ |
2507 | 0 | } |
2508 | 0 | else if (newtuple != oldtuple) |
2509 | 0 | { |
2510 | 0 | newtuple = check_modified_virtual_generated(RelationGetDescr(relinfo->ri_RelationDesc), newtuple); |
2511 |  |
2512 | 0 | ExecForceStoreHeapTuple(newtuple, slot, false); |
2513 | | |
2514 | | /* |
2515 | | * After a tuple in a partition goes through a trigger, the user |
2516 | | * could have changed the partition key enough that the tuple no |
2517 | | * longer fits the partition. Verify that. |
2518 | | */ |
2519 | 0 | if (trigger->tgisclone && |
2520 | 0 | !ExecPartitionCheck(relinfo, slot, estate, false)) |
2521 | 0 | ereport(ERROR, |
2522 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
2523 | 0 | errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"), |
2524 | 0 | errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".", |
2525 | 0 | trigger->tgname, |
2526 | 0 | get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)), |
2527 | 0 | RelationGetRelationName(relinfo->ri_RelationDesc)))); |
2528 | | |
2529 | 0 | if (should_free) |
2530 | 0 | heap_freetuple(oldtuple); |
2531 | | |
2532 | | /* signal tuple should be re-fetched if used */ |
2533 | 0 | newtuple = NULL; |
2534 | 0 | } |
2535 | 0 | } |
2536 | | |
2537 | 0 | return true; |
2538 | 0 | } |
2539 | | |
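The loop above defines the BEFORE ROW protocol from the trigger function's side: returning NULL cancels the row ("do nothing"), while returning a different tuple replaces the one being inserted, subject to the partition-constraint recheck. A hedged sketch of a trigger function exercising the NULL path follows; the function name is hypothetical, it assumes the table's first column is what should gate the insert, and PG_MODULE_MAGIC is omitted (see the earlier sketch).

    #include "postgres.h"
    #include "access/htup_details.h"
    #include "commands/trigger.h"
    #include "utils/rel.h"
    #include "fmgr.h"

    PG_FUNCTION_INFO_V1(skip_null_first_col);

    /*
     * BEFORE INSERT ... FOR EACH ROW trigger: silently drop any row whose
     * first column is NULL; otherwise let the insert proceed unchanged.
     */
    Datum
    skip_null_first_col(PG_FUNCTION_ARGS)
    {
        TriggerData *trigdata = (TriggerData *) fcinfo->context;
        TupleDesc    tupdesc;
        bool         isnull;

        if (!CALLED_AS_TRIGGER(fcinfo))
            elog(ERROR, "skip_null_first_col: not called by trigger manager");

        tupdesc = trigdata->tg_relation->rd_att;
        (void) heap_getattr(trigdata->tg_trigtuple, 1, tupdesc, &isnull);

        if (isnull)
            return PointerGetDatum(NULL);   /* suppress this row */

        return PointerGetDatum(trigdata->tg_trigtuple);
    }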
2540 | | void |
2541 | | ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo, |
2542 | | TupleTableSlot *slot, List *recheckIndexes, |
2543 | | TransitionCaptureState *transition_capture) |
2544 | 0 | { |
2545 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2546 | |
|
2547 | 0 | if ((trigdesc && trigdesc->trig_insert_after_row) || |
2548 | 0 | (transition_capture && transition_capture->tcs_insert_new_table)) |
2549 | 0 | AfterTriggerSaveEvent(estate, relinfo, NULL, NULL, |
2550 | 0 | TRIGGER_EVENT_INSERT, |
2551 | 0 | true, NULL, slot, |
2552 | 0 | recheckIndexes, NULL, |
2553 | 0 | transition_capture, |
2554 | 0 | false); |
2555 | 0 | } |
2556 | | |
2557 | | bool |
2558 | | ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo, |
2559 | | TupleTableSlot *slot) |
2560 | 0 | { |
2561 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2562 | 0 | HeapTuple newtuple = NULL; |
2563 | 0 | bool should_free; |
2564 | 0 | TriggerData LocTriggerData = {0}; |
2565 | 0 | int i; |
2566 |  |
2567 | 0 | LocTriggerData.type = T_TriggerData; |
2568 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_INSERT | |
2569 | 0 | TRIGGER_EVENT_ROW | |
2570 | 0 | TRIGGER_EVENT_INSTEAD; |
2571 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
2572 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2573 | 0 | { |
2574 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
2575 | 0 | HeapTuple oldtuple; |
2576 | |
|
2577 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
2578 | 0 | TRIGGER_TYPE_ROW, |
2579 | 0 | TRIGGER_TYPE_INSTEAD, |
2580 | 0 | TRIGGER_TYPE_INSERT)) |
2581 | 0 | continue; |
2582 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
2583 | 0 | NULL, NULL, slot)) |
2584 | 0 | continue; |
2585 | | |
2586 | 0 | if (!newtuple) |
2587 | 0 | newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free); |
2588 |  |
2589 | 0 | LocTriggerData.tg_trigslot = slot; |
2590 | 0 | LocTriggerData.tg_trigtuple = oldtuple = newtuple; |
2591 | 0 | LocTriggerData.tg_trigger = trigger; |
2592 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
2593 | 0 | i, |
2594 | 0 | relinfo->ri_TrigFunctions, |
2595 | 0 | relinfo->ri_TrigInstrument, |
2596 | 0 | GetPerTupleMemoryContext(estate)); |
2597 | 0 | if (newtuple == NULL) |
2598 | 0 | { |
2599 | 0 | if (should_free) |
2600 | 0 | heap_freetuple(oldtuple); |
2601 | 0 | return false; /* "do nothing" */ |
2602 | 0 | } |
2603 | 0 | else if (newtuple != oldtuple) |
2604 | 0 | { |
2605 | 0 | ExecForceStoreHeapTuple(newtuple, slot, false); |
2606 |  |
2607 | 0 | if (should_free) |
2608 | 0 | heap_freetuple(oldtuple); |
2609 | | |
2610 | | /* signal tuple should be re-fetched if used */ |
2611 | 0 | newtuple = NULL; |
2612 | 0 | } |
2613 | 0 | } |
2614 | | |
2615 | 0 | return true; |
2616 | 0 | } |
2617 | | |
2618 | | void |
2619 | | ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo) |
2620 | 0 | { |
2621 | 0 | TriggerDesc *trigdesc; |
2622 | 0 | int i; |
2623 | 0 | TriggerData LocTriggerData = {0}; |
2624 |  |
2625 | 0 | trigdesc = relinfo->ri_TrigDesc; |
2626 |  |
2627 | 0 | if (trigdesc == NULL) |
2628 | 0 | return; |
2629 | 0 | if (!trigdesc->trig_delete_before_statement) |
2630 | 0 | return; |
2631 | | |
2632 | | /* no-op if we already fired BS triggers in this context */ |
2633 | 0 | if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc), |
2634 | 0 | CMD_DELETE)) |
2635 | 0 | return; |
2636 | | |
2637 | 0 | LocTriggerData.type = T_TriggerData; |
2638 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_DELETE | |
2639 | 0 | TRIGGER_EVENT_BEFORE; |
2640 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
2641 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2642 | 0 | { |
2643 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
2644 | 0 | HeapTuple newtuple; |
2645 | |
|
2646 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
2647 | 0 | TRIGGER_TYPE_STATEMENT, |
2648 | 0 | TRIGGER_TYPE_BEFORE, |
2649 | 0 | TRIGGER_TYPE_DELETE)) |
2650 | 0 | continue; |
2651 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
2652 | 0 | NULL, NULL, NULL)) |
2653 | 0 | continue; |
2654 | | |
2655 | 0 | LocTriggerData.tg_trigger = trigger; |
2656 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
2657 | 0 | i, |
2658 | 0 | relinfo->ri_TrigFunctions, |
2659 | 0 | relinfo->ri_TrigInstrument, |
2660 | 0 | GetPerTupleMemoryContext(estate)); |
2661 |  |
2662 | 0 | if (newtuple) |
2663 | 0 | ereport(ERROR, |
2664 | 0 | (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), |
2665 | 0 | errmsg("BEFORE STATEMENT trigger cannot return a value"))); |
2666 | 0 | } |
2667 | 0 | } |
2668 | | |
2669 | | void |
2670 | | ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo, |
2671 | | TransitionCaptureState *transition_capture) |
2672 | 0 | { |
2673 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2674 |  |
2675 | 0 | if (trigdesc && trigdesc->trig_delete_after_statement) |
2676 | 0 | AfterTriggerSaveEvent(estate, relinfo, NULL, NULL, |
2677 | 0 | TRIGGER_EVENT_DELETE, |
2678 | 0 | false, NULL, NULL, NIL, NULL, transition_capture, |
2679 | 0 | false); |
2680 | 0 | } |
2681 | | |
2682 | | /* |
2683 | | * Execute BEFORE ROW DELETE triggers. |
2684 | | * |
2685 | | * True indicates the caller can proceed with the delete. False indicates |
2686 | | * the caller must suppress the delete; additionally, if requested, we pass |
2687 | | * back the concurrently updated tuple, if any. |
2688 | | */ |
2689 | | bool |
2690 | | ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, |
2691 | | ResultRelInfo *relinfo, |
2692 | | ItemPointer tupleid, |
2693 | | HeapTuple fdw_trigtuple, |
2694 | | TupleTableSlot **epqslot, |
2695 | | TM_Result *tmresult, |
2696 | | TM_FailureData *tmfd) |
2697 | 0 | { |
2698 | 0 | TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo); |
2699 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2700 | 0 | bool result = true; |
2701 | 0 | TriggerData LocTriggerData = {0}; |
2702 | 0 | HeapTuple trigtuple; |
2703 | 0 | bool should_free = false; |
2704 | 0 | int i; |
2705 |  |
2706 | 0 | Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid)); |
2707 | 0 | if (fdw_trigtuple == NULL) |
2708 | 0 | { |
2709 | 0 | TupleTableSlot *epqslot_candidate = NULL; |
2710 |  |
2711 | 0 | if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid, |
2712 | 0 | LockTupleExclusive, slot, &epqslot_candidate, |
2713 | 0 | tmresult, tmfd)) |
2714 | 0 | return false; |
2715 | | |
2716 | | /* |
2717 | | * If the tuple was concurrently updated and the caller of this |
2718 | | * function requested the updated tuple, skip the trigger |
2719 | | * execution. |
2720 | | */ |
2721 | 0 | if (epqslot_candidate != NULL && epqslot != NULL) |
2722 | 0 | { |
2723 | 0 | *epqslot = epqslot_candidate; |
2724 | 0 | return false; |
2725 | 0 | } |
2726 | | |
2727 | 0 | trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free); |
2728 | 0 | } |
2729 | 0 | else |
2730 | 0 | { |
2731 | 0 | trigtuple = fdw_trigtuple; |
2732 | 0 | ExecForceStoreHeapTuple(trigtuple, slot, false); |
2733 | 0 | } |
2734 | | |
2735 | 0 | LocTriggerData.type = T_TriggerData; |
2736 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_DELETE | |
2737 | 0 | TRIGGER_EVENT_ROW | |
2738 | 0 | TRIGGER_EVENT_BEFORE; |
2739 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
2740 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2741 | 0 | { |
2742 | 0 | HeapTuple newtuple; |
2743 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
2744 | |
|
2745 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
2746 | 0 | TRIGGER_TYPE_ROW, |
2747 | 0 | TRIGGER_TYPE_BEFORE, |
2748 | 0 | TRIGGER_TYPE_DELETE)) |
2749 | 0 | continue; |
2750 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
2751 | 0 | NULL, slot, NULL)) |
2752 | 0 | continue; |
2753 | | |
2754 | 0 | LocTriggerData.tg_trigslot = slot; |
2755 | 0 | LocTriggerData.tg_trigtuple = trigtuple; |
2756 | 0 | LocTriggerData.tg_trigger = trigger; |
2757 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
2758 | 0 | i, |
2759 | 0 | relinfo->ri_TrigFunctions, |
2760 | 0 | relinfo->ri_TrigInstrument, |
2761 | 0 | GetPerTupleMemoryContext(estate)); |
2762 | 0 | if (newtuple == NULL) |
2763 | 0 | { |
2764 | 0 | result = false; /* tell caller to suppress delete */ |
2765 | 0 | break; |
2766 | 0 | } |
2767 | 0 | if (newtuple != trigtuple) |
2768 | 0 | heap_freetuple(newtuple); |
2769 | 0 | } |
2770 | 0 | if (should_free) |
2771 | 0 | heap_freetuple(trigtuple); |
2772 |  |
2773 | 0 | return result; |
2774 | 0 | } |
2775 | | |
2776 | | /* |
2777 | | * Note: is_crosspart_update must be true if the DELETE is being performed |
2778 | | * as part of a cross-partition update. |
2779 | | */ |
2780 | | void |
2781 | | ExecARDeleteTriggers(EState *estate, |
2782 | | ResultRelInfo *relinfo, |
2783 | | ItemPointer tupleid, |
2784 | | HeapTuple fdw_trigtuple, |
2785 | | TransitionCaptureState *transition_capture, |
2786 | | bool is_crosspart_update) |
2787 | 0 | { |
2788 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2789 |  |
2790 | 0 | if ((trigdesc && trigdesc->trig_delete_after_row) || |
2791 | 0 | (transition_capture && transition_capture->tcs_delete_old_table)) |
2792 | 0 | { |
2793 | 0 | TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo); |
2794 |  |
2795 | 0 | Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid)); |
2796 | 0 | if (fdw_trigtuple == NULL) |
2797 | 0 | GetTupleForTrigger(estate, |
2798 | 0 | NULL, |
2799 | 0 | relinfo, |
2800 | 0 | tupleid, |
2801 | 0 | LockTupleExclusive, |
2802 | 0 | slot, |
2803 | 0 | NULL, |
2804 | 0 | NULL, |
2805 | 0 | NULL); |
2806 | 0 | else |
2807 | 0 | ExecForceStoreHeapTuple(fdw_trigtuple, slot, false); |
2808 |  |
2809 | 0 | AfterTriggerSaveEvent(estate, relinfo, NULL, NULL, |
2810 | 0 | TRIGGER_EVENT_DELETE, |
2811 | 0 | true, slot, NULL, NIL, NULL, |
2812 | 0 | transition_capture, |
2813 | 0 | is_crosspart_update); |
2814 | 0 | } |
2815 | 0 | } |
2816 | | |
2817 | | bool |
2818 | | ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo, |
2819 | | HeapTuple trigtuple) |
2820 | 0 | { |
2821 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2822 | 0 | TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo); |
2823 | 0 | TriggerData LocTriggerData = {0}; |
2824 | 0 | int i; |
2825 |  |
2826 | 0 | LocTriggerData.type = T_TriggerData; |
2827 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_DELETE | |
2828 | 0 | TRIGGER_EVENT_ROW | |
2829 | 0 | TRIGGER_EVENT_INSTEAD; |
2830 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
2831 |  |
2832 | 0 | ExecForceStoreHeapTuple(trigtuple, slot, false); |
2833 |  |
2834 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2835 | 0 | { |
2836 | 0 | HeapTuple rettuple; |
2837 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
2838 |  |
2839 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
2840 | 0 | TRIGGER_TYPE_ROW, |
2841 | 0 | TRIGGER_TYPE_INSTEAD, |
2842 | 0 | TRIGGER_TYPE_DELETE)) |
2843 | 0 | continue; |
2844 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
2845 | 0 | NULL, slot, NULL)) |
2846 | 0 | continue; |
2847 | | |
2848 | 0 | LocTriggerData.tg_trigslot = slot; |
2849 | 0 | LocTriggerData.tg_trigtuple = trigtuple; |
2850 | 0 | LocTriggerData.tg_trigger = trigger; |
2851 | 0 | rettuple = ExecCallTriggerFunc(&LocTriggerData, |
2852 | 0 | i, |
2853 | 0 | relinfo->ri_TrigFunctions, |
2854 | 0 | relinfo->ri_TrigInstrument, |
2855 | 0 | GetPerTupleMemoryContext(estate)); |
2856 | 0 | if (rettuple == NULL) |
2857 | 0 | return false; /* Delete was suppressed */ |
2858 | 0 | if (rettuple != trigtuple) |
2859 | 0 | heap_freetuple(rettuple); |
2860 | 0 | } |
2861 | 0 | return true; |
2862 | 0 | } |
2863 | | |
2864 | | void |
2865 | | ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo) |
2866 | 0 | { |
2867 | 0 | TriggerDesc *trigdesc; |
2868 | 0 | int i; |
2869 | 0 | TriggerData LocTriggerData = {0}; |
2870 | 0 | Bitmapset *updatedCols; |
2871 | |
2872 | 0 | trigdesc = relinfo->ri_TrigDesc; |
2873 | |
2874 | 0 | if (trigdesc == NULL) |
2875 | 0 | return; |
2876 | 0 | if (!trigdesc->trig_update_before_statement) |
2877 | 0 | return; |
2878 | | |
2879 | | /* no-op if we already fired BS triggers in this context */ |
2880 | 0 | if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc), |
2881 | 0 | CMD_UPDATE)) |
2882 | 0 | return; |
2883 | | |
2884 | | /* statement-level triggers operate on the parent table */ |
2885 | 0 | Assert(relinfo->ri_RootResultRelInfo == NULL); |
2886 | |
2887 | 0 | updatedCols = ExecGetAllUpdatedCols(relinfo, estate); |
2888 | |
2889 | 0 | LocTriggerData.type = T_TriggerData; |
2890 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE | |
2891 | 0 | TRIGGER_EVENT_BEFORE; |
2892 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
2893 | 0 | LocTriggerData.tg_updatedcols = updatedCols; |
2894 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
2895 | 0 | { |
2896 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
2897 | 0 | HeapTuple newtuple; |
2898 | |
2899 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
2900 | 0 | TRIGGER_TYPE_STATEMENT, |
2901 | 0 | TRIGGER_TYPE_BEFORE, |
2902 | 0 | TRIGGER_TYPE_UPDATE)) |
2903 | 0 | continue; |
2904 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
2905 | 0 | updatedCols, NULL, NULL)) |
2906 | 0 | continue; |
2907 | | |
2908 | 0 | LocTriggerData.tg_trigger = trigger; |
2909 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
2910 | 0 | i, |
2911 | 0 | relinfo->ri_TrigFunctions, |
2912 | 0 | relinfo->ri_TrigInstrument, |
2913 | 0 | GetPerTupleMemoryContext(estate)); |
2914 | |
2915 | 0 | if (newtuple) |
2916 | 0 | ereport(ERROR, |
2917 | 0 | (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), |
2918 | 0 | errmsg("BEFORE STATEMENT trigger cannot return a value"))); |
2919 | 0 | } |
2920 | 0 | } |
2921 | | |
2922 | | void |
2923 | | ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo, |
2924 | | TransitionCaptureState *transition_capture) |
2925 | 0 | { |
2926 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2927 | | |
2928 | | /* statement-level triggers operate on the parent table */ |
2929 | 0 | Assert(relinfo->ri_RootResultRelInfo == NULL); |
2930 | |
2931 | 0 | if (trigdesc && trigdesc->trig_update_after_statement) |
2932 | 0 | AfterTriggerSaveEvent(estate, relinfo, NULL, NULL, |
2933 | 0 | TRIGGER_EVENT_UPDATE, |
2934 | 0 | false, NULL, NULL, NIL, |
2935 | 0 | ExecGetAllUpdatedCols(relinfo, estate), |
2936 | 0 | transition_capture, |
2937 | 0 | false); |
2938 | 0 | } |
2939 | | |
2940 | | bool |
2941 | | ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, |
2942 | | ResultRelInfo *relinfo, |
2943 | | ItemPointer tupleid, |
2944 | | HeapTuple fdw_trigtuple, |
2945 | | TupleTableSlot *newslot, |
2946 | | TM_Result *tmresult, |
2947 | | TM_FailureData *tmfd) |
2948 | 0 | { |
2949 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
2950 | 0 | TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo); |
2951 | 0 | HeapTuple newtuple = NULL; |
2952 | 0 | HeapTuple trigtuple; |
2953 | 0 | bool should_free_trig = false; |
2954 | 0 | bool should_free_new = false; |
2955 | 0 | TriggerData LocTriggerData = {0}; |
2956 | 0 | int i; |
2957 | 0 | Bitmapset *updatedCols; |
2958 | 0 | LockTupleMode lockmode; |
2959 | | |
2960 | | /* Determine lock mode to use */ |
2961 | 0 | lockmode = ExecUpdateLockMode(estate, relinfo); |
2962 | |
2963 | 0 | Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid)); |
2964 | 0 | if (fdw_trigtuple == NULL) |
2965 | 0 | { |
2966 | 0 | TupleTableSlot *epqslot_candidate = NULL; |
2967 | | |
2968 | | /* get a copy of the on-disk tuple we are planning to update */ |
2969 | 0 | if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid, |
2970 | 0 | lockmode, oldslot, &epqslot_candidate, |
2971 | 0 | tmresult, tmfd)) |
2972 | 0 | return false; /* cancel the update action */ |
2973 | | |
2974 | | /* |
2975 | | * In READ COMMITTED isolation level it's possible that target tuple |
2976 | | * was changed due to concurrent update. In that case we have a raw |
2977 | | * subplan output tuple in epqslot_candidate, and need to form a new |
2978 | | * insertable tuple using ExecGetUpdateNewTuple to replace the one we |
2979 | | * received in newslot. Neither we nor our callers have any further |
2980 | | * interest in the passed-in tuple, so it's okay to overwrite newslot |
2981 | | * with the newer data. |
2982 | | */ |
2983 | 0 | if (epqslot_candidate != NULL) |
2984 | 0 | { |
2985 | 0 | TupleTableSlot *epqslot_clean; |
2986 | |
2987 | 0 | epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate, |
2988 | 0 | oldslot); |
2989 | | |
2990 | | /* |
2991 | | * Typically, the caller's newslot was also generated by |
2992 | | * ExecGetUpdateNewTuple, so that epqslot_clean will be the same |
2993 | | * slot and copying is not needed. But do the right thing if it |
2994 | | * isn't. |
2995 | | */ |
2996 | 0 | if (unlikely(newslot != epqslot_clean)) |
2997 | 0 | ExecCopySlot(newslot, epqslot_clean); |
2998 | | |
2999 | | /* |
3000 | | * At this point newslot contains a virtual tuple that may |
3001 | | * reference some fields of oldslot's tuple in some disk buffer. |
3002 | | * If that tuple is in a different page than the original target |
3003 | | * tuple, then our only pin on that buffer is oldslot's, and we're |
3004 | | * about to release it. Hence we'd better materialize newslot to |
3005 | | * ensure it doesn't contain references into an unpinned buffer. |
3006 | | * (We'd materialize it below anyway, but too late for safety.) |
3007 | | */ |
3008 | 0 | ExecMaterializeSlot(newslot); |
3009 | 0 | } |
3010 | | |
3011 | | /* |
3012 | | * Here we convert oldslot to a materialized slot holding trigtuple. |
3013 | | * Neither slot passed to the triggers will hold any buffer pin. |
3014 | | */ |
3015 | 0 | trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig); |
3016 | 0 | } |
3017 | 0 | else |
3018 | 0 | { |
3019 | | /* Put the FDW-supplied tuple into oldslot to unify the cases */ |
3020 | 0 | ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false); |
3021 | 0 | trigtuple = fdw_trigtuple; |
3022 | 0 | } |
3023 | | |
3024 | 0 | LocTriggerData.type = T_TriggerData; |
3025 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE | |
3026 | 0 | TRIGGER_EVENT_ROW | |
3027 | 0 | TRIGGER_EVENT_BEFORE; |
3028 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
3029 | 0 | updatedCols = ExecGetAllUpdatedCols(relinfo, estate); |
3030 | 0 | LocTriggerData.tg_updatedcols = updatedCols; |
3031 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
3032 | 0 | { |
3033 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
3034 | 0 | HeapTuple oldtuple; |
3035 | |
3036 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
3037 | 0 | TRIGGER_TYPE_ROW, |
3038 | 0 | TRIGGER_TYPE_BEFORE, |
3039 | 0 | TRIGGER_TYPE_UPDATE)) |
3040 | 0 | continue; |
3041 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
3042 | 0 | updatedCols, oldslot, newslot)) |
3043 | 0 | continue; |
3044 | | |
3045 | 0 | if (!newtuple) |
3046 | 0 | newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new); |
3047 | |
3048 | 0 | LocTriggerData.tg_trigslot = oldslot; |
3049 | 0 | LocTriggerData.tg_trigtuple = trigtuple; |
3050 | 0 | LocTriggerData.tg_newtuple = oldtuple = newtuple; |
3051 | 0 | LocTriggerData.tg_newslot = newslot; |
3052 | 0 | LocTriggerData.tg_trigger = trigger; |
3053 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
3054 | 0 | i, |
3055 | 0 | relinfo->ri_TrigFunctions, |
3056 | 0 | relinfo->ri_TrigInstrument, |
3057 | 0 | GetPerTupleMemoryContext(estate)); |
3058 | |
3059 | 0 | if (newtuple == NULL) |
3060 | 0 | { |
3061 | 0 | if (should_free_trig) |
3062 | 0 | heap_freetuple(trigtuple); |
3063 | 0 | if (should_free_new) |
3064 | 0 | heap_freetuple(oldtuple); |
3065 | 0 | return false; /* "do nothing" */ |
3066 | 0 | } |
3067 | 0 | else if (newtuple != oldtuple) |
3068 | 0 | { |
3069 | 0 | newtuple = check_modified_virtual_generated(RelationGetDescr(relinfo->ri_RelationDesc), newtuple); |
3070 | |
3071 | 0 | ExecForceStoreHeapTuple(newtuple, newslot, false); |
3072 | | |
3073 | | /* |
 3075 | |             * If the tuple being stored (i.e., the one returned by the trigger) is |
 3076 | |             * the old row version, and the heap tuple passed to the trigger was |
3076 | | * allocated locally, materialize the slot. Otherwise we might |
3077 | | * free it while still referenced by the slot. |
3078 | | */ |
3079 | 0 | if (should_free_trig && newtuple == trigtuple) |
3080 | 0 | ExecMaterializeSlot(newslot); |
3081 | |
3082 | 0 | if (should_free_new) |
3083 | 0 | heap_freetuple(oldtuple); |
3084 | | |
3085 | | /* signal tuple should be re-fetched if used */ |
3086 | 0 | newtuple = NULL; |
3087 | 0 | } |
3088 | 0 | } |
3089 | 0 | if (should_free_trig) |
3090 | 0 | heap_freetuple(trigtuple); |
3091 | |
3092 | 0 | return true; |
3093 | 0 | } |
3094 | | |
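The row-level BEFORE and INSTEAD OF loops above all follow the same return-value protocol: a trigger that returns NULL suppresses the operation, returning the tuple it was given lets the operation proceed unchanged, and returning a different tuple replaces the row that will be stored, with each later trigger seeing the previous trigger's result. The following is a minimal standalone sketch of that protocol in plain C; the MockTuple type, the callback signature, and the driver in main() are illustrative assumptions, not PostgreSQL's HeapTuple/slot machinery.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for a heap tuple: just a small payload. */
typedef struct MockTuple { char data[64]; } MockTuple;

/* A row-level trigger returns NULL (suppress), its input (no change),
 * or a freshly allocated replacement tuple. */
typedef MockTuple *(*MockTriggerFunc) (MockTuple *row);

static MockTuple *pass_through(MockTuple *row) { return row; }
static MockTuple *suppress(MockTuple *row) { (void) row; return NULL; }
static MockTuple *uppercase_first(MockTuple *row)
{
    MockTuple *copy = malloc(sizeof(MockTuple));
    *copy = *row;
    if (copy->data[0] >= 'a' && copy->data[0] <= 'z')
        copy->data[0] -= 'a' - 'A';
    return copy;
}

/* Mirrors the shape of the BEFORE ROW loop: each trigger sees the tuple
 * produced by the previous one; a NULL result aborts the whole operation. */
static bool
fire_before_row(MockTriggerFunc *trigs, int ntrigs, MockTuple *slot)
{
    MockTuple *current = slot;

    for (int i = 0; i < ntrigs; i++)
    {
        MockTuple *result = trigs[i](current);

        if (result == NULL)
            return false;        /* "do nothing", like returning false above */
        if (result != current)
        {
            *slot = *result;     /* store the replacement back into the slot */
            free(result);
        }
        current = slot;
    }
    return true;
}

int main(void)
{
    MockTuple row;
    MockTriggerFunc chain[] = { uppercase_first, pass_through };
    MockTriggerFunc blocking[] = { suppress };

    strcpy(row.data, "hello");
    if (fire_before_row(chain, 2, &row))
        printf("stored: %s\n", row.data);    /* prints "stored: Hello" */

    strcpy(row.data, "hello");
    if (!fire_before_row(blocking, 1, &row))
        printf("operation suppressed\n");
    return 0;
}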
3095 | | /* |
3096 | | * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source |
3097 | | * and destination partitions, respectively, of a cross-partition update of |
3098 | | * the root partitioned table mentioned in the query, given by 'relinfo'. |
3099 | | * 'tupleid' in that case refers to the ctid of the "old" tuple in the source |
3100 | | * partition, and 'newslot' contains the "new" tuple in the destination |
 3101 | |  * partition. This interface makes it possible to support the requirements of |
3102 | | * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in |
3103 | | * that case. |
3104 | | */ |
3105 | | void |
3106 | | ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, |
3107 | | ResultRelInfo *src_partinfo, |
3108 | | ResultRelInfo *dst_partinfo, |
3109 | | ItemPointer tupleid, |
3110 | | HeapTuple fdw_trigtuple, |
3111 | | TupleTableSlot *newslot, |
3112 | | List *recheckIndexes, |
3113 | | TransitionCaptureState *transition_capture, |
3114 | | bool is_crosspart_update) |
3115 | 0 | { |
3116 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
3117 | |
3118 | 0 | if ((trigdesc && trigdesc->trig_update_after_row) || |
3119 | 0 | (transition_capture && |
3120 | 0 | (transition_capture->tcs_update_old_table || |
3121 | 0 | transition_capture->tcs_update_new_table))) |
3122 | 0 | { |
3123 | | /* |
3124 | | * Note: if the UPDATE is converted into a DELETE+INSERT as part of |
3125 | | * update-partition-key operation, then this function is also called |
3126 | | * separately for DELETE and INSERT to capture transition table rows. |
 3127 | |          * In that case, either the old tuple or the new tuple can be NULL. |
3128 | | */ |
3129 | 0 | TupleTableSlot *oldslot; |
3130 | 0 | ResultRelInfo *tupsrc; |
3131 | |
3132 | 0 | Assert((src_partinfo != NULL && dst_partinfo != NULL) || |
3133 | 0 | !is_crosspart_update); |
3134 | |
3135 | 0 | tupsrc = src_partinfo ? src_partinfo : relinfo; |
3136 | 0 | oldslot = ExecGetTriggerOldSlot(estate, tupsrc); |
3137 | |
3138 | 0 | if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid)) |
3139 | 0 | GetTupleForTrigger(estate, |
3140 | 0 | NULL, |
3141 | 0 | tupsrc, |
3142 | 0 | tupleid, |
3143 | 0 | LockTupleExclusive, |
3144 | 0 | oldslot, |
3145 | 0 | NULL, |
3146 | 0 | NULL, |
3147 | 0 | NULL); |
3148 | 0 | else if (fdw_trigtuple != NULL) |
3149 | 0 | ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false); |
3150 | 0 | else |
3151 | 0 | ExecClearTuple(oldslot); |
3152 | |
3153 | 0 | AfterTriggerSaveEvent(estate, relinfo, |
3154 | 0 | src_partinfo, dst_partinfo, |
3155 | 0 | TRIGGER_EVENT_UPDATE, |
3156 | 0 | true, |
3157 | 0 | oldslot, newslot, recheckIndexes, |
3158 | 0 | ExecGetAllUpdatedCols(relinfo, estate), |
3159 | 0 | transition_capture, |
3160 | 0 | is_crosspart_update); |
3161 | 0 | } |
3162 | 0 | } |
3163 | | |
3164 | | bool |
3165 | | ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, |
3166 | | HeapTuple trigtuple, TupleTableSlot *newslot) |
3167 | 0 | { |
3168 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
3169 | 0 | TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo); |
3170 | 0 | HeapTuple newtuple = NULL; |
3171 | 0 | bool should_free; |
3172 | 0 | TriggerData LocTriggerData = {0}; |
3173 | 0 | int i; |
3174 | |
3175 | 0 | LocTriggerData.type = T_TriggerData; |
3176 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE | |
3177 | 0 | TRIGGER_EVENT_ROW | |
3178 | 0 | TRIGGER_EVENT_INSTEAD; |
3179 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
3180 | |
3181 | 0 | ExecForceStoreHeapTuple(trigtuple, oldslot, false); |
3182 | |
3183 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
3184 | 0 | { |
3185 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
3186 | 0 | HeapTuple oldtuple; |
3187 | |
3188 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
3189 | 0 | TRIGGER_TYPE_ROW, |
3190 | 0 | TRIGGER_TYPE_INSTEAD, |
3191 | 0 | TRIGGER_TYPE_UPDATE)) |
3192 | 0 | continue; |
3193 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
3194 | 0 | NULL, oldslot, newslot)) |
3195 | 0 | continue; |
3196 | | |
3197 | 0 | if (!newtuple) |
3198 | 0 | newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free); |
3199 | |
3200 | 0 | LocTriggerData.tg_trigslot = oldslot; |
3201 | 0 | LocTriggerData.tg_trigtuple = trigtuple; |
3202 | 0 | LocTriggerData.tg_newslot = newslot; |
3203 | 0 | LocTriggerData.tg_newtuple = oldtuple = newtuple; |
3204 | |
3205 | 0 | LocTriggerData.tg_trigger = trigger; |
3206 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
3207 | 0 | i, |
3208 | 0 | relinfo->ri_TrigFunctions, |
3209 | 0 | relinfo->ri_TrigInstrument, |
3210 | 0 | GetPerTupleMemoryContext(estate)); |
3211 | 0 | if (newtuple == NULL) |
3212 | 0 | { |
3213 | 0 | return false; /* "do nothing" */ |
3214 | 0 | } |
3215 | 0 | else if (newtuple != oldtuple) |
3216 | 0 | { |
3217 | 0 | ExecForceStoreHeapTuple(newtuple, newslot, false); |
3218 | |
3219 | 0 | if (should_free) |
3220 | 0 | heap_freetuple(oldtuple); |
3221 | | |
3222 | | /* signal tuple should be re-fetched if used */ |
3223 | 0 | newtuple = NULL; |
3224 | 0 | } |
3225 | 0 | } |
3226 | | |
3227 | 0 | return true; |
3228 | 0 | } |
3229 | | |
3230 | | void |
3231 | | ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo) |
3232 | 0 | { |
3233 | 0 | TriggerDesc *trigdesc; |
3234 | 0 | int i; |
3235 | 0 | TriggerData LocTriggerData = {0}; |
3236 | |
3237 | 0 | trigdesc = relinfo->ri_TrigDesc; |
3238 | |
3239 | 0 | if (trigdesc == NULL) |
3240 | 0 | return; |
3241 | 0 | if (!trigdesc->trig_truncate_before_statement) |
3242 | 0 | return; |
3243 | | |
3244 | 0 | LocTriggerData.type = T_TriggerData; |
3245 | 0 | LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE | |
3246 | 0 | TRIGGER_EVENT_BEFORE; |
3247 | 0 | LocTriggerData.tg_relation = relinfo->ri_RelationDesc; |
3248 | |
3249 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
3250 | 0 | { |
3251 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
3252 | 0 | HeapTuple newtuple; |
3253 | |
3254 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
3255 | 0 | TRIGGER_TYPE_STATEMENT, |
3256 | 0 | TRIGGER_TYPE_BEFORE, |
3257 | 0 | TRIGGER_TYPE_TRUNCATE)) |
3258 | 0 | continue; |
3259 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event, |
3260 | 0 | NULL, NULL, NULL)) |
3261 | 0 | continue; |
3262 | | |
3263 | 0 | LocTriggerData.tg_trigger = trigger; |
3264 | 0 | newtuple = ExecCallTriggerFunc(&LocTriggerData, |
3265 | 0 | i, |
3266 | 0 | relinfo->ri_TrigFunctions, |
3267 | 0 | relinfo->ri_TrigInstrument, |
3268 | 0 | GetPerTupleMemoryContext(estate)); |
3269 | |
3270 | 0 | if (newtuple) |
3271 | 0 | ereport(ERROR, |
3272 | 0 | (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), |
3273 | 0 | errmsg("BEFORE STATEMENT trigger cannot return a value"))); |
3274 | 0 | } |
3275 | 0 | } |
3276 | | |
3277 | | void |
3278 | | ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo) |
3279 | 0 | { |
3280 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
3281 | |
3282 | 0 | if (trigdesc && trigdesc->trig_truncate_after_statement) |
3283 | 0 | AfterTriggerSaveEvent(estate, relinfo, |
3284 | 0 | NULL, NULL, |
3285 | 0 | TRIGGER_EVENT_TRUNCATE, |
3286 | 0 | false, NULL, NULL, NIL, NULL, NULL, |
3287 | 0 | false); |
3288 | 0 | } |
3289 | | |
3290 | | |
3291 | | /* |
3292 | | * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary |
3293 | | */ |
3294 | | static bool |
3295 | | GetTupleForTrigger(EState *estate, |
3296 | | EPQState *epqstate, |
3297 | | ResultRelInfo *relinfo, |
3298 | | ItemPointer tid, |
3299 | | LockTupleMode lockmode, |
3300 | | TupleTableSlot *oldslot, |
3301 | | TupleTableSlot **epqslot, |
3302 | | TM_Result *tmresultp, |
3303 | | TM_FailureData *tmfdp) |
3304 | 0 | { |
3305 | 0 | Relation relation = relinfo->ri_RelationDesc; |
3306 | |
3307 | 0 | if (epqslot != NULL) |
3308 | 0 | { |
3309 | 0 | TM_Result test; |
3310 | 0 | TM_FailureData tmfd; |
3311 | 0 | int lockflags = 0; |
3312 | |
3313 | 0 | *epqslot = NULL; |
3314 | | |
3315 | | /* caller must pass an epqstate if EvalPlanQual is possible */ |
3316 | 0 | Assert(epqstate != NULL); |
3317 | | |
3318 | | /* |
3319 | | * lock tuple for update |
3320 | | */ |
3321 | 0 | if (!IsolationUsesXactSnapshot()) |
3322 | 0 | lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION; |
3323 | 0 | test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot, |
3324 | 0 | estate->es_output_cid, |
3325 | 0 | lockmode, LockWaitBlock, |
3326 | 0 | lockflags, |
3327 | 0 | &tmfd); |
3328 | | |
3329 | | /* Let the caller know about the status of this operation */ |
3330 | 0 | if (tmresultp) |
3331 | 0 | *tmresultp = test; |
3332 | 0 | if (tmfdp) |
3333 | 0 | *tmfdp = tmfd; |
3334 | |
3335 | 0 | switch (test) |
3336 | 0 | { |
3337 | 0 | case TM_SelfModified: |
3338 | | |
3339 | | /* |
3340 | | * The target tuple was already updated or deleted by the |
3341 | | * current command, or by a later command in the current |
3342 | | * transaction. We ignore the tuple in the former case, and |
3343 | | * throw error in the latter case, for the same reasons |
3344 | | * enumerated in ExecUpdate and ExecDelete in |
3345 | | * nodeModifyTable.c. |
3346 | | */ |
3347 | 0 | if (tmfd.cmax != estate->es_output_cid) |
3348 | 0 | ereport(ERROR, |
3349 | 0 | (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), |
3350 | 0 | errmsg("tuple to be updated was already modified by an operation triggered by the current command"), |
3351 | 0 | errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); |
3352 | | |
3353 | | /* treat it as deleted; do not process */ |
3354 | 0 | return false; |
3355 | | |
3356 | 0 | case TM_Ok: |
3357 | 0 | if (tmfd.traversed) |
3358 | 0 | { |
3359 | | /* |
3360 | | * Recheck the tuple using EPQ. For MERGE, we leave this |
3361 | | * to the caller (it must do additional rechecking, and |
3362 | | * might end up executing a different action entirely). |
3363 | | */ |
3364 | 0 | if (estate->es_plannedstmt->commandType == CMD_MERGE) |
3365 | 0 | { |
3366 | 0 | if (tmresultp) |
3367 | 0 | *tmresultp = TM_Updated; |
3368 | 0 | return false; |
3369 | 0 | } |
3370 | | |
3371 | 0 | *epqslot = EvalPlanQual(epqstate, |
3372 | 0 | relation, |
3373 | 0 | relinfo->ri_RangeTableIndex, |
3374 | 0 | oldslot); |
3375 | | |
3376 | | /* |
3377 | | * If PlanQual failed for updated tuple - we must not |
3378 | | * process this tuple! |
3379 | | */ |
3380 | 0 | if (TupIsNull(*epqslot)) |
3381 | 0 | { |
3382 | 0 | *epqslot = NULL; |
3383 | 0 | return false; |
3384 | 0 | } |
3385 | 0 | } |
3386 | 0 | break; |
3387 | | |
3388 | 0 | case TM_Updated: |
3389 | 0 | if (IsolationUsesXactSnapshot()) |
3390 | 0 | ereport(ERROR, |
3391 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
3392 | 0 | errmsg("could not serialize access due to concurrent update"))); |
3393 | 0 | elog(ERROR, "unexpected table_tuple_lock status: %u", test); |
3394 | 0 | break; |
3395 | | |
3396 | 0 | case TM_Deleted: |
3397 | 0 | if (IsolationUsesXactSnapshot()) |
3398 | 0 | ereport(ERROR, |
3399 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
3400 | 0 | errmsg("could not serialize access due to concurrent delete"))); |
3401 | | /* tuple was deleted */ |
3402 | 0 | return false; |
3403 | | |
3404 | 0 | case TM_Invisible: |
3405 | 0 | elog(ERROR, "attempted to lock invisible tuple"); |
3406 | 0 | break; |
3407 | | |
3408 | 0 | default: |
3409 | 0 | elog(ERROR, "unrecognized table_tuple_lock status: %u", test); |
3410 | 0 | return false; /* keep compiler quiet */ |
3411 | 0 | } |
3412 | 0 | } |
3413 | 0 | else |
3414 | 0 | { |
3415 | | /* |
3416 | | * We expect the tuple to be present, thus very simple error handling |
3417 | | * suffices. |
3418 | | */ |
3419 | 0 | if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny, |
3420 | 0 | oldslot)) |
3421 | 0 | elog(ERROR, "failed to fetch tuple for trigger"); |
3422 | 0 | } |
3423 | | |
3424 | 0 | return true; |
3425 | 0 | } |
3426 | | |
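As a rough summary of the switch in GetTupleForTrigger() above, the outcome of the tuple lock decides whether the trigger sees the row at all, whether an EvalPlanQual recheck is needed, or whether the query errors out. The sketch below restates that decision table in standalone C; the enums and the decide() helper are simplified stand-ins rather than the TM_Result/TM_FailureData types from the table AM layer, and the MERGE special case is folded into the "recheck" outcome.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the lock outcomes handled above. */
typedef enum { LOCK_OK, LOCK_SELF_MODIFIED, LOCK_UPDATED,
               LOCK_DELETED, LOCK_INVISIBLE } LockOutcome;
typedef enum { ACT_FIRE, ACT_SKIP, ACT_RECHECK_EPQ, ACT_ERROR } Action;

/*
 * serializable:           IsolationUsesXactSnapshot() in the real code.
 * traversed:              the lock followed an update chain to a newer version.
 * self_by_later_command:  the tuple was modified by a *later* command of the
 *                         same transaction (tmfd.cmax != es_output_cid).
 */
static Action
decide(LockOutcome outcome, bool serializable, bool traversed,
       bool self_by_later_command)
{
    switch (outcome)
    {
        case LOCK_SELF_MODIFIED:
            /* error if a later command did it, otherwise just skip the row */
            return self_by_later_command ? ACT_ERROR : ACT_SKIP;
        case LOCK_OK:
            return traversed ? ACT_RECHECK_EPQ : ACT_FIRE;
        case LOCK_UPDATED:
            /* concurrent update: serialization failure or unexpected status */
            return ACT_ERROR;
        case LOCK_DELETED:
            /* serialization failure under a transaction snapshot, else skip */
            return serializable ? ACT_ERROR : ACT_SKIP;
        case LOCK_INVISIBLE:
        default:
            return ACT_ERROR;
    }
}

int main(void)
{
    printf("concurrent delete, READ COMMITTED -> %d (skip)\n",
           decide(LOCK_DELETED, false, false, false));
    printf("ok but update chain traversed     -> %d (EPQ recheck)\n",
           decide(LOCK_OK, false, true, false));
    return 0;
}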
3427 | | /* |
3428 | | * Is trigger enabled to fire? |
3429 | | */ |
3430 | | static bool |
3431 | | TriggerEnabled(EState *estate, ResultRelInfo *relinfo, |
3432 | | Trigger *trigger, TriggerEvent event, |
3433 | | Bitmapset *modifiedCols, |
3434 | | TupleTableSlot *oldslot, TupleTableSlot *newslot) |
3435 | 0 | { |
3436 | | /* Check replication-role-dependent enable state */ |
3437 | 0 | if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA) |
3438 | 0 | { |
3439 | 0 | if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN || |
3440 | 0 | trigger->tgenabled == TRIGGER_DISABLED) |
3441 | 0 | return false; |
3442 | 0 | } |
3443 | 0 | else /* ORIGIN or LOCAL role */ |
3444 | 0 | { |
3445 | 0 | if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA || |
3446 | 0 | trigger->tgenabled == TRIGGER_DISABLED) |
3447 | 0 | return false; |
3448 | 0 | } |
3449 | | |
3450 | | /* |
3451 | | * Check for column-specific trigger (only possible for UPDATE, and in |
3452 | | * fact we *must* ignore tgattr for other event types) |
3453 | | */ |
3454 | 0 | if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event)) |
3455 | 0 | { |
3456 | 0 | int i; |
3457 | 0 | bool modified; |
3458 | |
|
3459 | 0 | modified = false; |
3460 | 0 | for (i = 0; i < trigger->tgnattr; i++) |
3461 | 0 | { |
3462 | 0 | if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber, |
3463 | 0 | modifiedCols)) |
3464 | 0 | { |
3465 | 0 | modified = true; |
3466 | 0 | break; |
3467 | 0 | } |
3468 | 0 | } |
3469 | 0 | if (!modified) |
3470 | 0 | return false; |
3471 | 0 | } |
3472 | | |
3473 | | /* Check for WHEN clause */ |
3474 | 0 | if (trigger->tgqual) |
3475 | 0 | { |
3476 | 0 | ExprState **predicate; |
3477 | 0 | ExprContext *econtext; |
3478 | 0 | MemoryContext oldContext; |
3479 | 0 | int i; |
3480 | |
3481 | 0 | Assert(estate != NULL); |
3482 | | |
3483 | | /* |
3484 | | * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the |
3485 | | * matching element of relinfo->ri_TrigWhenExprs[] |
3486 | | */ |
3487 | 0 | i = trigger - relinfo->ri_TrigDesc->triggers; |
3488 | 0 | predicate = &relinfo->ri_TrigWhenExprs[i]; |
3489 | | |
3490 | | /* |
3491 | | * If first time through for this WHEN expression, build expression |
3492 | | * nodetrees for it. Keep them in the per-query memory context so |
3493 | | * they'll survive throughout the query. |
3494 | | */ |
3495 | 0 | if (*predicate == NULL) |
3496 | 0 | { |
3497 | 0 | Node *tgqual; |
3498 | |
3499 | 0 | oldContext = MemoryContextSwitchTo(estate->es_query_cxt); |
3500 | 0 | tgqual = stringToNode(trigger->tgqual); |
3501 | 0 | tgqual = expand_generated_columns_in_expr(tgqual, relinfo->ri_RelationDesc, PRS2_OLD_VARNO); |
3502 | 0 | tgqual = expand_generated_columns_in_expr(tgqual, relinfo->ri_RelationDesc, PRS2_NEW_VARNO); |
3503 | | /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */ |
3504 | 0 | ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0); |
3505 | 0 | ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0); |
3506 | | /* ExecPrepareQual wants implicit-AND form */ |
3507 | 0 | tgqual = (Node *) make_ands_implicit((Expr *) tgqual); |
3508 | 0 | *predicate = ExecPrepareQual((List *) tgqual, estate); |
3509 | 0 | MemoryContextSwitchTo(oldContext); |
3510 | 0 | } |
3511 | | |
3512 | | /* |
3513 | | * We will use the EState's per-tuple context for evaluating WHEN |
3514 | | * expressions (creating it if it's not already there). |
3515 | | */ |
3516 | 0 | econtext = GetPerTupleExprContext(estate); |
3517 | | |
3518 | | /* |
3519 | | * Finally evaluate the expression, making the old and/or new tuples |
3520 | | * available as INNER_VAR/OUTER_VAR respectively. |
3521 | | */ |
3522 | 0 | econtext->ecxt_innertuple = oldslot; |
3523 | 0 | econtext->ecxt_outertuple = newslot; |
3524 | 0 | if (!ExecQual(*predicate, econtext)) |
3525 | 0 | return false; |
3526 | 0 | } |
3527 | | |
3528 | 0 | return true; |
3529 | 0 | } |
3530 | | |
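For column-specific UPDATE triggers, TriggerEnabled() above fires the trigger only if at least one column listed in tgattr is a member of the modified-columns set (attribute numbers are offset by FirstLowInvalidHeapAttributeNumber in the real bitmap because system attributes are negative). Below is a standalone sketch of that containment test with a plain array standing in for the Bitmapset; the function names and example column numbers are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* In the real code this is bms_is_member() on a Bitmapset of offset
 * attribute numbers; a plain array of modified columns stands in here. */
static bool
column_was_modified(int attnum, const int *modified, int nmodified)
{
    for (int i = 0; i < nmodified; i++)
        if (modified[i] == attnum)
            return true;
    return false;
}

/* Mirrors the tgattr loop: the trigger is skipped unless some listed
 * column is among the modified ones. */
static bool
column_trigger_should_fire(const int *tgattr, int tgnattr,
                           const int *modified, int nmodified)
{
    for (int i = 0; i < tgnattr; i++)
        if (column_was_modified(tgattr[i], modified, nmodified))
            return true;
    return tgnattr == 0;        /* no column list means "fire for any UPDATE" */
}

int main(void)
{
    int tgattr[] = {2, 5};          /* trigger declared on columns 2 and 5 */
    int modified[] = {1, 3, 5};     /* the UPDATE touched columns 1, 3 and 5 */

    printf("%s\n", column_trigger_should_fire(tgattr, 2, modified, 3)
           ? "fire" : "skip");      /* prints "fire": column 5 overlaps */
    return 0;
}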
3531 | | |
3532 | | /* ---------- |
3533 | | * After-trigger stuff |
3534 | | * |
3535 | | * The AfterTriggersData struct holds data about pending AFTER trigger events |
3536 | | * during the current transaction tree. (BEFORE triggers are fired |
3537 | | * immediately so we don't need any persistent state about them.) The struct |
3538 | | * and most of its subsidiary data are kept in TopTransactionContext; however |
3539 | | * some data that can be discarded sooner appears in the CurTransactionContext |
3540 | | * of the relevant subtransaction. Also, the individual event records are |
3541 | | * kept in a separate sub-context of TopTransactionContext. This is done |
3542 | | * mainly so that it's easy to tell from a memory context dump how much space |
3543 | | * is being eaten by trigger events. |
3544 | | * |
3545 | | * Because the list of pending events can grow large, we go to some |
3546 | | * considerable effort to minimize per-event memory consumption. The event |
3547 | | * records are grouped into chunks and common data for similar events in the |
3548 | | * same chunk is only stored once. |
3549 | | * |
3550 | | * XXX We need to be able to save the per-event data in a file if it grows too |
3551 | | * large. |
3552 | | * ---------- |
3553 | | */ |
3554 | | |
3555 | | /* Per-trigger SET CONSTRAINT status */ |
3556 | | typedef struct SetConstraintTriggerData |
3557 | | { |
3558 | | Oid sct_tgoid; |
3559 | | bool sct_tgisdeferred; |
3560 | | } SetConstraintTriggerData; |
3561 | | |
3562 | | typedef struct SetConstraintTriggerData *SetConstraintTrigger; |
3563 | | |
3564 | | /* |
3565 | | * SET CONSTRAINT intra-transaction status. |
3566 | | * |
3567 | | * We make this a single palloc'd object so it can be copied and freed easily. |
3568 | | * |
3569 | | * all_isset and all_isdeferred are used to keep track |
3570 | | * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}. |
3571 | | * |
3572 | | * trigstates[] stores per-trigger tgisdeferred settings. |
3573 | | */ |
3574 | | typedef struct SetConstraintStateData |
3575 | | { |
3576 | | bool all_isset; |
3577 | | bool all_isdeferred; |
3578 | | int numstates; /* number of trigstates[] entries in use */ |
3579 | | int numalloc; /* allocated size of trigstates[] */ |
3580 | | SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER]; |
3581 | | } SetConstraintStateData; |
3582 | | |
3583 | | typedef SetConstraintStateData *SetConstraintState; |
3584 | | |
3585 | | |
3586 | | /* |
3587 | | * Per-trigger-event data |
3588 | | * |
3589 | | * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS |
3590 | | * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions. |
3591 | | * Each event record also has an associated AfterTriggerSharedData that is |
3592 | | * shared across all instances of similar events within a "chunk". |
3593 | | * |
3594 | | * For row-level triggers, we arrange not to waste storage on unneeded ctid |
3595 | | * fields. Updates of regular tables use two; inserts and deletes of regular |
3596 | | * tables use one; foreign tables always use zero and save the tuple(s) to a |
3597 | | * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to |
3598 | | * retrieve a fresh tuple or pair of tuples from that tuplestore, while |
3599 | | * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved |
3600 | | * tuple(s). This permits storing tuples once regardless of the number of |
3601 | | * row-level triggers on a foreign table. |
3602 | | * |
3603 | | * When updates on partitioned tables cause rows to move between partitions, |
3604 | | * the OIDs of both partitions are stored too, so that the tuples can be |
3605 | | * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross- |
3606 | | * partition update"). |
3607 | | * |
3608 | | * Note that we need triggers on foreign tables to be fired in exactly the |
3609 | | * order they were queued, so that the tuples come out of the tuplestore in |
3610 | | * the right order. To ensure that, we forbid deferrable (constraint) |
3611 | | * triggers on foreign tables. This also ensures that such triggers do not |
3612 | | * get deferred into outer trigger query levels, meaning that it's okay to |
3613 | | * destroy the tuplestore at the end of the query level. |
3614 | | * |
3615 | | * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they |
3616 | | * require no ctid field. We lack the flag bit space to neatly represent that |
3617 | | * distinct case, and it seems unlikely to be worth much trouble. |
3618 | | * |
3619 | | * Note: ats_firing_id is initially zero and is set to something else when |
3620 | | * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing |
3621 | | * cycle the trigger will be fired in (or was fired in, if DONE is set). |
3622 | | * Although this is mutable state, we can keep it in AfterTriggerSharedData |
3623 | | * because all instances of the same type of event in a given event list will |
3624 | | * be fired at the same time, if they were queued between the same firing |
3625 | | * cycles. So we need only ensure that ats_firing_id is zero when attaching |
3626 | | * a new event to an existing AfterTriggerSharedData record. |
3627 | | */ |
3628 | | typedef uint32 TriggerFlags; |
3629 | | |
3630 | 0 | #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */ |
3631 | 0 | #define AFTER_TRIGGER_DONE 0x80000000 |
3632 | 0 | #define AFTER_TRIGGER_IN_PROGRESS 0x40000000 |
3633 | | /* bits describing the size and tuple sources of this event */ |
3634 | 0 | #define AFTER_TRIGGER_FDW_REUSE 0x00000000 |
3635 | 0 | #define AFTER_TRIGGER_FDW_FETCH 0x20000000 |
3636 | 0 | #define AFTER_TRIGGER_1CTID 0x10000000 |
3637 | 0 | #define AFTER_TRIGGER_2CTID 0x30000000 |
3638 | 0 | #define AFTER_TRIGGER_CP_UPDATE 0x08000000 |
3639 | 0 | #define AFTER_TRIGGER_TUP_BITS 0x38000000 |
3640 | | typedef struct AfterTriggerSharedData *AfterTriggerShared; |
3641 | | |
3642 | | typedef struct AfterTriggerSharedData |
3643 | | { |
3644 | | TriggerEvent ats_event; /* event type indicator, see trigger.h */ |
3645 | | Oid ats_tgoid; /* the trigger's ID */ |
3646 | | Oid ats_relid; /* the relation it's on */ |
3647 | | Oid ats_rolid; /* role to execute the trigger */ |
3648 | | CommandId ats_firing_id; /* ID for firing cycle */ |
3649 | | struct AfterTriggersTableData *ats_table; /* transition table access */ |
3650 | | Bitmapset *ats_modifiedcols; /* modified columns */ |
3651 | | } AfterTriggerSharedData; |
3652 | | |
3653 | | typedef struct AfterTriggerEventData *AfterTriggerEvent; |
3654 | | |
3655 | | typedef struct AfterTriggerEventData |
3656 | | { |
3657 | | TriggerFlags ate_flags; /* status bits and offset to shared data */ |
3658 | | ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */ |
3659 | | ItemPointerData ate_ctid2; /* new updated tuple */ |
3660 | | |
3661 | | /* |
3662 | | * During a cross-partition update of a partitioned table, we also store |
3663 | | * the OIDs of source and destination partitions that are needed to fetch |
3664 | | * the old (ctid1) and the new tuple (ctid2) from, respectively. |
3665 | | */ |
3666 | | Oid ate_src_part; |
3667 | | Oid ate_dst_part; |
3668 | | } AfterTriggerEventData; |
3669 | | |
3670 | | /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */ |
3671 | | typedef struct AfterTriggerEventDataNoOids |
3672 | | { |
3673 | | TriggerFlags ate_flags; |
3674 | | ItemPointerData ate_ctid1; |
3675 | | ItemPointerData ate_ctid2; |
3676 | | } AfterTriggerEventDataNoOids; |
3677 | | |
3678 | | /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */ |
3679 | | typedef struct AfterTriggerEventDataOneCtid |
3680 | | { |
3681 | | TriggerFlags ate_flags; /* status bits and offset to shared data */ |
3682 | | ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */ |
3683 | | } AfterTriggerEventDataOneCtid; |
3684 | | |
3685 | | /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */ |
3686 | | typedef struct AfterTriggerEventDataZeroCtids |
3687 | | { |
3688 | | TriggerFlags ate_flags; /* status bits and offset to shared data */ |
3689 | | } AfterTriggerEventDataZeroCtids; |
3690 | | |
3691 | | #define SizeofTriggerEvent(evt) \ |
3692 | 0 | (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \ |
3693 | 0 | sizeof(AfterTriggerEventData) : \ |
3694 | 0 | (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \ |
3695 | 0 | sizeof(AfterTriggerEventDataNoOids) : \ |
3696 | 0 | (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \ |
3697 | 0 | sizeof(AfterTriggerEventDataOneCtid) : \ |
3698 | 0 | sizeof(AfterTriggerEventDataZeroCtids)))) |
3699 | | |
3700 | | #define GetTriggerSharedData(evt) \ |
3701 | 0 | ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET))) |
3702 | | |
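ate_flags therefore carries three things at once: the DONE/IN_PROGRESS status bits, the AFTER_TRIGGER_TUP_BITS that select which event layout is in use, and, in the low AFTER_TRIGGER_OFFSET bits, the byte offset from the event record to its shared record at the end of the chunk. The standalone sketch below re-derives the per-layout sizes using the same bit values as the SizeofTriggerEvent() ladder above; the trimmed structs and the six-byte fake ctid exist only so the example compiles without PostgreSQL headers.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint32_t TriggerFlags;

/* Same bit assignments as the #defines above. */
#define AFTER_TRIGGER_OFFSET      0x07FFFFFF
#define AFTER_TRIGGER_FDW_REUSE   0x00000000
#define AFTER_TRIGGER_FDW_FETCH   0x20000000
#define AFTER_TRIGGER_1CTID       0x10000000
#define AFTER_TRIGGER_2CTID       0x30000000
#define AFTER_TRIGGER_CP_UPDATE   0x08000000
#define AFTER_TRIGGER_TUP_BITS    0x38000000

/* Fake 6-byte ctid so the sketch is self-contained. */
typedef struct { unsigned char bytes[6]; } FakeItemPointerData;
typedef uint32_t FakeOid;

typedef struct { TriggerFlags ate_flags; FakeItemPointerData ate_ctid1;
                 FakeItemPointerData ate_ctid2; FakeOid ate_src_part;
                 FakeOid ate_dst_part; } EvtFull;       /* cross-partition update */
typedef struct { TriggerFlags ate_flags; FakeItemPointerData ate_ctid1;
                 FakeItemPointerData ate_ctid2; } EvtTwoCtid;
typedef struct { TriggerFlags ate_flags; FakeItemPointerData ate_ctid1; } EvtOneCtid;
typedef struct { TriggerFlags ate_flags; } EvtZeroCtid;

/* Same decision ladder as the SizeofTriggerEvent() macro. */
static size_t
sizeof_trigger_event(TriggerFlags flags)
{
    TriggerFlags kind = flags & AFTER_TRIGGER_TUP_BITS;

    if (kind == AFTER_TRIGGER_CP_UPDATE)
        return sizeof(EvtFull);
    if (kind == AFTER_TRIGGER_2CTID)
        return sizeof(EvtTwoCtid);
    if (kind == AFTER_TRIGGER_1CTID)
        return sizeof(EvtOneCtid);
    return sizeof(EvtZeroCtid);          /* FDW_FETCH / FDW_REUSE */
}

int main(void)
{
    printf("cross-partition update event: %zu bytes\n",
           sizeof_trigger_event(AFTER_TRIGGER_CP_UPDATE | 128));
    printf("plain UPDATE event:           %zu bytes\n",
           sizeof_trigger_event(AFTER_TRIGGER_2CTID | 128));
    printf("INSERT/DELETE event:          %zu bytes\n",
           sizeof_trigger_event(AFTER_TRIGGER_1CTID | 128));
    printf("foreign-table event:          %zu bytes\n",
           sizeof_trigger_event(AFTER_TRIGGER_FDW_FETCH | 128));
    printf("offset bits of a sample flags word: %d\n",
           (AFTER_TRIGGER_2CTID | 128) & AFTER_TRIGGER_OFFSET);
    return 0;
}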
3703 | | /* |
3704 | | * To avoid palloc overhead, we keep trigger events in arrays in successively- |
3705 | | * larger chunks (a slightly more sophisticated version of an expansible |
3706 | | * array). The space between CHUNK_DATA_START and freeptr is occupied by |
3707 | | * AfterTriggerEventData records; the space between endfree and endptr is |
3708 | | * occupied by AfterTriggerSharedData records. |
3709 | | */ |
3710 | | typedef struct AfterTriggerEventChunk |
3711 | | { |
3712 | | struct AfterTriggerEventChunk *next; /* list link */ |
3713 | | char *freeptr; /* start of free space in chunk */ |
3714 | | char *endfree; /* end of free space in chunk */ |
3715 | | char *endptr; /* end of chunk */ |
3716 | | /* event data follows here */ |
3717 | | } AfterTriggerEventChunk; |
3718 | | |
3719 | 0 | #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk))) |
3720 | | |
3721 | | /* A list of events */ |
3722 | | typedef struct AfterTriggerEventList |
3723 | | { |
3724 | | AfterTriggerEventChunk *head; |
3725 | | AfterTriggerEventChunk *tail; |
3726 | | char *tailfree; /* freeptr of tail chunk */ |
3727 | | } AfterTriggerEventList; |
3728 | | |
3729 | | /* Macros to help in iterating over a list of events */ |
3730 | | #define for_each_chunk(cptr, evtlist) \ |
3731 | 0 | for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next) |
3732 | | #define for_each_event(eptr, cptr) \ |
3733 | 0 | for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \ |
3734 | 0 | (char *) eptr < (cptr)->freeptr; \ |
3735 | 0 | eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr))) |
3736 | | /* Use this if no special per-chunk processing is needed */ |
3737 | | #define for_each_event_chunk(eptr, cptr, evtlist) \ |
3738 | 0 | for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr) |
3739 | | |
3740 | | /* Macros for iterating from a start point that might not be list start */ |
3741 | | #define for_each_chunk_from(cptr) \ |
3742 | 0 | for (; cptr != NULL; cptr = cptr->next) |
3743 | | #define for_each_event_from(eptr, cptr) \ |
3744 | 0 | for (; \ |
3745 | 0 | (char *) eptr < (cptr)->freeptr; \ |
3746 | 0 | eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr))) |
3747 | | |
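To make the chunk bookkeeping concrete: event records are packed forward from CHUNK_DATA_START via freeptr, shared records are pushed backward from endptr via endfree, and each event finds its shared record through the byte offset stored in its flags word. The toy chunk below models just that layout; the Toy* types, the single-field shared record, and main() are illustrative stand-ins, not the PostgreSQL implementation. The real afterTriggerAddEvent(), shown further down, additionally searches all shared records in the chunk rather than only the newest one and sizes chunks adaptively.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct ToyShared { int trigger_id; } ToyShared;
typedef struct ToyEvent  { uint32_t offset_to_shared; int row_id; } ToyEvent;

typedef struct ToyChunk
{
    char   *freeptr;   /* next byte for event data (grows forward)  */
    char   *endfree;   /* start of shared records (grows backward)  */
    char   *endptr;    /* end of the allocation                     */
    char    data[];    /* events ... free gap ... shared records    */
} ToyChunk;

static ToyChunk *
toy_chunk_create(size_t size)
{
    ToyChunk *chunk = malloc(sizeof(ToyChunk) + size);

    chunk->freeptr = chunk->data;
    chunk->endfree = chunk->endptr = chunk->data + size;
    return chunk;
}

/* Add one event; reuse the newest shared record if it matches, else push one. */
static bool
toy_chunk_add(ToyChunk *chunk, int row_id, int trigger_id)
{
    ToyShared *shared;
    ToyEvent  *evt;

    if ((size_t) (chunk->endfree - chunk->freeptr) <
        sizeof(ToyEvent) + sizeof(ToyShared))
        return false;                       /* caller would start a new chunk */

    shared = (ToyShared *) chunk->endfree;
    if ((char *) shared >= chunk->endptr || shared->trigger_id != trigger_id)
    {
        shared = ((ToyShared *) chunk->endfree) - 1;
        shared->trigger_id = trigger_id;
        chunk->endfree = (char *) shared;
    }

    evt = (ToyEvent *) chunk->freeptr;
    evt->row_id = row_id;
    evt->offset_to_shared = (uint32_t) ((char *) shared - (char *) evt);
    chunk->freeptr += sizeof(ToyEvent);
    return true;
}

int main(void)
{
    ToyChunk *chunk = toy_chunk_create(1024);

    toy_chunk_add(chunk, 1, 42);
    toy_chunk_add(chunk, 2, 42);            /* shares the same ToyShared */

    for (char *p = chunk->data; p < chunk->freeptr; p += sizeof(ToyEvent))
    {
        ToyEvent  *evt = (ToyEvent *) p;
        ToyShared *sh = (ToyShared *) ((char *) evt + evt->offset_to_shared);

        printf("row %d fired by trigger %d\n", evt->row_id, sh->trigger_id);
    }
    free(chunk);
    return 0;
}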
3748 | | |
3749 | | /* |
3750 | | * All per-transaction data for the AFTER TRIGGERS module. |
3751 | | * |
3752 | | * AfterTriggersData has the following fields: |
3753 | | * |
3754 | | * firing_counter is incremented for each call of afterTriggerInvokeEvents. |
3755 | | * We mark firable events with the current firing cycle's ID so that we can |
3756 | | * tell which ones to work on. This ensures sane behavior if a trigger |
3757 | | * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will |
3758 | | * only fire those events that weren't already scheduled for firing. |
3759 | | * |
3760 | | * state keeps track of the transaction-local effects of SET CONSTRAINTS. |
3761 | | * This is saved and restored across failed subtransactions. |
3762 | | * |
3763 | | * events is the current list of deferred events. This is global across |
3764 | | * all subtransactions of the current transaction. In a subtransaction |
3765 | | * abort, we know that the events added by the subtransaction are at the |
3766 | | * end of the list, so it is relatively easy to discard them. The event |
3767 | | * list chunks themselves are stored in event_cxt. |
3768 | | * |
3769 | | * query_depth is the current depth of nested AfterTriggerBeginQuery calls |
3770 | | * (-1 when the stack is empty). |
3771 | | * |
3772 | | * query_stack[query_depth] is the per-query-level data, including these fields: |
3773 | | * |
3774 | | * events is a list of AFTER trigger events queued by the current query. |
3775 | | * None of these are valid until the matching AfterTriggerEndQuery call |
3776 | | * occurs. At that point we fire immediate-mode triggers, and append any |
3777 | | * deferred events to the main events list. |
3778 | | * |
3779 | | * fdw_tuplestore is a tuplestore containing the foreign-table tuples |
3780 | | * needed by events queued by the current query. (Note: we use just one |
3781 | | * tuplestore even though more than one foreign table might be involved. |
3782 | | * This is okay because tuplestores don't really care what's in the tuples |
3783 | | * they store; but it's possible that someday it'd break.) |
3784 | | * |
3785 | | * tables is a List of AfterTriggersTableData structs for target tables |
3786 | | * of the current query (see below). |
3787 | | * |
3788 | | * maxquerydepth is just the allocated length of query_stack. |
3789 | | * |
3790 | | * trans_stack holds per-subtransaction data, including these fields: |
3791 | | * |
3792 | | * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS |
3793 | | * state data. Each subtransaction level that modifies that state first |
3794 | | * saves a copy, which we use to restore the state if we abort. |
3795 | | * |
3796 | | * events is a copy of the events head/tail pointers, |
3797 | | * which we use to restore those values during subtransaction abort. |
3798 | | * |
3799 | | * query_depth is the subtransaction-start-time value of query_depth, |
3800 | | * which we similarly use to clean up at subtransaction abort. |
3801 | | * |
3802 | | * firing_counter is the subtransaction-start-time value of firing_counter. |
3803 | | * We use this to recognize which deferred triggers were fired (or marked |
3804 | | * for firing) within an aborted subtransaction. |
3805 | | * |
3806 | | * We use GetCurrentTransactionNestLevel() to determine the correct array |
3807 | | * index in trans_stack. maxtransdepth is the number of allocated entries in |
3808 | | * trans_stack. (By not keeping our own stack pointer, we can avoid trouble |
3809 | | * in cases where errors during subxact abort cause multiple invocations |
3810 | | * of AfterTriggerEndSubXact() at the same nesting depth.) |
3811 | | * |
3812 | | * We create an AfterTriggersTableData struct for each target table of the |
3813 | | * current query, and each operation mode (INSERT/UPDATE/DELETE), that has |
3814 | | * either transition tables or statement-level triggers. This is used to |
3815 | | * hold the relevant transition tables, as well as info tracking whether |
3816 | | * we already queued the statement triggers. (We use that info to prevent |
3817 | | * firing the same statement triggers more than once per statement, or really |
3818 | | * once per transition table set.) These structs, along with the transition |
3819 | | * table tuplestores, live in the (sub)transaction's CurTransactionContext. |
3820 | | * That's sufficient lifespan because we don't allow transition tables to be |
3821 | | * used by deferrable triggers, so they only need to survive until |
3822 | | * AfterTriggerEndQuery. |
3823 | | */ |
3824 | | typedef struct AfterTriggersQueryData AfterTriggersQueryData; |
3825 | | typedef struct AfterTriggersTransData AfterTriggersTransData; |
3826 | | typedef struct AfterTriggersTableData AfterTriggersTableData; |
3827 | | |
3828 | | typedef struct AfterTriggersData |
3829 | | { |
3830 | | CommandId firing_counter; /* next firing ID to assign */ |
3831 | | SetConstraintState state; /* the active S C state */ |
3832 | | AfterTriggerEventList events; /* deferred-event list */ |
3833 | | MemoryContext event_cxt; /* memory context for events, if any */ |
3834 | | |
3835 | | /* per-query-level data: */ |
3836 | | AfterTriggersQueryData *query_stack; /* array of structs shown below */ |
3837 | | int query_depth; /* current index in above array */ |
3838 | | int maxquerydepth; /* allocated len of above array */ |
3839 | | |
3840 | | /* per-subtransaction-level data: */ |
3841 | | AfterTriggersTransData *trans_stack; /* array of structs shown below */ |
3842 | | int maxtransdepth; /* allocated len of above array */ |
3843 | | } AfterTriggersData; |
3844 | | |
3845 | | struct AfterTriggersQueryData |
3846 | | { |
3847 | | AfterTriggerEventList events; /* events pending from this query */ |
3848 | | Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */ |
3849 | | List *tables; /* list of AfterTriggersTableData, see below */ |
3850 | | }; |
3851 | | |
3852 | | struct AfterTriggersTransData |
3853 | | { |
3854 | | /* these fields are just for resetting at subtrans abort: */ |
3855 | | SetConstraintState state; /* saved S C state, or NULL if not yet saved */ |
3856 | | AfterTriggerEventList events; /* saved list pointer */ |
3857 | | int query_depth; /* saved query_depth */ |
3858 | | CommandId firing_counter; /* saved firing_counter */ |
3859 | | }; |
3860 | | |
3861 | | struct AfterTriggersTableData |
3862 | | { |
3863 | | /* relid + cmdType form the lookup key for these structs: */ |
3864 | | Oid relid; /* target table's OID */ |
3865 | | CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */ |
3866 | | bool closed; /* true when no longer OK to add tuples */ |
3867 | | bool before_trig_done; /* did we already queue BS triggers? */ |
3868 | | bool after_trig_done; /* did we already queue AS triggers? */ |
3869 | | AfterTriggerEventList after_trig_events; /* if so, saved list pointer */ |
3870 | | |
3871 | | /* |
3872 | | * We maintain separate transition tables for UPDATE/INSERT/DELETE since |
3873 | | * MERGE can run all three actions in a single statement. Note that UPDATE |
3874 | | * needs both old and new transition tables whereas INSERT needs only new, |
3875 | | * and DELETE needs only old. |
3876 | | */ |
3877 | | |
3878 | | /* "old" transition table for UPDATE, if any */ |
3879 | | Tuplestorestate *old_upd_tuplestore; |
3880 | | /* "new" transition table for UPDATE, if any */ |
3881 | | Tuplestorestate *new_upd_tuplestore; |
3882 | | /* "old" transition table for DELETE, if any */ |
3883 | | Tuplestorestate *old_del_tuplestore; |
3884 | | /* "new" transition table for INSERT, if any */ |
3885 | | Tuplestorestate *new_ins_tuplestore; |
3886 | | |
3887 | | TupleTableSlot *storeslot; /* for converting to tuplestore's format */ |
3888 | | }; |
3889 | | |
3890 | | static AfterTriggersData afterTriggers; |
3891 | | |
3892 | | static void AfterTriggerExecute(EState *estate, |
3893 | | AfterTriggerEvent event, |
3894 | | ResultRelInfo *relInfo, |
3895 | | ResultRelInfo *src_relInfo, |
3896 | | ResultRelInfo *dst_relInfo, |
3897 | | TriggerDesc *trigdesc, |
3898 | | FmgrInfo *finfo, |
3899 | | Instrumentation *instr, |
3900 | | MemoryContext per_tuple_context, |
3901 | | TupleTableSlot *trig_tuple_slot1, |
3902 | | TupleTableSlot *trig_tuple_slot2); |
3903 | | static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid, |
3904 | | CmdType cmdType); |
3905 | | static TupleTableSlot *GetAfterTriggersStoreSlot(AfterTriggersTableData *table, |
3906 | | TupleDesc tupdesc); |
3907 | | static Tuplestorestate *GetAfterTriggersTransitionTable(int event, |
3908 | | TupleTableSlot *oldslot, |
3909 | | TupleTableSlot *newslot, |
3910 | | TransitionCaptureState *transition_capture); |
3911 | | static void TransitionTableAddTuple(EState *estate, |
3912 | | TransitionCaptureState *transition_capture, |
3913 | | ResultRelInfo *relinfo, |
3914 | | TupleTableSlot *slot, |
3915 | | TupleTableSlot *original_insert_tuple, |
3916 | | Tuplestorestate *tuplestore); |
3917 | | static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs); |
3918 | | static SetConstraintState SetConstraintStateCreate(int numalloc); |
3919 | | static SetConstraintState SetConstraintStateCopy(SetConstraintState origstate); |
3920 | | static SetConstraintState SetConstraintStateAddItem(SetConstraintState state, |
3921 | | Oid tgoid, bool tgisdeferred); |
3922 | | static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent); |
3923 | | |
3924 | | |
3925 | | /* |
3926 | | * Get the FDW tuplestore for the current trigger query level, creating it |
3927 | | * if necessary. |
3928 | | */ |
3929 | | static Tuplestorestate * |
3930 | | GetCurrentFDWTuplestore(void) |
3931 | 0 | { |
3932 | 0 | Tuplestorestate *ret; |
3933 | |
3934 | 0 | ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore; |
3935 | 0 | if (ret == NULL) |
3936 | 0 | { |
3937 | 0 | MemoryContext oldcxt; |
3938 | 0 | ResourceOwner saveResourceOwner; |
3939 | | |
3940 | | /* |
3941 | | * Make the tuplestore valid until end of subtransaction. We really |
3942 | | * only need it until AfterTriggerEndQuery(). |
3943 | | */ |
3944 | 0 | oldcxt = MemoryContextSwitchTo(CurTransactionContext); |
3945 | 0 | saveResourceOwner = CurrentResourceOwner; |
3946 | 0 | CurrentResourceOwner = CurTransactionResourceOwner; |
3947 | |
3948 | 0 | ret = tuplestore_begin_heap(false, false, work_mem); |
3949 | |
3950 | 0 | CurrentResourceOwner = saveResourceOwner; |
3951 | 0 | MemoryContextSwitchTo(oldcxt); |
3952 | |
3953 | 0 | afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret; |
3954 | 0 | } |
3955 | |
3956 | 0 | return ret; |
3957 | 0 | } |
3958 | | |
3959 | | /* ---------- |
3960 | | * afterTriggerCheckState() |
3961 | | * |
3962 | | * Returns true if the trigger event is actually in state DEFERRED. |
3963 | | * ---------- |
3964 | | */ |
3965 | | static bool |
3966 | | afterTriggerCheckState(AfterTriggerShared evtshared) |
3967 | 0 | { |
3968 | 0 | Oid tgoid = evtshared->ats_tgoid; |
3969 | 0 | SetConstraintState state = afterTriggers.state; |
3970 | 0 | int i; |
3971 | | |
3972 | | /* |
3973 | | * For not-deferrable triggers (i.e. normal AFTER ROW triggers and |
3974 | | * constraints declared NOT DEFERRABLE), the state is always false. |
3975 | | */ |
3976 | 0 | if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0) |
3977 | 0 | return false; |
3978 | | |
3979 | | /* |
3980 | | * If constraint state exists, SET CONSTRAINTS might have been executed |
3981 | | * either for this trigger or for all triggers. |
3982 | | */ |
3983 | 0 | if (state != NULL) |
3984 | 0 | { |
3985 | | /* Check for SET CONSTRAINTS for this specific trigger. */ |
3986 | 0 | for (i = 0; i < state->numstates; i++) |
3987 | 0 | { |
3988 | 0 | if (state->trigstates[i].sct_tgoid == tgoid) |
3989 | 0 | return state->trigstates[i].sct_tgisdeferred; |
3990 | 0 | } |
3991 | | |
3992 | | /* Check for SET CONSTRAINTS ALL. */ |
3993 | 0 | if (state->all_isset) |
3994 | 0 | return state->all_isdeferred; |
3995 | 0 | } |
3996 | | |
3997 | | /* |
3998 | | * Otherwise return the default state for the trigger. |
3999 | | */ |
4000 | 0 | return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0); |
4001 | 0 | } |
4002 | | |
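The precedence implemented by afterTriggerCheckState() is: a non-deferrable trigger is never deferred; otherwise an explicit per-trigger SET CONSTRAINTS entry wins, then a SET CONSTRAINTS ALL setting, and finally the trigger's declared INITIALLY DEFERRED/IMMEDIATE default. A standalone restatement of that order follows; the struct and field names are simplified stand-ins, not the actual SetConstraintState representation.

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned trigger_oid; bool deferred; } PerTriggerSetting;

typedef struct
{
    bool all_isset;        /* SET CONSTRAINTS ALL ... was executed           */
    bool all_isdeferred;   /* ... DEFERRED (true) or IMMEDIATE (false)       */
    int  nsettings;        /* explicit per-trigger SET CONSTRAINTS entries   */
    const PerTriggerSetting *settings;
} ConstraintState;

static bool
is_deferred(unsigned trigger_oid, bool deferrable, bool initdeferred,
            const ConstraintState *state)
{
    if (!deferrable)
        return false;                       /* ordinary AFTER ROW trigger */

    if (state != NULL)
    {
        for (int i = 0; i < state->nsettings; i++)
            if (state->settings[i].trigger_oid == trigger_oid)
                return state->settings[i].deferred;   /* per-trigger wins  */
        if (state->all_isset)
            return state->all_isdeferred;             /* then ALL setting  */
    }
    return initdeferred;                    /* finally the declared default */
}

int main(void)
{
    PerTriggerSetting overrides[] = { { 1001, true } };
    ConstraintState state = { .all_isset = true, .all_isdeferred = false,
                              .nsettings = 1, .settings = overrides };

    /* The explicit entry beats SET CONSTRAINTS ALL IMMEDIATE. */
    printf("trigger 1001: %s\n",
           is_deferred(1001, true, false, &state) ? "deferred" : "immediate");
    /* No entry: ALL IMMEDIATE applies even to an INITIALLY DEFERRED constraint. */
    printf("trigger 2002: %s\n",
           is_deferred(2002, true, true, &state) ? "deferred" : "immediate");
    return 0;
}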
4003 | | /* ---------- |
4004 | | * afterTriggerCopyBitmap() |
4005 | | * |
4006 | | * Copy bitmap into AfterTriggerEvents memory context, which is where the after |
4007 | | * trigger events are kept. |
4008 | | * ---------- |
4009 | | */ |
4010 | | static Bitmapset * |
4011 | | afterTriggerCopyBitmap(Bitmapset *src) |
4012 | 0 | { |
4013 | 0 | Bitmapset *dst; |
4014 | 0 | MemoryContext oldcxt; |
4015 | |
4016 | 0 | if (src == NULL) |
4017 | 0 | return NULL; |
4018 | | |
4019 | 0 | oldcxt = MemoryContextSwitchTo(afterTriggers.event_cxt); |
4020 | |
4021 | 0 | dst = bms_copy(src); |
4022 | |
4023 | 0 | MemoryContextSwitchTo(oldcxt); |
4024 | |
4025 | 0 | return dst; |
4026 | 0 | } |
4027 | | |
4028 | | /* ---------- |
4029 | | * afterTriggerAddEvent() |
4030 | | * |
4031 | | * Add a new trigger event to the specified queue. |
4032 | | * The passed-in event data is copied. |
4033 | | * ---------- |
4034 | | */ |
4035 | | static void |
4036 | | afterTriggerAddEvent(AfterTriggerEventList *events, |
4037 | | AfterTriggerEvent event, AfterTriggerShared evtshared) |
4038 | 0 | { |
4039 | 0 | Size eventsize = SizeofTriggerEvent(event); |
4040 | 0 | Size needed = eventsize + sizeof(AfterTriggerSharedData); |
4041 | 0 | AfterTriggerEventChunk *chunk; |
4042 | 0 | AfterTriggerShared newshared; |
4043 | 0 | AfterTriggerEvent newevent; |
4044 | | |
4045 | | /* |
4046 | | * If empty list or not enough room in the tail chunk, make a new chunk. |
4047 | | * We assume here that a new shared record will always be needed. |
4048 | | */ |
4049 | 0 | chunk = events->tail; |
4050 | 0 | if (chunk == NULL || |
4051 | 0 | chunk->endfree - chunk->freeptr < needed) |
4052 | 0 | { |
4053 | 0 | Size chunksize; |
4054 | | |
4055 | | /* Create event context if we didn't already */ |
4056 | 0 | if (afterTriggers.event_cxt == NULL) |
4057 | 0 | afterTriggers.event_cxt = |
4058 | 0 | AllocSetContextCreate(TopTransactionContext, |
4059 | 0 | "AfterTriggerEvents", |
4060 | 0 | ALLOCSET_DEFAULT_SIZES); |
4061 | | |
4062 | | /* |
4063 | | * Chunk size starts at 1KB and is allowed to increase up to 1MB. |
4064 | | * These numbers are fairly arbitrary, though there is a hard limit at |
4065 | | * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their |
4066 | | * shared records using the available space in ate_flags. Another |
4067 | | * constraint is that if the chunk size gets too huge, the search loop |
4068 | | * below would get slow given a (not too common) usage pattern with |
4069 | | * many distinct event types in a chunk. Therefore, we double the |
4070 | | * preceding chunk size only if there weren't too many shared records |
4071 | | * in the preceding chunk; otherwise we halve it. This gives us some |
4072 | | * ability to adapt to the actual usage pattern of the current query |
4073 | | * while still having large chunk sizes in typical usage. All chunk |
4074 | | * sizes used should be MAXALIGN multiples, to ensure that the shared |
4075 | | * records will be aligned safely. |
4076 | | */ |
4077 | 0 | #define MIN_CHUNK_SIZE 1024 |
4078 | 0 | #define MAX_CHUNK_SIZE (1024*1024) |
4079 | |
4080 | | #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1) |
4081 | | #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET |
4082 | | #endif |
4083 | |
4084 | 0 | if (chunk == NULL) |
4085 | 0 | chunksize = MIN_CHUNK_SIZE; |
4086 | 0 | else |
4087 | 0 | { |
4088 | | /* preceding chunk size... */ |
4089 | 0 | chunksize = chunk->endptr - (char *) chunk; |
4090 | | /* check number of shared records in preceding chunk */ |
4091 | 0 | if ((chunk->endptr - chunk->endfree) <= |
4092 | 0 | (100 * sizeof(AfterTriggerSharedData))) |
4093 | 0 | chunksize *= 2; /* okay, double it */ |
4094 | 0 | else |
4095 | 0 | chunksize /= 2; /* too many shared records */ |
4096 | 0 | chunksize = Min(chunksize, MAX_CHUNK_SIZE); |
4097 | 0 | } |
4098 | 0 | chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize); |
4099 | 0 | chunk->next = NULL; |
4100 | 0 | chunk->freeptr = CHUNK_DATA_START(chunk); |
4101 | 0 | chunk->endptr = chunk->endfree = (char *) chunk + chunksize; |
4102 | 0 | Assert(chunk->endfree - chunk->freeptr >= needed); |
4103 | |
4104 | 0 | if (events->tail == NULL) |
4105 | 0 | { |
4106 | 0 | Assert(events->head == NULL); |
4107 | 0 | events->head = chunk; |
4108 | 0 | } |
4109 | 0 | else |
4110 | 0 | events->tail->next = chunk; |
4111 | 0 | events->tail = chunk; |
4112 | | /* events->tailfree is now out of sync, but we'll fix it below */ |
4113 | 0 | } |
4114 | | |
4115 | | /* |
4116 | | * Try to locate a matching shared-data record already in the chunk. If |
4117 | | * none, make a new one. The search begins with the most recently added |
4118 | | * record, since newer ones are most likely to match. |
4119 | | */ |
4120 | 0 | for (newshared = (AfterTriggerShared) chunk->endfree; |
4121 | 0 | (char *) newshared < chunk->endptr; |
4122 | 0 | newshared++) |
4123 | 0 | { |
4124 | | /* compare fields roughly by probability of them being different */ |
4125 | 0 | if (newshared->ats_tgoid == evtshared->ats_tgoid && |
4126 | 0 | newshared->ats_event == evtshared->ats_event && |
4127 | 0 | newshared->ats_firing_id == 0 && |
4128 | 0 | newshared->ats_table == evtshared->ats_table && |
4129 | 0 | newshared->ats_relid == evtshared->ats_relid && |
4130 | 0 | newshared->ats_rolid == evtshared->ats_rolid && |
4131 | 0 | bms_equal(newshared->ats_modifiedcols, |
4132 | 0 | evtshared->ats_modifiedcols)) |
4133 | 0 | break; |
4134 | 0 | } |
4135 | 0 | if ((char *) newshared >= chunk->endptr) |
4136 | 0 | { |
4137 | 0 | newshared = ((AfterTriggerShared) chunk->endfree) - 1; |
4138 | 0 | *newshared = *evtshared; |
4139 | | /* now we must make a suitably-long-lived copy of the bitmap */ |
4140 | 0 | newshared->ats_modifiedcols = afterTriggerCopyBitmap(evtshared->ats_modifiedcols); |
4141 | 0 | newshared->ats_firing_id = 0; /* just to be sure */ |
4142 | 0 | chunk->endfree = (char *) newshared; |
4143 | 0 | } |
4144 | | |
4145 | | /* Insert the data */ |
4146 | 0 | newevent = (AfterTriggerEvent) chunk->freeptr; |
4147 | 0 | memcpy(newevent, event, eventsize); |
4148 | | /* ... and link the new event to its shared record */ |
4149 | 0 | newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET; |
4150 | 0 | newevent->ate_flags |= (char *) newshared - (char *) newevent; |
4151 | |
4152 | 0 | chunk->freeptr += eventsize; |
4153 | 0 | events->tailfree = chunk->freeptr; |
4154 | 0 | } |
4155 | | |
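The chunk-sizing rule described in the comment inside afterTriggerAddEvent() can be stated on its own: the first chunk is 1 kB, sizes are capped at 1 MB, and each new chunk doubles the previous one's size unless that chunk accumulated more than roughly 100 shared records, in which case the size is halved. A small standalone sketch of that rule follows; the helper and its parameters are illustrative, and where the real code compares bytes of shared records against 100 * sizeof(AfterTriggerSharedData), the sketch just counts records.

#include <stdio.h>
#include <stddef.h>

#define MIN_CHUNK_SIZE 1024
#define MAX_CHUNK_SIZE (1024 * 1024)

/*
 * prev_size:    size in bytes of the previous chunk, or 0 if none exists yet.
 * prev_nshared: number of shared records that chunk ended up holding.
 */
static size_t
next_chunk_size(size_t prev_size, size_t prev_nshared)
{
    size_t size;

    if (prev_size == 0)
        return MIN_CHUNK_SIZE;

    if (prev_nshared <= 100)
        size = prev_size * 2;        /* few distinct event types: grow   */
    else
        size = prev_size / 2;        /* too many shared records: shrink  */

    return size > MAX_CHUNK_SIZE ? MAX_CHUNK_SIZE : size;
}

int main(void)
{
    printf("first chunk:               %zu bytes\n", next_chunk_size(0, 0));
    printf("after a homogeneous chunk: %zu bytes\n", next_chunk_size(8192, 3));
    printf("after a very mixed chunk:  %zu bytes\n", next_chunk_size(8192, 500));
    return 0;
}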
4156 | | /* ---------- |
4157 | | * afterTriggerFreeEventList() |
4158 | | * |
4159 | | * Free all the event storage in the given list. |
4160 | | * ---------- |
4161 | | */ |
4162 | | static void |
4163 | | afterTriggerFreeEventList(AfterTriggerEventList *events) |
4164 | 0 | { |
4165 | 0 | AfterTriggerEventChunk *chunk; |
4166 | |
4167 | 0 | while ((chunk = events->head) != NULL) |
4168 | 0 | { |
4169 | 0 | events->head = chunk->next; |
4170 | 0 | pfree(chunk); |
4171 | 0 | } |
4172 | 0 | events->tail = NULL; |
4173 | 0 | events->tailfree = NULL; |
4174 | 0 | } |
4175 | | |
4176 | | /* ---------- |
4177 | | * afterTriggerRestoreEventList() |
4178 | | * |
4179 | | * Restore an event list to its prior length, removing all the events |
4180 | | * added since it had the value old_events. |
4181 | | * ---------- |
4182 | | */ |
4183 | | static void |
4184 | | afterTriggerRestoreEventList(AfterTriggerEventList *events, |
4185 | | const AfterTriggerEventList *old_events) |
4186 | 0 | { |
4187 | 0 | AfterTriggerEventChunk *chunk; |
4188 | 0 | AfterTriggerEventChunk *next_chunk; |
4189 | |
4190 | 0 | if (old_events->tail == NULL) |
4191 | 0 | { |
4192 | | /* restoring to a completely empty state, so free everything */ |
4193 | 0 | afterTriggerFreeEventList(events); |
4194 | 0 | } |
4195 | 0 | else |
4196 | 0 | { |
4197 | 0 | *events = *old_events; |
4198 | | /* free any chunks after the last one we want to keep */ |
4199 | 0 | for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk) |
4200 | 0 | { |
4201 | 0 | next_chunk = chunk->next; |
4202 | 0 | pfree(chunk); |
4203 | 0 | } |
4204 | | /* and clean up the tail chunk to be the right length */ |
4205 | 0 | events->tail->next = NULL; |
4206 | 0 | events->tail->freeptr = events->tailfree; |
4207 | | |
4208 | | /* |
4209 | | * We don't make any effort to remove now-unused shared data records. |
4210 | | * They might still be useful, anyway. |
4211 | | */ |
4212 | 0 | } |
4213 | 0 | } |
4214 | | |
4215 | | /* ---------- |
4216 | | * afterTriggerDeleteHeadEventChunk() |
4217 | | * |
4218 | | * Remove the first chunk of events from the query level's event list. |
4219 | | * Keep any event list pointers elsewhere in the query level's data |
4220 | | * structures in sync. |
4221 | | * ---------- |
4222 | | */ |
4223 | | static void |
4224 | | afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs) |
4225 | 0 | { |
4226 | 0 | AfterTriggerEventChunk *target = qs->events.head; |
4227 | 0 | ListCell *lc; |
4228 | |
4229 | 0 | Assert(target && target->next); |
4230 | | |
4231 | | /* |
4232 | | * First, update any pointers in the per-table data, so that they won't be |
4233 | | * dangling. Resetting obsoleted pointers to NULL will make |
4234 | | * cancel_prior_stmt_triggers start from the list head, which is fine. |
4235 | | */ |
4236 | 0 | foreach(lc, qs->tables) |
4237 | 0 | { |
4238 | 0 | AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc); |
4239 | |
4240 | 0 | if (table->after_trig_done && |
4241 | 0 | table->after_trig_events.tail == target) |
4242 | 0 | { |
4243 | 0 | table->after_trig_events.head = NULL; |
4244 | 0 | table->after_trig_events.tail = NULL; |
4245 | 0 | table->after_trig_events.tailfree = NULL; |
4246 | 0 | } |
4247 | 0 | } |
4248 | | |
4249 | | /* Now we can flush the head chunk */ |
4250 | 0 | qs->events.head = target->next; |
4251 | 0 | pfree(target); |
4252 | 0 | } |
4253 | | |
4254 | | |
4255 | | /* ---------- |
4256 | | * AfterTriggerExecute() |
4257 | | * |
4258 | | * Fetch the required tuples back from the heap and fire one |
4259 | | * single trigger function. |
4260 | | * |
4261 | | * Frequently, this will be fired many times in a row for triggers of |
4262 | | * a single relation. Therefore, we cache the open relation and provide |
4263 | | * fmgr lookup cache space at the caller level. (For triggers fired at |
4264 | | * the end of a query, we can even piggyback on the executor's state.) |
4265 | | * |
4266 | | * When fired for a cross-partition update of a partitioned table, the old |
4267 | | * tuple is fetched using 'src_relInfo' (the source leaf partition) and |
4268 | | * the new tuple using 'dst_relInfo' (the destination leaf partition), though |
4269 | | * both are converted into the root partitioned table's format before passing |
4270 | | * to the trigger function. |
4271 | | * |
4272 | | * event: event currently being fired. |
4273 | | * relInfo: result relation for event. |
4274 | | * src_relInfo: source partition of a cross-partition update |
4275 | | * dst_relInfo: its destination partition |
4276 | | * trigdesc: working copy of rel's trigger info. |
4277 | | * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc). |
4278 | | * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger), |
4279 | | * or NULL if no instrumentation is wanted. |
4280 | | * per_tuple_context: memory context to call trigger function in. |
4281 | | * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only) |
4282 | | * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only) |
4283 | | * ---------- |
4284 | | */ |
4285 | | static void |
4286 | | AfterTriggerExecute(EState *estate, |
4287 | | AfterTriggerEvent event, |
4288 | | ResultRelInfo *relInfo, |
4289 | | ResultRelInfo *src_relInfo, |
4290 | | ResultRelInfo *dst_relInfo, |
4291 | | TriggerDesc *trigdesc, |
4292 | | FmgrInfo *finfo, Instrumentation *instr, |
4293 | | MemoryContext per_tuple_context, |
4294 | | TupleTableSlot *trig_tuple_slot1, |
4295 | | TupleTableSlot *trig_tuple_slot2) |
4296 | 0 | { |
4297 | 0 | Relation rel = relInfo->ri_RelationDesc; |
4298 | 0 | Relation src_rel = src_relInfo->ri_RelationDesc; |
4299 | 0 | Relation dst_rel = dst_relInfo->ri_RelationDesc; |
4300 | 0 | AfterTriggerShared evtshared = GetTriggerSharedData(event); |
4301 | 0 | Oid tgoid = evtshared->ats_tgoid; |
4302 | 0 | TriggerData LocTriggerData = {0}; |
4303 | 0 | Oid save_rolid; |
4304 | 0 | int save_sec_context; |
4305 | 0 | HeapTuple rettuple; |
4306 | 0 | int tgindx; |
4307 | 0 | bool should_free_trig = false; |
4308 | 0 | bool should_free_new = false; |
4309 | | |
4310 | | /* |
4311 | | * Locate trigger in trigdesc. It might not be present, and in fact the |
4312 | | * trigdesc could be NULL, if the trigger was dropped since the event was |
4313 | | * queued. In that case, silently do nothing. |
4314 | | */ |
4315 | 0 | if (trigdesc == NULL) |
4316 | 0 | return; |
4317 | 0 | for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++) |
4318 | 0 | { |
4319 | 0 | if (trigdesc->triggers[tgindx].tgoid == tgoid) |
4320 | 0 | { |
4321 | 0 | LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]); |
4322 | 0 | break; |
4323 | 0 | } |
4324 | 0 | } |
4325 | 0 | if (LocTriggerData.tg_trigger == NULL) |
4326 | 0 | return; |
4327 | | |
4328 | | /* |
4329 | | * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want |
4330 | | * to include time spent re-fetching tuples in the trigger cost. |
4331 | | */ |
4332 | 0 | if (instr) |
4333 | 0 | InstrStartNode(instr + tgindx); |
4334 | | |
4335 | | /* |
4336 | | * Fetch the required tuple(s). |
4337 | | */ |
4338 | 0 | switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS) |
4339 | 0 | { |
4340 | 0 | case AFTER_TRIGGER_FDW_FETCH: |
4341 | 0 | { |
4342 | 0 | Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore(); |
4343 | |
4344 | 0 | if (!tuplestore_gettupleslot(fdw_tuplestore, true, false, |
4345 | 0 | trig_tuple_slot1)) |
4346 | 0 | elog(ERROR, "failed to fetch tuple1 for AFTER trigger"); |
4347 | | |
4348 | 0 | if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) == |
4349 | 0 | TRIGGER_EVENT_UPDATE && |
4350 | 0 | !tuplestore_gettupleslot(fdw_tuplestore, true, false, |
4351 | 0 | trig_tuple_slot2)) |
4352 | 0 | elog(ERROR, "failed to fetch tuple2 for AFTER trigger"); |
4353 | 0 | } |
4354 | | /* fall through */ |
4355 | 0 | case AFTER_TRIGGER_FDW_REUSE: |
4356 | | |
4357 | | /* |
4358 | | * Store tuple in the slot so that tg_trigtuple does not reference |
4359 | | * tuplestore memory. (It is formally possible for the trigger |
4360 | | * function to queue trigger events that add to the same |
4361 | | * tuplestore, which can push other tuples out of memory.) The |
4362 | | * distinction is academic, because we start with a minimal tuple |
4363 | | * that is stored as a heap tuple, constructed in a different memory |
4364 | | * context, in the slot anyway. |
4365 | | */ |
4366 | 0 | LocTriggerData.tg_trigslot = trig_tuple_slot1; |
4367 | 0 | LocTriggerData.tg_trigtuple = |
4368 | 0 | ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig); |
4369 | |
4370 | 0 | if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) == |
4371 | 0 | TRIGGER_EVENT_UPDATE) |
4372 | 0 | { |
4373 | 0 | LocTriggerData.tg_newslot = trig_tuple_slot2; |
4374 | 0 | LocTriggerData.tg_newtuple = |
4375 | 0 | ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new); |
4376 | 0 | } |
4377 | 0 | else |
4378 | 0 | { |
4379 | 0 | LocTriggerData.tg_newtuple = NULL; |
4380 | 0 | } |
4381 | 0 | break; |
4382 | | |
4383 | 0 | default: |
4384 | 0 | if (ItemPointerIsValid(&(event->ate_ctid1))) |
4385 | 0 | { |
4386 | 0 | TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate, |
4387 | 0 | src_relInfo); |
4388 | |
4389 | 0 | if (!table_tuple_fetch_row_version(src_rel, |
4390 | 0 | &(event->ate_ctid1), |
4391 | 0 | SnapshotAny, |
4392 | 0 | src_slot)) |
4393 | 0 | elog(ERROR, "failed to fetch tuple1 for AFTER trigger"); |
4394 | | |
4395 | | /* |
4396 | | * Store the tuple fetched from the source partition into the |
4397 | | * target (root partitioned) table slot, converting if needed. |
4398 | | */ |
4399 | 0 | if (src_relInfo != relInfo) |
4400 | 0 | { |
4401 | 0 | TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo); |
4402 | |
4403 | 0 | LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo); |
4404 | 0 | if (map) |
4405 | 0 | { |
4406 | 0 | execute_attr_map_slot(map->attrMap, |
4407 | 0 | src_slot, |
4408 | 0 | LocTriggerData.tg_trigslot); |
4409 | 0 | } |
4410 | 0 | else |
4411 | 0 | ExecCopySlot(LocTriggerData.tg_trigslot, src_slot); |
4412 | 0 | } |
4413 | 0 | else |
4414 | 0 | LocTriggerData.tg_trigslot = src_slot; |
4415 | 0 | LocTriggerData.tg_trigtuple = |
4416 | 0 | ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig); |
4417 | 0 | } |
4418 | 0 | else |
4419 | 0 | { |
4420 | 0 | LocTriggerData.tg_trigtuple = NULL; |
4421 | 0 | } |
4422 | | |
4423 | | /* don't touch ctid2 if not there */ |
4424 | 0 | if (((event->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID || |
4425 | 0 | (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) && |
4426 | 0 | ItemPointerIsValid(&(event->ate_ctid2))) |
4427 | 0 | { |
4428 | 0 | TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate, |
4429 | 0 | dst_relInfo); |
4430 | |
4431 | 0 | if (!table_tuple_fetch_row_version(dst_rel, |
4432 | 0 | &(event->ate_ctid2), |
4433 | 0 | SnapshotAny, |
4434 | 0 | dst_slot)) |
4435 | 0 | elog(ERROR, "failed to fetch tuple2 for AFTER trigger"); |
4436 | | |
4437 | | /* |
4438 | | * Store the tuple fetched from the destination partition into |
4439 | | * the target (root partitioned) table slot, converting if |
4440 | | * needed. |
4441 | | */ |
4442 | 0 | if (dst_relInfo != relInfo) |
4443 | 0 | { |
4444 | 0 | TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo); |
4445 | |
4446 | 0 | LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo); |
4447 | 0 | if (map) |
4448 | 0 | { |
4449 | 0 | execute_attr_map_slot(map->attrMap, |
4450 | 0 | dst_slot, |
4451 | 0 | LocTriggerData.tg_newslot); |
4452 | 0 | } |
4453 | 0 | else |
4454 | 0 | ExecCopySlot(LocTriggerData.tg_newslot, dst_slot); |
4455 | 0 | } |
4456 | 0 | else |
4457 | 0 | LocTriggerData.tg_newslot = dst_slot; |
4458 | 0 | LocTriggerData.tg_newtuple = |
4459 | 0 | ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new); |
4460 | 0 | } |
4461 | 0 | else |
4462 | 0 | { |
4463 | 0 | LocTriggerData.tg_newtuple = NULL; |
4464 | 0 | } |
4465 | 0 | } |
4466 | | |
4467 | | /* |
4468 | | * Set up the tuplestore information to let the trigger have access to |
4469 | | * transition tables. When we first make a transition table available to |
4470 | | * a trigger, mark it "closed" so that it cannot change anymore. If any |
4471 | | * additional events of the same type get queued in the current trigger |
4472 | | * query level, they'll go into new transition tables. |
4473 | | */ |
4474 | 0 | LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL; |
4475 | 0 | if (evtshared->ats_table) |
4476 | 0 | { |
4477 | 0 | if (LocTriggerData.tg_trigger->tgoldtable) |
4478 | 0 | { |
4479 | 0 | if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event)) |
4480 | 0 | LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore; |
4481 | 0 | else |
4482 | 0 | LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore; |
4483 | 0 | evtshared->ats_table->closed = true; |
4484 | 0 | } |
4485 | |
4486 | 0 | if (LocTriggerData.tg_trigger->tgnewtable) |
4487 | 0 | { |
4488 | 0 | if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event)) |
4489 | 0 | LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore; |
4490 | 0 | else |
4491 | 0 | LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore; |
4492 | 0 | evtshared->ats_table->closed = true; |
4493 | 0 | } |
4494 | 0 | } |
4495 | | |
4496 | | /* |
4497 | | * Setup the remaining trigger information |
4498 | | */ |
4499 | 0 | LocTriggerData.type = T_TriggerData; |
4500 | 0 | LocTriggerData.tg_event = |
4501 | 0 | evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW); |
4502 | 0 | LocTriggerData.tg_relation = rel; |
4503 | 0 | if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype)) |
4504 | 0 | LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols; |
4505 | |
4506 | 0 | MemoryContextReset(per_tuple_context); |
4507 | | |
4508 | | /* |
4509 | | * If necessary, become the role that was active when the trigger got |
4510 | | * queued. Note that the role might have been dropped since the trigger |
4511 | | * was queued, but if that is a problem, we will get an error later. |
4512 | | * Checking here would still leave a race condition. |
4513 | | */ |
4514 | 0 | GetUserIdAndSecContext(&save_rolid, &save_sec_context); |
4515 | 0 | if (save_rolid != evtshared->ats_rolid) |
4516 | 0 | SetUserIdAndSecContext(evtshared->ats_rolid, |
4517 | 0 | save_sec_context | SECURITY_LOCAL_USERID_CHANGE); |
4518 | | |
4519 | | /* |
4520 | | * Call the trigger and throw away any possibly returned updated tuple. |
4521 | | * (Don't let ExecCallTriggerFunc measure EXPLAIN time.) |
4522 | | */ |
4523 | 0 | rettuple = ExecCallTriggerFunc(&LocTriggerData, |
4524 | 0 | tgindx, |
4525 | 0 | finfo, |
4526 | 0 | NULL, |
4527 | 0 | per_tuple_context); |
4528 | 0 | if (rettuple != NULL && |
4529 | 0 | rettuple != LocTriggerData.tg_trigtuple && |
4530 | 0 | rettuple != LocTriggerData.tg_newtuple) |
4531 | 0 | heap_freetuple(rettuple); |
4532 | | |
4533 | | /* Restore the current role if necessary */ |
4534 | 0 | if (save_rolid != evtshared->ats_rolid) |
4535 | 0 | SetUserIdAndSecContext(save_rolid, save_sec_context); |
4536 | | |
4537 | | /* |
4538 | | * Release resources |
4539 | | */ |
4540 | 0 | if (should_free_trig) |
4541 | 0 | heap_freetuple(LocTriggerData.tg_trigtuple); |
4542 | 0 | if (should_free_new) |
4543 | 0 | heap_freetuple(LocTriggerData.tg_newtuple); |
4544 | | |
4545 | | /* don't clear slots' contents if foreign table */ |
4546 | 0 | if (trig_tuple_slot1 == NULL) |
4547 | 0 | { |
4548 | 0 | if (LocTriggerData.tg_trigslot) |
4549 | 0 | ExecClearTuple(LocTriggerData.tg_trigslot); |
4550 | 0 | if (LocTriggerData.tg_newslot) |
4551 | 0 | ExecClearTuple(LocTriggerData.tg_newslot); |
4552 | 0 | } |
4553 | | |
4554 | | /* |
4555 | | * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count |
4556 | | * one "tuple returned" (really the number of firings). |
4557 | | */ |
4558 | 0 | if (instr) |
4559 | 0 | InstrStopNode(instr + tgindx, 1); |
4560 | 0 | } |
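/*
 * [Editor's illustration -- not part of trigger.c.  A minimal AFTER ROW
 * trigger written against the documented C trigger interface, showing how
 * the TriggerData fields populated by AfterTriggerExecute() above
 * (tg_event, tg_relation, tg_trigtuple, tg_newtuple) look from the called
 * function's side.  The function name is made up; the headers and macros
 * are the documented ones.]
 */
#include "postgres.h"

#include "commands/trigger.h"
#include "fmgr.h"
#include "utils/rel.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(example_after_row_trig);

Datum
example_after_row_trig(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "example_after_row_trig: not called by trigger manager");

	/* AfterTriggerExecute() only fires AFTER ... FOR EACH ROW events */
	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		elog(NOTICE, "update on \"%s\": old row in tg_trigtuple, new row in tg_newtuple",
			 RelationGetRelationName(trigdata->tg_relation));
	else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
		elog(NOTICE, "delete on \"%s\": old row in tg_trigtuple",
			 RelationGetRelationName(trigdata->tg_relation));
	else if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
		elog(NOTICE, "insert on \"%s\": new row in tg_trigtuple",
			 RelationGetRelationName(trigdata->tg_relation));

	/* the return value of an AFTER ROW trigger is ignored */
	return PointerGetDatum(NULL);
}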
4561 | | |
4562 | | |
4563 | | /* |
4564 | | * afterTriggerMarkEvents() |
4565 | | * |
4566 | | * Scan the given event list for not yet invoked events. Mark the ones |
4567 | | * that can be invoked now with the current firing ID. |
4568 | | * |
4569 | | * If move_list isn't NULL, events that are not to be invoked now are |
4570 | | * transferred to move_list. |
4571 | | * |
4572 | | * When immediate_only is true, do not invoke currently-deferred triggers. |
4573 | | * (This will be false only at main transaction exit.) |
4574 | | * |
4575 | | * Returns true if any invokable events were found. |
4576 | | */ |
4577 | | static bool |
4578 | | afterTriggerMarkEvents(AfterTriggerEventList *events, |
4579 | | AfterTriggerEventList *move_list, |
4580 | | bool immediate_only) |
4581 | 0 | { |
4582 | 0 | bool found = false; |
4583 | 0 | bool deferred_found = false; |
4584 | 0 | AfterTriggerEvent event; |
4585 | 0 | AfterTriggerEventChunk *chunk; |
4586 | |
4587 | 0 | for_each_event_chunk(event, chunk, *events) |
4588 | 0 | { |
4589 | 0 | AfterTriggerShared evtshared = GetTriggerSharedData(event); |
4590 | 0 | bool defer_it = false; |
4591 | |
4592 | 0 | if (!(event->ate_flags & |
4593 | 0 | (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))) |
4594 | 0 | { |
4595 | | /* |
4596 | | * This trigger hasn't been called or scheduled yet. Check if we |
4597 | | * should call it now. |
4598 | | */ |
4599 | 0 | if (immediate_only && afterTriggerCheckState(evtshared)) |
4600 | 0 | { |
4601 | 0 | defer_it = true; |
4602 | 0 | } |
4603 | 0 | else |
4604 | 0 | { |
4605 | | /* |
4606 | | * Mark it as to be fired in this firing cycle. |
4607 | | */ |
4608 | 0 | evtshared->ats_firing_id = afterTriggers.firing_counter; |
4609 | 0 | event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS; |
4610 | 0 | found = true; |
4611 | 0 | } |
4612 | 0 | } |
4613 | | |
4614 | | /* |
4615 | | * If it's deferred, move it to move_list, if requested. |
4616 | | */ |
4617 | 0 | if (defer_it && move_list != NULL) |
4618 | 0 | { |
4619 | 0 | deferred_found = true; |
4620 | | /* add it to move_list */ |
4621 | 0 | afterTriggerAddEvent(move_list, event, evtshared); |
4622 | | /* mark original copy "done" so we don't do it again */ |
4623 | 0 | event->ate_flags |= AFTER_TRIGGER_DONE; |
4624 | 0 | } |
4625 | 0 | } |
4626 | | |
4627 | | /* |
4628 | | * We could allow deferred triggers if, before the end of the |
4629 | | * security-restricted operation, we were to verify that a SET CONSTRAINTS |
4630 | | * ... IMMEDIATE has fired all such triggers. For now, don't bother. |
4631 | | */ |
4632 | 0 | if (deferred_found && InSecurityRestrictedOperation()) |
4633 | 0 | ereport(ERROR, |
4634 | 0 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
4635 | 0 | errmsg("cannot fire deferred trigger within security-restricted operation"))); |
4636 | | |
4637 | 0 | return found; |
4638 | 0 | } |
4639 | | |
4640 | | /* |
4641 | | * afterTriggerInvokeEvents() |
4642 | | * |
4643 | | * Scan the given event list for events that are marked as to be fired |
4644 | | * in the current firing cycle, and fire them. |
4645 | | * |
4646 | | * If estate isn't NULL, we use its result relation info to avoid repeated |
4647 | | * openings and closing of trigger target relations. If it is NULL, we |
4648 | | * make one locally to cache the info in case there are multiple trigger |
4649 | | * events per rel. |
4650 | | * |
4651 | | * When delete_ok is true, it's safe to delete fully-processed events. |
4652 | | * (We are not very tense about that: we simply reset a chunk to be empty |
4653 | | * if all its events got fired. The objective here is just to avoid useless |
4654 | | * rescanning of events when a trigger queues new events during transaction |
4655 | | * end, so it's not necessary to worry much about the case where only |
4656 | | * some events are fired.) |
4657 | | * |
4658 | | * Returns true if no unfired events remain in the list (this allows us |
4659 | | * to avoid repeating afterTriggerMarkEvents). |
4660 | | */ |
4661 | | static bool |
4662 | | afterTriggerInvokeEvents(AfterTriggerEventList *events, |
4663 | | CommandId firing_id, |
4664 | | EState *estate, |
4665 | | bool delete_ok) |
4666 | 0 | { |
4667 | 0 | bool all_fired = true; |
4668 | 0 | AfterTriggerEventChunk *chunk; |
4669 | 0 | MemoryContext per_tuple_context; |
4670 | 0 | bool local_estate = false; |
4671 | 0 | ResultRelInfo *rInfo = NULL; |
4672 | 0 | Relation rel = NULL; |
4673 | 0 | TriggerDesc *trigdesc = NULL; |
4674 | 0 | FmgrInfo *finfo = NULL; |
4675 | 0 | Instrumentation *instr = NULL; |
4676 | 0 | TupleTableSlot *slot1 = NULL, |
4677 | 0 | *slot2 = NULL; |
4678 | | |
4679 | | /* Make a local EState if need be */ |
4680 | 0 | if (estate == NULL) |
4681 | 0 | { |
4682 | 0 | estate = CreateExecutorState(); |
4683 | 0 | local_estate = true; |
4684 | 0 | } |
4685 | | |
4686 | | /* Make a per-tuple memory context for trigger function calls */ |
4687 | 0 | per_tuple_context = |
4688 | 0 | AllocSetContextCreate(CurrentMemoryContext, |
4689 | 0 | "AfterTriggerTupleContext", |
4690 | 0 | ALLOCSET_DEFAULT_SIZES); |
4691 | |
4692 | 0 | for_each_chunk(chunk, *events) |
4693 | 0 | { |
4694 | 0 | AfterTriggerEvent event; |
4695 | 0 | bool all_fired_in_chunk = true; |
4696 | |
4697 | 0 | for_each_event(event, chunk) |
4698 | 0 | { |
4699 | 0 | AfterTriggerShared evtshared = GetTriggerSharedData(event); |
4700 | | |
4701 | | /* |
4702 | | * Is it one for me to fire? |
4703 | | */ |
4704 | 0 | if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) && |
4705 | 0 | evtshared->ats_firing_id == firing_id) |
4706 | 0 | { |
4707 | 0 | ResultRelInfo *src_rInfo, |
4708 | 0 | *dst_rInfo; |
4709 | | |
4710 | | /* |
4711 | | * So let's fire it... but first, find the correct relation if |
4712 | | * this is not the same relation as before. |
4713 | | */ |
4714 | 0 | if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid) |
4715 | 0 | { |
4716 | 0 | rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid, |
4717 | 0 | NULL); |
4718 | 0 | rel = rInfo->ri_RelationDesc; |
4719 | | /* Catch calls with insufficient relcache refcounting */ |
4720 | 0 | Assert(!RelationHasReferenceCountZero(rel)); |
4721 | 0 | trigdesc = rInfo->ri_TrigDesc; |
4722 | | /* caution: trigdesc could be NULL here */ |
4723 | 0 | finfo = rInfo->ri_TrigFunctions; |
4724 | 0 | instr = rInfo->ri_TrigInstrument; |
4725 | 0 | if (slot1 != NULL) |
4726 | 0 | { |
4727 | 0 | ExecDropSingleTupleTableSlot(slot1); |
4728 | 0 | ExecDropSingleTupleTableSlot(slot2); |
4729 | 0 | slot1 = slot2 = NULL; |
4730 | 0 | } |
4731 | 0 | if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) |
4732 | 0 | { |
4733 | 0 | slot1 = MakeSingleTupleTableSlot(rel->rd_att, |
4734 | 0 | &TTSOpsMinimalTuple); |
4735 | 0 | slot2 = MakeSingleTupleTableSlot(rel->rd_att, |
4736 | 0 | &TTSOpsMinimalTuple); |
4737 | 0 | } |
4738 | 0 | } |
4739 | | |
4740 | | /* |
4741 | | * Look up source and destination partition result rels of a |
4742 | | * cross-partition update event. |
4743 | | */ |
4744 | 0 | if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) == |
4745 | 0 | AFTER_TRIGGER_CP_UPDATE) |
4746 | 0 | { |
4747 | 0 | Assert(OidIsValid(event->ate_src_part) && |
4748 | 0 | OidIsValid(event->ate_dst_part)); |
4749 | 0 | src_rInfo = ExecGetTriggerResultRel(estate, |
4750 | 0 | event->ate_src_part, |
4751 | 0 | rInfo); |
4752 | 0 | dst_rInfo = ExecGetTriggerResultRel(estate, |
4753 | 0 | event->ate_dst_part, |
4754 | 0 | rInfo); |
4755 | 0 | } |
4756 | 0 | else |
4757 | 0 | src_rInfo = dst_rInfo = rInfo; |
4758 | | |
4759 | | /* |
4760 | | * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is |
4761 | | * still set, so recursive examinations of the event list |
4762 | | * won't try to re-fire it. |
4763 | | */ |
4764 | 0 | AfterTriggerExecute(estate, event, rInfo, |
4765 | 0 | src_rInfo, dst_rInfo, |
4766 | 0 | trigdesc, finfo, instr, |
4767 | 0 | per_tuple_context, slot1, slot2); |
4768 | | |
4769 | | /* |
4770 | | * Mark the event as done. |
4771 | | */ |
4772 | 0 | event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS; |
4773 | 0 | event->ate_flags |= AFTER_TRIGGER_DONE; |
4774 | 0 | } |
4775 | 0 | else if (!(event->ate_flags & AFTER_TRIGGER_DONE)) |
4776 | 0 | { |
4777 | | /* something remains to be done */ |
4778 | 0 | all_fired = all_fired_in_chunk = false; |
4779 | 0 | } |
4780 | 0 | } |
4781 | | |
4782 | | /* Clear the chunk if delete_ok and nothing left of interest */ |
4783 | 0 | if (delete_ok && all_fired_in_chunk) |
4784 | 0 | { |
4785 | 0 | chunk->freeptr = CHUNK_DATA_START(chunk); |
4786 | 0 | chunk->endfree = chunk->endptr; |
4787 | | |
4788 | | /* |
4789 | | * If it's last chunk, must sync event list's tailfree too. Note |
4790 | | * that delete_ok must NOT be passed as true if there could be |
4791 | | * additional AfterTriggerEventList values pointing at this event |
4792 | | * list, since we'd fail to fix their copies of tailfree. |
4793 | | */ |
4794 | 0 | if (chunk == events->tail) |
4795 | 0 | events->tailfree = chunk->freeptr; |
4796 | 0 | } |
4797 | 0 | } |
4798 | 0 | if (slot1 != NULL) |
4799 | 0 | { |
4800 | 0 | ExecDropSingleTupleTableSlot(slot1); |
4801 | 0 | ExecDropSingleTupleTableSlot(slot2); |
4802 | 0 | } |
4803 | | |
4804 | | /* Release working resources */ |
4805 | 0 | MemoryContextDelete(per_tuple_context); |
4806 | |
4807 | 0 | if (local_estate) |
4808 | 0 | { |
4809 | 0 | ExecCloseResultRelations(estate); |
4810 | 0 | ExecResetTupleTable(estate->es_tupleTable, false); |
4811 | 0 | FreeExecutorState(estate); |
4812 | 0 | } |
4813 | |
4814 | 0 | return all_fired; |
4815 | 0 | } |
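/*
 * [Editor's illustration -- not part of trigger.c.  A stripped-down sketch
 * of the mark-then-invoke cycle implemented by afterTriggerMarkEvents() and
 * afterTriggerInvokeEvents() above: events queued while a cycle is running
 * never carry the current firing id, so they wait for the next cycle.  The
 * types and the fire() callback are illustrative only.]
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct SketchEvent
{
	bool		done;			/* plays the role of AFTER_TRIGGER_DONE */
	bool		in_progress;	/* plays the role of AFTER_TRIGGER_IN_PROGRESS */
	unsigned int firing_id;		/* plays the role of ats_firing_id */
	struct SketchEvent *next;
} SketchEvent;

static unsigned int sketch_firing_counter = 1;

/* Mark every event that is neither done nor already scheduled. */
static bool
sketch_mark_events(SketchEvent *head)
{
	bool		found = false;

	for (SketchEvent *ev = head; ev != NULL; ev = ev->next)
	{
		if (!ev->done && !ev->in_progress)
		{
			ev->firing_id = sketch_firing_counter;
			ev->in_progress = true;
			found = true;
		}
	}
	return found;
}

/* Fire only the events carrying this cycle's id; newer events wait. */
static void
sketch_invoke_events(SketchEvent *head, unsigned int firing_id,
					 void (*fire) (SketchEvent *))
{
	for (SketchEvent *ev = head; ev != NULL; ev = ev->next)
	{
		if (ev->in_progress && ev->firing_id == firing_id)
		{
			fire(ev);			/* may append more events to the list */
			ev->in_progress = false;
			ev->done = true;
		}
	}
}

/* Driver loop, mirroring AfterTriggerEndQuery() and AfterTriggerFireDeferred() below. */
static void
sketch_fire_all(SketchEvent *head, void (*fire) (SketchEvent *))
{
	while (sketch_mark_events(head))
		sketch_invoke_events(head, sketch_firing_counter++, fire);
}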
4816 | | |
4817 | | |
4818 | | /* |
4819 | | * GetAfterTriggersTableData |
4820 | | * |
4821 | | * Find or create an AfterTriggersTableData struct for the specified |
4822 | | * trigger event (relation + operation type). Ignore existing structs |
4823 | | * marked "closed"; we don't want to put any additional tuples into them, |
4824 | | * nor change their stmt-triggers-fired state. |
4825 | | * |
4826 | | * Note: the AfterTriggersTableData list is allocated in the current |
4827 | | * (sub)transaction's CurTransactionContext. This is OK because |
4828 | | * we don't need it to live past AfterTriggerEndQuery. |
4829 | | */ |
4830 | | static AfterTriggersTableData * |
4831 | | GetAfterTriggersTableData(Oid relid, CmdType cmdType) |
4832 | 0 | { |
4833 | 0 | AfterTriggersTableData *table; |
4834 | 0 | AfterTriggersQueryData *qs; |
4835 | 0 | MemoryContext oldcxt; |
4836 | 0 | ListCell *lc; |
4837 | | |
4838 | | /* Caller should have ensured query_depth is OK. */ |
4839 | 0 | Assert(afterTriggers.query_depth >= 0 && |
4840 | 0 | afterTriggers.query_depth < afterTriggers.maxquerydepth); |
4841 | 0 | qs = &afterTriggers.query_stack[afterTriggers.query_depth]; |
4842 | |
4843 | 0 | foreach(lc, qs->tables) |
4844 | 0 | { |
4845 | 0 | table = (AfterTriggersTableData *) lfirst(lc); |
4846 | 0 | if (table->relid == relid && table->cmdType == cmdType && |
4847 | 0 | !table->closed) |
4848 | 0 | return table; |
4849 | 0 | } |
4850 | | |
4851 | 0 | oldcxt = MemoryContextSwitchTo(CurTransactionContext); |
4852 | |
4853 | 0 | table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData)); |
4854 | 0 | table->relid = relid; |
4855 | 0 | table->cmdType = cmdType; |
4856 | 0 | qs->tables = lappend(qs->tables, table); |
4857 | |
4858 | 0 | MemoryContextSwitchTo(oldcxt); |
4859 | |
4860 | 0 | return table; |
4861 | 0 | } |
4862 | | |
4863 | | /* |
4864 | | * Returns a TupleTableSlot suitable for holding the tuples to be put |
4865 | | * into AfterTriggersTableData's transition table tuplestores. |
4866 | | */ |
4867 | | static TupleTableSlot * |
4868 | | GetAfterTriggersStoreSlot(AfterTriggersTableData *table, |
4869 | | TupleDesc tupdesc) |
4870 | 0 | { |
4871 | | /* Create it if not already done. */ |
4872 | 0 | if (!table->storeslot) |
4873 | 0 | { |
4874 | 0 | MemoryContext oldcxt; |
4875 | | |
4876 | | /* |
4877 | | * We need this slot only until AfterTriggerEndQuery, but making it |
4878 | | * last till end-of-subxact is good enough. It'll be freed by |
4879 | | * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have |
4880 | | * a different lifespan, so we'd better make a copy of that. |
4881 | | */ |
4882 | 0 | oldcxt = MemoryContextSwitchTo(CurTransactionContext); |
4883 | 0 | tupdesc = CreateTupleDescCopy(tupdesc); |
4884 | 0 | table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual); |
4885 | 0 | MemoryContextSwitchTo(oldcxt); |
4886 | 0 | } |
4887 | |
4888 | 0 | return table->storeslot; |
4889 | 0 | } |
4890 | | |
4891 | | /* |
4892 | | * MakeTransitionCaptureState |
4893 | | * |
4894 | | * Make a TransitionCaptureState object for the given TriggerDesc, target |
4895 | | * relation, and operation type. The TCS object holds all the state needed |
4896 | | * to decide whether to capture tuples in transition tables. |
4897 | | * |
4898 | | * If there are no triggers in 'trigdesc' that request relevant transition |
4899 | | * tables, then return NULL. |
4900 | | * |
4901 | | * The resulting object can be passed to the ExecAR* functions. When |
4902 | | * dealing with child tables, the caller can set tcs_original_insert_tuple |
4903 | | * to avoid having to reconstruct the original tuple in the root table's |
4904 | | * format. |
4905 | | * |
4906 | | * Note that we copy the flags from a parent table into this struct (rather |
4907 | | * than subsequently using the relation's TriggerDesc directly) so that we can |
4908 | | * use it to control collection of transition tuples from child tables. |
4909 | | * |
4910 | | * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE) |
4911 | | * on the same table during one query should share one transition table. |
4912 | | * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct |
4913 | | * looked up using the table OID + CmdType, and are merely referenced by |
4914 | | * the TransitionCaptureState objects we hand out to callers. |
4915 | | */ |
4916 | | TransitionCaptureState * |
4917 | | MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType) |
4918 | 0 | { |
4919 | 0 | TransitionCaptureState *state; |
4920 | 0 | bool need_old_upd, |
4921 | 0 | need_new_upd, |
4922 | 0 | need_old_del, |
4923 | 0 | need_new_ins; |
4924 | 0 | AfterTriggersTableData *table; |
4925 | 0 | MemoryContext oldcxt; |
4926 | 0 | ResourceOwner saveResourceOwner; |
4927 | |
4928 | 0 | if (trigdesc == NULL) |
4929 | 0 | return NULL; |
4930 | | |
4931 | | /* Detect which table(s) we need. */ |
4932 | 0 | switch (cmdType) |
4933 | 0 | { |
4934 | 0 | case CMD_INSERT: |
4935 | 0 | need_old_upd = need_old_del = need_new_upd = false; |
4936 | 0 | need_new_ins = trigdesc->trig_insert_new_table; |
4937 | 0 | break; |
4938 | 0 | case CMD_UPDATE: |
4939 | 0 | need_old_upd = trigdesc->trig_update_old_table; |
4940 | 0 | need_new_upd = trigdesc->trig_update_new_table; |
4941 | 0 | need_old_del = need_new_ins = false; |
4942 | 0 | break; |
4943 | 0 | case CMD_DELETE: |
4944 | 0 | need_old_del = trigdesc->trig_delete_old_table; |
4945 | 0 | need_old_upd = need_new_upd = need_new_ins = false; |
4946 | 0 | break; |
4947 | 0 | case CMD_MERGE: |
4948 | 0 | need_old_upd = trigdesc->trig_update_old_table; |
4949 | 0 | need_new_upd = trigdesc->trig_update_new_table; |
4950 | 0 | need_old_del = trigdesc->trig_delete_old_table; |
4951 | 0 | need_new_ins = trigdesc->trig_insert_new_table; |
4952 | 0 | break; |
4953 | 0 | default: |
4954 | 0 | elog(ERROR, "unexpected CmdType: %d", (int) cmdType); |
4955 | | /* keep compiler quiet */ |
4956 | 0 | need_old_upd = need_new_upd = need_old_del = need_new_ins = false; |
4957 | 0 | break; |
4958 | 0 | } |
4959 | 0 | if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del) |
4960 | 0 | return NULL; |
4961 | | |
4962 | | /* Check state, like AfterTriggerSaveEvent. */ |
4963 | 0 | if (afterTriggers.query_depth < 0) |
4964 | 0 | elog(ERROR, "MakeTransitionCaptureState() called outside of query"); |
4965 | | |
4966 | | /* Be sure we have enough space to record events at this query depth. */ |
4967 | 0 | if (afterTriggers.query_depth >= afterTriggers.maxquerydepth) |
4968 | 0 | AfterTriggerEnlargeQueryState(); |
4969 | | |
4970 | | /* |
4971 | | * Find or create an AfterTriggersTableData struct to hold the |
4972 | | * tuplestore(s). If there's a matching struct but it's marked closed, |
4973 | | * ignore it; we need a newer one. |
4974 | | * |
4975 | | * Note: the AfterTriggersTableData list, as well as the tuplestores, are |
4976 | | * allocated in the current (sub)transaction's CurTransactionContext, and |
4977 | | * the tuplestores are managed by the (sub)transaction's resource owner. |
4978 | | * This is sufficient lifespan because we do not allow triggers using |
4979 | | * transition tables to be deferrable; they will be fired during |
4980 | | * AfterTriggerEndQuery, after which it's okay to delete the data. |
4981 | | */ |
4982 | 0 | table = GetAfterTriggersTableData(relid, cmdType); |
4983 | | |
4984 | | /* Now create required tuplestore(s), if we don't have them already. */ |
4985 | 0 | oldcxt = MemoryContextSwitchTo(CurTransactionContext); |
4986 | 0 | saveResourceOwner = CurrentResourceOwner; |
4987 | 0 | CurrentResourceOwner = CurTransactionResourceOwner; |
4988 | |
4989 | 0 | if (need_old_upd && table->old_upd_tuplestore == NULL) |
4990 | 0 | table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem); |
4991 | 0 | if (need_new_upd && table->new_upd_tuplestore == NULL) |
4992 | 0 | table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem); |
4993 | 0 | if (need_old_del && table->old_del_tuplestore == NULL) |
4994 | 0 | table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem); |
4995 | 0 | if (need_new_ins && table->new_ins_tuplestore == NULL) |
4996 | 0 | table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem); |
4997 | |
4998 | 0 | CurrentResourceOwner = saveResourceOwner; |
4999 | 0 | MemoryContextSwitchTo(oldcxt); |
5000 | | |
5001 | | /* Now build the TransitionCaptureState struct, in caller's context */ |
5002 | 0 | state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState)); |
5003 | 0 | state->tcs_delete_old_table = need_old_del; |
5004 | 0 | state->tcs_update_old_table = need_old_upd; |
5005 | 0 | state->tcs_update_new_table = need_new_upd; |
5006 | 0 | state->tcs_insert_new_table = need_new_ins; |
5007 | 0 | state->tcs_private = table; |
5008 | |
5009 | 0 | return state; |
5010 | 0 | } |
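/*
 * [Editor's illustration -- not part of trigger.c.  A sketch of how the
 * transition-table tuplestores created above surface to a user-level
 * statement trigger: the trigger must have been created with a REFERENCING
 * clause, and the documented SPI_register_trigger_data() call makes the
 * captured rows queryable under that name.  The function name and the
 * assumed transition-table name "newtab" are made up for the example.]
 */
#include "postgres.h"

#include "commands/trigger.h"
#include "executor/spi.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(example_count_transition_rows);

Datum
example_count_transition_rows(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "example_count_transition_rows: not called by trigger manager");

	if (SPI_connect() != SPI_OK_CONNECT)
		elog(ERROR, "SPI_connect failed");

	/* expose tg_oldtable/tg_newtable to SPI under their REFERENCING names */
	if (SPI_register_trigger_data(trigdata) != SPI_OK_TD_REGISTER)
		elog(ERROR, "SPI_register_trigger_data failed");

	if (trigdata->tg_trigger->tgnewtable != NULL)
	{
		/* assumes CREATE TRIGGER ... REFERENCING NEW TABLE AS newtab ... */
		if (SPI_execute("SELECT count(*) FROM newtab", true, 0) != SPI_OK_SELECT)
			elog(ERROR, "query over transition table failed");
		elog(NOTICE, "captured %llu new rows",
			 (unsigned long long) SPI_processed);
	}

	SPI_finish();

	/* the return value of an AFTER STATEMENT trigger is ignored */
	return PointerGetDatum(NULL);
}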
5011 | | |
5012 | | |
5013 | | /* ---------- |
5014 | | * AfterTriggerBeginXact() |
5015 | | * |
5016 | | * Called at transaction start (either BEGIN or implicit for single |
5017 | | * statement outside of transaction block). |
5018 | | * ---------- |
5019 | | */ |
5020 | | void |
5021 | | AfterTriggerBeginXact(void) |
5022 | 0 | { |
5023 | | /* |
5024 | | * Initialize after-trigger state structure to empty |
5025 | | */ |
5026 | 0 | afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */ |
5027 | 0 | afterTriggers.query_depth = -1; |
5028 | | |
5029 | | /* |
5030 | | * Verify that there is no leftover state remaining. If these assertions |
5031 | | * trip, it means that AfterTriggerEndXact wasn't called or didn't clean |
5032 | | * up properly. |
5033 | | */ |
5034 | 0 | Assert(afterTriggers.state == NULL); |
5035 | 0 | Assert(afterTriggers.query_stack == NULL); |
5036 | 0 | Assert(afterTriggers.maxquerydepth == 0); |
5037 | 0 | Assert(afterTriggers.event_cxt == NULL); |
5038 | 0 | Assert(afterTriggers.events.head == NULL); |
5039 | 0 | Assert(afterTriggers.trans_stack == NULL); |
5040 | 0 | Assert(afterTriggers.maxtransdepth == 0); |
5041 | 0 | } |
5042 | | |
5043 | | |
5044 | | /* ---------- |
5045 | | * AfterTriggerBeginQuery() |
5046 | | * |
5047 | | * Called just before we start processing a single query within a |
5048 | | * transaction (or subtransaction). Most of the real work gets deferred |
5049 | | * until somebody actually tries to queue a trigger event. |
5050 | | * ---------- |
5051 | | */ |
5052 | | void |
5053 | | AfterTriggerBeginQuery(void) |
5054 | 0 | { |
5055 | | /* Increase the query stack depth */ |
5056 | 0 | afterTriggers.query_depth++; |
5057 | 0 | } |
5058 | | |
5059 | | |
5060 | | /* ---------- |
5061 | | * AfterTriggerEndQuery() |
5062 | | * |
5063 | | * Called after one query has been completely processed. At this time |
5064 | | * we invoke all AFTER IMMEDIATE trigger events queued by the query, and |
5065 | | * transfer deferred trigger events to the global deferred-trigger list. |
5066 | | * |
5067 | | * Note that this must be called BEFORE closing down the executor |
5068 | | * with ExecutorEnd, because we make use of the EState's info about |
5069 | | * target relations. Normally it is called from ExecutorFinish. |
5070 | | * ---------- |
5071 | | */ |
5072 | | void |
5073 | | AfterTriggerEndQuery(EState *estate) |
5074 | 0 | { |
5075 | 0 | AfterTriggersQueryData *qs; |
5076 | | |
5077 | | /* Must be inside a query, too */ |
5078 | 0 | Assert(afterTriggers.query_depth >= 0); |
5079 | | |
5080 | | /* |
5081 | | * If we never even got as far as initializing the event stack, there |
5082 | | * certainly won't be any events, so exit quickly. |
5083 | | */ |
5084 | 0 | if (afterTriggers.query_depth >= afterTriggers.maxquerydepth) |
5085 | 0 | { |
5086 | 0 | afterTriggers.query_depth--; |
5087 | 0 | return; |
5088 | 0 | } |
5089 | | |
5090 | | /* |
5091 | | * Process all immediate-mode triggers queued by the query, and move the |
5092 | | * deferred ones to the main list of deferred events. |
5093 | | * |
5094 | | * Notice that we decide which ones will be fired, and put the deferred |
5095 | | * ones on the main list, before anything is actually fired. This ensures |
5096 | | * reasonably sane behavior if a trigger function does SET CONSTRAINTS ... |
5097 | | * IMMEDIATE: all events we have decided to defer will be available for it |
5098 | | * to fire. |
5099 | | * |
5100 | | * We loop in case a trigger queues more events at the same query level. |
5101 | | * Ordinary trigger functions, including all PL/pgSQL trigger functions, |
5102 | | * will instead fire any triggers in a dedicated query level. Foreign key |
5103 | | * enforcement triggers do add to the current query level, thanks to their |
5104 | | * passing fire_triggers = false to SPI_execute_snapshot(). Other |
5105 | | * C-language triggers might do likewise. |
5106 | | * |
5107 | | * If we find no firable events, we don't have to increment |
5108 | | * firing_counter. |
5109 | | */ |
5110 | 0 | qs = &afterTriggers.query_stack[afterTriggers.query_depth]; |
5111 | |
5112 | 0 | for (;;) |
5113 | 0 | { |
5114 | 0 | if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true)) |
5115 | 0 | { |
5116 | 0 | CommandId firing_id = afterTriggers.firing_counter++; |
5117 | 0 | AfterTriggerEventChunk *oldtail = qs->events.tail; |
5118 | |
5119 | 0 | if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false)) |
5120 | 0 | break; /* all fired */ |
5121 | | |
5122 | | /* |
5123 | | * Firing a trigger could result in query_stack being repalloc'd, |
5124 | | * so we must recalculate qs after each afterTriggerInvokeEvents |
5125 | | * call. Furthermore, it's unsafe to pass delete_ok = true here, |
5126 | | * because that could cause afterTriggerInvokeEvents to try to |
5127 | | * access qs->events after the stack has been repalloc'd. |
5128 | | */ |
5129 | 0 | qs = &afterTriggers.query_stack[afterTriggers.query_depth]; |
5130 | | |
5131 | | /* |
5132 | | * We'll need to scan the events list again. To reduce the cost |
5133 | | * of doing so, get rid of completely-fired chunks. We know that |
5134 | | * all events were marked IN_PROGRESS or DONE at the conclusion of |
5135 | | * afterTriggerMarkEvents, so any still-interesting events must |
5136 | | * have been added after that, and so must be in the chunk that |
5137 | | * was then the tail chunk, or in later chunks. So, zap all |
5138 | | * chunks before oldtail. This is approximately the same set of |
5139 | | * events we would have gotten rid of by passing delete_ok = true. |
5140 | | */ |
5141 | 0 | Assert(oldtail != NULL); |
5142 | 0 | while (qs->events.head != oldtail) |
5143 | 0 | afterTriggerDeleteHeadEventChunk(qs); |
5144 | 0 | } |
5145 | 0 | else |
5146 | 0 | break; |
5147 | 0 | } |
5148 | | |
5149 | | /* Release query-level-local storage, including tuplestores if any */ |
5150 | 0 | AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]); |
5151 | |
5152 | 0 | afterTriggers.query_depth--; |
5153 | 0 | } |
5154 | | |
5155 | | |
5156 | | /* |
5157 | | * AfterTriggerFreeQuery |
5158 | | * Release subsidiary storage for a trigger query level. |
5159 | | * This includes closing down tuplestores. |
5160 | | * Note: it's important for this to be safe if interrupted by an error |
5161 | | * and then called again for the same query level. |
5162 | | */ |
5163 | | static void |
5164 | | AfterTriggerFreeQuery(AfterTriggersQueryData *qs) |
5165 | 0 | { |
5166 | 0 | Tuplestorestate *ts; |
5167 | 0 | List *tables; |
5168 | 0 | ListCell *lc; |
5169 | | |
5170 | | /* Drop the trigger events */ |
5171 | 0 | afterTriggerFreeEventList(&qs->events); |
5172 | | |
5173 | | /* Drop FDW tuplestore if any */ |
5174 | 0 | ts = qs->fdw_tuplestore; |
5175 | 0 | qs->fdw_tuplestore = NULL; |
5176 | 0 | if (ts) |
5177 | 0 | tuplestore_end(ts); |
5178 | | |
5179 | | /* Release per-table subsidiary storage */ |
5180 | 0 | tables = qs->tables; |
5181 | 0 | foreach(lc, tables) |
5182 | 0 | { |
5183 | 0 | AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc); |
5184 | |
5185 | 0 | ts = table->old_upd_tuplestore; |
5186 | 0 | table->old_upd_tuplestore = NULL; |
5187 | 0 | if (ts) |
5188 | 0 | tuplestore_end(ts); |
5189 | 0 | ts = table->new_upd_tuplestore; |
5190 | 0 | table->new_upd_tuplestore = NULL; |
5191 | 0 | if (ts) |
5192 | 0 | tuplestore_end(ts); |
5193 | 0 | ts = table->old_del_tuplestore; |
5194 | 0 | table->old_del_tuplestore = NULL; |
5195 | 0 | if (ts) |
5196 | 0 | tuplestore_end(ts); |
5197 | 0 | ts = table->new_ins_tuplestore; |
5198 | 0 | table->new_ins_tuplestore = NULL; |
5199 | 0 | if (ts) |
5200 | 0 | tuplestore_end(ts); |
5201 | 0 | if (table->storeslot) |
5202 | 0 | { |
5203 | 0 | TupleTableSlot *slot = table->storeslot; |
5204 | |
5205 | 0 | table->storeslot = NULL; |
5206 | 0 | ExecDropSingleTupleTableSlot(slot); |
5207 | 0 | } |
5208 | 0 | } |
5209 | | |
5210 | | /* |
5211 | | * Now free the AfterTriggersTableData structs and list cells. Reset list |
5212 | | * pointer first; if list_free_deep somehow gets an error, better to leak |
5213 | | * that storage than have an infinite loop. |
5214 | | */ |
5215 | 0 | qs->tables = NIL; |
5216 | 0 | list_free_deep(tables); |
5217 | 0 | } |
5218 | | |
5219 | | |
5220 | | /* ---------- |
5221 | | * AfterTriggerFireDeferred() |
5222 | | * |
5223 | | * Called just before the current transaction is committed. At this |
5224 | | * time we invoke all pending DEFERRED triggers. |
5225 | | * |
5226 | | * It is possible for other modules to queue additional deferred triggers |
5227 | | * during pre-commit processing; therefore xact.c may have to call this |
5228 | | * multiple times. |
5229 | | * ---------- |
5230 | | */ |
5231 | | void |
5232 | | AfterTriggerFireDeferred(void) |
5233 | 0 | { |
5234 | 0 | AfterTriggerEventList *events; |
5235 | 0 | bool snap_pushed = false; |
5236 | | |
5237 | | /* Must not be inside a query */ |
5238 | 0 | Assert(afterTriggers.query_depth == -1); |
5239 | | |
5240 | | /* |
5241 | | * If there are any triggers to fire, make sure we have set a snapshot for |
5242 | | * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we |
5243 | | * can't assume ActiveSnapshot is valid on entry.) |
5244 | | */ |
5245 | 0 | events = &afterTriggers.events; |
5246 | 0 | if (events->head != NULL) |
5247 | 0 | { |
5248 | 0 | PushActiveSnapshot(GetTransactionSnapshot()); |
5249 | 0 | snap_pushed = true; |
5250 | 0 | } |
5251 | | |
5252 | | /* |
5253 | | * Run all the remaining triggers. Loop until they are all gone, in case |
5254 | | * some trigger queues more for us to do. |
5255 | | */ |
5256 | 0 | while (afterTriggerMarkEvents(events, NULL, false)) |
5257 | 0 | { |
5258 | 0 | CommandId firing_id = afterTriggers.firing_counter++; |
5259 | |
5260 | 0 | if (afterTriggerInvokeEvents(events, firing_id, NULL, true)) |
5261 | 0 | break; /* all fired */ |
5262 | 0 | } |
5263 | | |
5264 | | /* |
5265 | | * We don't bother freeing the event list, since it will go away anyway |
5266 | | * (and more efficiently than via pfree) in AfterTriggerEndXact. |
5267 | | */ |
5268 | |
5269 | 0 | if (snap_pushed) |
5270 | 0 | PopActiveSnapshot(); |
5271 | 0 | } |
5272 | | |
5273 | | |
5274 | | /* ---------- |
5275 | | * AfterTriggerEndXact() |
5276 | | * |
5277 | | * The current transaction is finishing. |
5278 | | * |
5279 | | * Any unfired triggers are canceled so we simply throw |
5280 | | * away anything we know. |
5281 | | * |
5282 | | * Note: it is possible for this to be called repeatedly in case of |
5283 | | * error during transaction abort; therefore, do not complain if |
5284 | | * already closed down. |
5285 | | * ---------- |
5286 | | */ |
5287 | | void |
5288 | | AfterTriggerEndXact(bool isCommit) |
5289 | 0 | { |
5290 | | /* |
5291 | | * Forget the pending-events list. |
5292 | | * |
5293 | | * Since all the info is in TopTransactionContext or children thereof, we |
5294 | | * don't really need to do anything to reclaim memory. However, the |
5295 | | * pending-events list could be large, and so it's useful to discard it as |
5296 | | * soon as possible --- especially if we are aborting because we ran out |
5297 | | * of memory for the list! |
5298 | | */ |
5299 | 0 | if (afterTriggers.event_cxt) |
5300 | 0 | { |
5301 | 0 | MemoryContextDelete(afterTriggers.event_cxt); |
5302 | 0 | afterTriggers.event_cxt = NULL; |
5303 | 0 | afterTriggers.events.head = NULL; |
5304 | 0 | afterTriggers.events.tail = NULL; |
5305 | 0 | afterTriggers.events.tailfree = NULL; |
5306 | 0 | } |
5307 | | |
5308 | | /* |
5309 | | * Forget any subtransaction state as well. Since this can't be very |
5310 | | * large, we let the eventual reset of TopTransactionContext free the |
5311 | | * memory instead of doing it here. |
5312 | | */ |
5313 | 0 | afterTriggers.trans_stack = NULL; |
5314 | 0 | afterTriggers.maxtransdepth = 0; |
5315 | | |
5316 | | |
5317 | | /* |
5318 | | * Forget the query stack and constraint-related state information. As |
5319 | | * with the subtransaction state information, we don't bother freeing the |
5320 | | * memory here. |
5321 | | */ |
5322 | 0 | afterTriggers.query_stack = NULL; |
5323 | 0 | afterTriggers.maxquerydepth = 0; |
5324 | 0 | afterTriggers.state = NULL; |
5325 | | |
5326 | | /* No more afterTriggers manipulation until next transaction starts. */ |
5327 | 0 | afterTriggers.query_depth = -1; |
5328 | 0 | } |
5329 | | |
5330 | | /* |
5331 | | * AfterTriggerBeginSubXact() |
5332 | | * |
5333 | | * Start a subtransaction. |
5334 | | */ |
5335 | | void |
5336 | | AfterTriggerBeginSubXact(void) |
5337 | 0 | { |
5338 | 0 | int my_level = GetCurrentTransactionNestLevel(); |
5339 | | |
5340 | | /* |
5341 | | * Allocate more space in the trans_stack if needed. (Note: because the |
5342 | | * minimum nest level of a subtransaction is 2, we waste the first couple |
5343 | | * entries of the array; not worth the notational effort to avoid it.) |
5344 | | */ |
5345 | 0 | while (my_level >= afterTriggers.maxtransdepth) |
5346 | 0 | { |
5347 | 0 | if (afterTriggers.maxtransdepth == 0) |
5348 | 0 | { |
5349 | | /* Arbitrarily initialize for max of 8 subtransaction levels */ |
5350 | 0 | afterTriggers.trans_stack = (AfterTriggersTransData *) |
5351 | 0 | MemoryContextAlloc(TopTransactionContext, |
5352 | 0 | 8 * sizeof(AfterTriggersTransData)); |
5353 | 0 | afterTriggers.maxtransdepth = 8; |
5354 | 0 | } |
5355 | 0 | else |
5356 | 0 | { |
5357 | | /* repalloc will keep the stack in the same context */ |
5358 | 0 | int new_alloc = afterTriggers.maxtransdepth * 2; |
5359 | |
5360 | 0 | afterTriggers.trans_stack = (AfterTriggersTransData *) |
5361 | 0 | repalloc(afterTriggers.trans_stack, |
5362 | 0 | new_alloc * sizeof(AfterTriggersTransData)); |
5363 | 0 | afterTriggers.maxtransdepth = new_alloc; |
5364 | 0 | } |
5365 | 0 | } |
5366 | | |
5367 | | /* |
5368 | | * Push the current information into the stack. The SET CONSTRAINTS state |
5369 | | * is not saved until/unless changed. Likewise, we don't make a |
5370 | | * per-subtransaction event context until needed. |
5371 | | */ |
5372 | 0 | afterTriggers.trans_stack[my_level].state = NULL; |
5373 | 0 | afterTriggers.trans_stack[my_level].events = afterTriggers.events; |
5374 | 0 | afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth; |
5375 | 0 | afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter; |
5376 | 0 | } |
5377 | | |
5378 | | /* |
5379 | | * AfterTriggerEndSubXact() |
5380 | | * |
5381 | | * The current subtransaction is ending. |
5382 | | */ |
5383 | | void |
5384 | | AfterTriggerEndSubXact(bool isCommit) |
5385 | 0 | { |
5386 | 0 | int my_level = GetCurrentTransactionNestLevel(); |
5387 | 0 | SetConstraintState state; |
5388 | 0 | AfterTriggerEvent event; |
5389 | 0 | AfterTriggerEventChunk *chunk; |
5390 | 0 | CommandId subxact_firing_id; |
5391 | | |
5392 | | /* |
5393 | | * Pop the prior state if needed. |
5394 | | */ |
5395 | 0 | if (isCommit) |
5396 | 0 | { |
5397 | 0 | Assert(my_level < afterTriggers.maxtransdepth); |
5398 | | /* If we saved a prior state, we don't need it anymore */ |
5399 | 0 | state = afterTriggers.trans_stack[my_level].state; |
5400 | 0 | if (state != NULL) |
5401 | 0 | pfree(state); |
5402 | | /* this avoids double pfree if error later: */ |
5403 | 0 | afterTriggers.trans_stack[my_level].state = NULL; |
5404 | 0 | Assert(afterTriggers.query_depth == |
5405 | 0 | afterTriggers.trans_stack[my_level].query_depth); |
5406 | 0 | } |
5407 | 0 | else |
5408 | 0 | { |
5409 | | /* |
5410 | | * Aborting. It is possible subxact start failed before calling |
5411 | | * AfterTriggerBeginSubXact, in which case we mustn't risk touching |
5412 | | * trans_stack levels that aren't there. |
5413 | | */ |
5414 | 0 | if (my_level >= afterTriggers.maxtransdepth) |
5415 | 0 | return; |
5416 | | |
5417 | | /* |
5418 | | * Release query-level storage for queries being aborted, and restore |
5419 | | * query_depth to its pre-subxact value. This assumes that a |
5420 | | * subtransaction will not add events to query levels started in an |
5421 | | * earlier transaction state. |
5422 | | */ |
5423 | 0 | while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth) |
5424 | 0 | { |
5425 | 0 | if (afterTriggers.query_depth < afterTriggers.maxquerydepth) |
5426 | 0 | AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]); |
5427 | 0 | afterTriggers.query_depth--; |
5428 | 0 | } |
5429 | 0 | Assert(afterTriggers.query_depth == |
5430 | 0 | afterTriggers.trans_stack[my_level].query_depth); |
5431 | | |
5432 | | /* |
5433 | | * Restore the global deferred-event list to its former length, |
5434 | | * discarding any events queued by the subxact. |
5435 | | */ |
5436 | 0 | afterTriggerRestoreEventList(&afterTriggers.events, |
5437 | 0 | &afterTriggers.trans_stack[my_level].events); |
5438 | | |
5439 | | /* |
5440 | | * Restore the trigger state. If the saved state is NULL, then this |
5441 | | * subxact didn't save it, so it doesn't need restoring. |
5442 | | */ |
5443 | 0 | state = afterTriggers.trans_stack[my_level].state; |
5444 | 0 | if (state != NULL) |
5445 | 0 | { |
5446 | 0 | pfree(afterTriggers.state); |
5447 | 0 | afterTriggers.state = state; |
5448 | 0 | } |
5449 | | /* this avoids double pfree if error later: */ |
5450 | 0 | afterTriggers.trans_stack[my_level].state = NULL; |
5451 | | |
5452 | | /* |
5453 | | * Scan for any remaining deferred events that were marked DONE or IN |
5454 | | * PROGRESS by this subxact or a child, and un-mark them. We can |
5455 | | * recognize such events because they have a firing ID greater than or |
5456 | | * equal to the firing_counter value we saved at subtransaction start. |
5457 | | * (This essentially assumes that the current subxact includes all |
5458 | | * subxacts started after it.) |
5459 | | */ |
5460 | 0 | subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter; |
5461 | 0 | for_each_event_chunk(event, chunk, afterTriggers.events) |
5462 | 0 | { |
5463 | 0 | AfterTriggerShared evtshared = GetTriggerSharedData(event); |
5464 | |
5465 | 0 | if (event->ate_flags & |
5466 | 0 | (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)) |
5467 | 0 | { |
5468 | 0 | if (evtshared->ats_firing_id >= subxact_firing_id) |
5469 | 0 | event->ate_flags &= |
5470 | 0 | ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS); |
5471 | 0 | } |
5472 | 0 | } |
5473 | 0 | } |
5474 | 0 | } |
5475 | | |
5476 | | /* |
5477 | | * Get the transition table for the given event, depending on whether we are |
5478 | | * processing the old or the new tuple. |
5479 | | */ |
5480 | | static Tuplestorestate * |
5481 | | GetAfterTriggersTransitionTable(int event, |
5482 | | TupleTableSlot *oldslot, |
5483 | | TupleTableSlot *newslot, |
5484 | | TransitionCaptureState *transition_capture) |
5485 | 0 | { |
5486 | 0 | Tuplestorestate *tuplestore = NULL; |
5487 | 0 | bool delete_old_table = transition_capture->tcs_delete_old_table; |
5488 | 0 | bool update_old_table = transition_capture->tcs_update_old_table; |
5489 | 0 | bool update_new_table = transition_capture->tcs_update_new_table; |
5490 | 0 | bool insert_new_table = transition_capture->tcs_insert_new_table; |
5491 | | |
5492 | | /* |
5493 | | * For INSERT events NEW should be non-NULL, for DELETE events OLD should |
5494 | | * be non-NULL, whereas for UPDATE events normally both OLD and NEW are |
5495 | | * non-NULL. But for UPDATE events fired for capturing transition tuples |
5496 | | * during UPDATE partition-key row movement, OLD is NULL when the event is |
5497 | | * for a row being inserted, whereas NEW is NULL when the event is for a |
5498 | | * row being deleted. |
5499 | | */ |
5500 | 0 | Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table && |
5501 | 0 | TupIsNull(oldslot))); |
5502 | 0 | Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table && |
5503 | 0 | TupIsNull(newslot))); |
5504 | |
5505 | 0 | if (!TupIsNull(oldslot)) |
5506 | 0 | { |
5507 | 0 | Assert(TupIsNull(newslot)); |
5508 | 0 | if (event == TRIGGER_EVENT_DELETE && delete_old_table) |
5509 | 0 | tuplestore = transition_capture->tcs_private->old_del_tuplestore; |
5510 | 0 | else if (event == TRIGGER_EVENT_UPDATE && update_old_table) |
5511 | 0 | tuplestore = transition_capture->tcs_private->old_upd_tuplestore; |
5512 | 0 | } |
5513 | 0 | else if (!TupIsNull(newslot)) |
5514 | 0 | { |
5515 | 0 | Assert(TupIsNull(oldslot)); |
5516 | 0 | if (event == TRIGGER_EVENT_INSERT && insert_new_table) |
5517 | 0 | tuplestore = transition_capture->tcs_private->new_ins_tuplestore; |
5518 | 0 | else if (event == TRIGGER_EVENT_UPDATE && update_new_table) |
5519 | 0 | tuplestore = transition_capture->tcs_private->new_upd_tuplestore; |
5520 | 0 | } |
5521 | |
5522 | 0 | return tuplestore; |
5523 | 0 | } |
5524 | | |
5525 | | /* |
5526 | | * Add the given heap tuple to the given tuplestore, applying the conversion |
5527 | | * map if necessary. |
5528 | | * |
5529 | | * If original_insert_tuple is given, we can add that tuple without conversion. |
5530 | | */ |
5531 | | static void |
5532 | | TransitionTableAddTuple(EState *estate, |
5533 | | TransitionCaptureState *transition_capture, |
5534 | | ResultRelInfo *relinfo, |
5535 | | TupleTableSlot *slot, |
5536 | | TupleTableSlot *original_insert_tuple, |
5537 | | Tuplestorestate *tuplestore) |
5538 | 0 | { |
5539 | 0 | TupleConversionMap *map; |
5540 | | |
5541 | | /* |
5542 | | * Nothing needs to be done if we don't have a tuplestore. |
5543 | | */ |
5544 | 0 | if (tuplestore == NULL) |
5545 | 0 | return; |
5546 | | |
5547 | 0 | if (original_insert_tuple) |
5548 | 0 | tuplestore_puttupleslot(tuplestore, original_insert_tuple); |
5549 | 0 | else if ((map = ExecGetChildToRootMap(relinfo)) != NULL) |
5550 | 0 | { |
5551 | 0 | AfterTriggersTableData *table = transition_capture->tcs_private; |
5552 | 0 | TupleTableSlot *storeslot; |
5553 | |
5554 | 0 | storeslot = GetAfterTriggersStoreSlot(table, map->outdesc); |
5555 | 0 | execute_attr_map_slot(map->attrMap, slot, storeslot); |
5556 | 0 | tuplestore_puttupleslot(tuplestore, storeslot); |
5557 | 0 | } |
5558 | 0 | else |
5559 | 0 | tuplestore_puttupleslot(tuplestore, slot); |
5560 | 0 | } |
5561 | | |
5562 | | /* ---------- |
5563 | | * AfterTriggerEnlargeQueryState() |
5564 | | * |
5565 | | * Prepare the necessary state so that we can record AFTER trigger events |
5566 | | * queued by a query. It is allowed to have nested queries within a |
5567 | | * (sub)transaction, so we need to have separate state for each query |
5568 | | * nesting level. |
5569 | | * ---------- |
5570 | | */ |
5571 | | static void |
5572 | | AfterTriggerEnlargeQueryState(void) |
5573 | 0 | { |
5574 | 0 | int init_depth = afterTriggers.maxquerydepth; |
5575 | |
5576 | 0 | Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth); |
5577 | |
5578 | 0 | if (afterTriggers.maxquerydepth == 0) |
5579 | 0 | { |
5580 | 0 | int new_alloc = Max(afterTriggers.query_depth + 1, 8); |
5581 | |
5582 | 0 | afterTriggers.query_stack = (AfterTriggersQueryData *) |
5583 | 0 | MemoryContextAlloc(TopTransactionContext, |
5584 | 0 | new_alloc * sizeof(AfterTriggersQueryData)); |
5585 | 0 | afterTriggers.maxquerydepth = new_alloc; |
5586 | 0 | } |
5587 | 0 | else |
5588 | 0 | { |
5589 | | /* repalloc will keep the stack in the same context */ |
5590 | 0 | int old_alloc = afterTriggers.maxquerydepth; |
5591 | 0 | int new_alloc = Max(afterTriggers.query_depth + 1, |
5592 | 0 | old_alloc * 2); |
5593 | |
5594 | 0 | afterTriggers.query_stack = (AfterTriggersQueryData *) |
5595 | 0 | repalloc(afterTriggers.query_stack, |
5596 | 0 | new_alloc * sizeof(AfterTriggersQueryData)); |
5597 | 0 | afterTriggers.maxquerydepth = new_alloc; |
5598 | 0 | } |
5599 | | |
5600 | | /* Initialize new array entries to empty */ |
5601 | 0 | while (init_depth < afterTriggers.maxquerydepth) |
5602 | 0 | { |
5603 | 0 | AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth]; |
5604 | |
5605 | 0 | qs->events.head = NULL; |
5606 | 0 | qs->events.tail = NULL; |
5607 | 0 | qs->events.tailfree = NULL; |
5608 | 0 | qs->fdw_tuplestore = NULL; |
5609 | 0 | qs->tables = NIL; |
5610 | |
5611 | 0 | ++init_depth; |
5612 | 0 | } |
5613 | 0 | } |
5614 | | |
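/*
 * A standalone sketch of the same grow-on-demand policy, using plain
 * malloc/realloc in place of the backend's MemoryContextAlloc/repalloc.
 * All names below are illustrative and not PostgreSQL APIs.
 */
#include <stdlib.h>

typedef struct
{
	void	   *events_head;	/* stands in for a per-query state entry */
} DemoQueryData;

static DemoQueryData *demo_stack = NULL;
static int	demo_maxdepth = 0;

static void
demo_enlarge(int query_depth)
{
	int			init_depth = demo_maxdepth;
	int			new_alloc;

	/* first allocation: at least 8 entries; afterwards: at least double */
	if (demo_maxdepth == 0)
		new_alloc = (query_depth + 1 > 8) ? query_depth + 1 : 8;
	else
		new_alloc = (query_depth + 1 > demo_maxdepth * 2) ?
			query_depth + 1 : demo_maxdepth * 2;

	/* realloc(NULL, n) acts like malloc(n), so one call handles both cases */
	demo_stack = realloc(demo_stack, new_alloc * sizeof(DemoQueryData));
	if (demo_stack == NULL)
		abort();				/* allocation-failure handling elided */
	demo_maxdepth = new_alloc;

	/* initialize only the newly added entries, as the function above does */
	while (init_depth < demo_maxdepth)
		demo_stack[init_depth++].events_head = NULL;
}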
5615 | | /* |
5616 | | * Create an empty SetConstraintState with room for numalloc trigstates |
5617 | | */ |
5618 | | static SetConstraintState |
5619 | | SetConstraintStateCreate(int numalloc) |
5620 | 0 | { |
5621 | 0 | SetConstraintState state; |
5622 | | |
5623 | | /* Behave sanely with numalloc == 0 */ |
5624 | 0 | if (numalloc <= 0) |
5625 | 0 | numalloc = 1; |
5626 | | |
5627 | | /* |
5628 | | * We assume that zeroing will correctly initialize the state values. |
5629 | | */ |
5630 | 0 | state = (SetConstraintState) |
5631 | 0 | MemoryContextAllocZero(TopTransactionContext, |
5632 | 0 | offsetof(SetConstraintStateData, trigstates) + |
5633 | 0 | numalloc * sizeof(SetConstraintTriggerData)); |
5634 | |
5635 | 0 | state->numalloc = numalloc; |
5636 | |
5637 | 0 | return state; |
5638 | 0 | } |
5639 | | |
5640 | | /* |
5641 | | * Copy a SetConstraintState |
5642 | | */ |
5643 | | static SetConstraintState |
5644 | | SetConstraintStateCopy(SetConstraintState origstate) |
5645 | 0 | { |
5646 | 0 | SetConstraintState state; |
5647 | |
5648 | 0 | state = SetConstraintStateCreate(origstate->numstates); |
5649 | |
5650 | 0 | state->all_isset = origstate->all_isset; |
5651 | 0 | state->all_isdeferred = origstate->all_isdeferred; |
5652 | 0 | state->numstates = origstate->numstates; |
5653 | 0 | memcpy(state->trigstates, origstate->trigstates, |
5654 | 0 | origstate->numstates * sizeof(SetConstraintTriggerData)); |
5655 | |
5656 | 0 | return state; |
5657 | 0 | } |
5658 | | |
5659 | | /* |
5660 | | * Add a per-trigger item to a SetConstraintState. Returns possibly-changed |
5661 | | * pointer to the state object (it will change if we have to repalloc). |
5662 | | */ |
5663 | | static SetConstraintState |
5664 | | SetConstraintStateAddItem(SetConstraintState state, |
5665 | | Oid tgoid, bool tgisdeferred) |
5666 | 0 | { |
5667 | 0 | if (state->numstates >= state->numalloc) |
5668 | 0 | { |
5669 | 0 | int newalloc = state->numalloc * 2; |
5670 | |
5671 | 0 | newalloc = Max(newalloc, 8); /* in case original has size 0 */ |
5672 | 0 | state = (SetConstraintState) |
5673 | 0 | repalloc(state, |
5674 | 0 | offsetof(SetConstraintStateData, trigstates) + |
5675 | 0 | newalloc * sizeof(SetConstraintTriggerData)); |
5676 | 0 | state->numalloc = newalloc; |
5677 | 0 | Assert(state->numstates < state->numalloc); |
5678 | 0 | } |
5679 | |
5680 | 0 | state->trigstates[state->numstates].sct_tgoid = tgoid; |
5681 | 0 | state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred; |
5682 | 0 | state->numstates++; |
5683 | |
5684 | 0 | return state; |
5685 | 0 | } |
5686 | | |
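/*
 * A standalone sketch of the allocation pattern used by the three
 * SetConstraintState functions above: a header with a flexible array
 * member, sized via offsetof() and grown by doubling.  The names below are
 * illustrative only, and allocation-failure handling is elided.
 */
#include <stddef.h>
#include <stdlib.h>

typedef struct
{
	unsigned int tgoid;
	int			isdeferred;
} DemoTrigState;

typedef struct
{
	int			numstates;		/* entries currently in use */
	int			numalloc;		/* allocated length of trigstates[] */
	DemoTrigState trigstates[]; /* flexible array member */
} DemoConstraintState;

static DemoConstraintState *
demo_state_create(int numalloc)
{
	DemoConstraintState *state;

	if (numalloc <= 0)
		numalloc = 1;
	state = calloc(1, offsetof(DemoConstraintState, trigstates) +
				   numalloc * sizeof(DemoTrigState));
	state->numalloc = numalloc;
	return state;
}

static DemoConstraintState *
demo_state_add(DemoConstraintState *state, unsigned int tgoid, int isdeferred)
{
	if (state->numstates >= state->numalloc)
	{
		int			newalloc = state->numalloc * 2;

		state = realloc(state, offsetof(DemoConstraintState, trigstates) +
						newalloc * sizeof(DemoTrigState));
		state->numalloc = newalloc;
	}
	state->trigstates[state->numstates].tgoid = tgoid;
	state->trigstates[state->numstates].isdeferred = isdeferred;
	state->numstates++;

	/* as in the original, the pointer may move; callers must use the result */
	return state;
}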
5687 | | /* ---------- |
5688 | | * AfterTriggerSetState() |
5689 | | * |
5690 | | * Execute the SET CONSTRAINTS ... utility command. |
5691 | | * ---------- |
5692 | | */ |
5693 | | void |
5694 | | AfterTriggerSetState(ConstraintsSetStmt *stmt) |
5695 | 0 | { |
5696 | 0 | int my_level = GetCurrentTransactionNestLevel(); |
5697 | | |
5698 | | /* If we haven't already done so, initialize our state. */ |
5699 | 0 | if (afterTriggers.state == NULL) |
5700 | 0 | afterTriggers.state = SetConstraintStateCreate(8); |
5701 | | |
5702 | | /* |
5703 | | * If in a subtransaction, and we didn't save the current state already, |
5704 | | * save it so it can be restored if the subtransaction aborts. |
5705 | | */ |
5706 | 0 | if (my_level > 1 && |
5707 | 0 | afterTriggers.trans_stack[my_level].state == NULL) |
5708 | 0 | { |
5709 | 0 | afterTriggers.trans_stack[my_level].state = |
5710 | 0 | SetConstraintStateCopy(afterTriggers.state); |
5711 | 0 | } |
5712 | | |
5713 | | /* |
5714 | | * Handle SET CONSTRAINTS ALL ... |
5715 | | */ |
5716 | 0 | if (stmt->constraints == NIL) |
5717 | 0 | { |
5718 | | /* |
5719 | | * Forget any previous SET CONSTRAINTS commands in this transaction. |
5720 | | */ |
5721 | 0 | afterTriggers.state->numstates = 0; |
5722 | | |
5723 | | /* |
5724 | | * Set the per-transaction ALL state to known. |
5725 | | */ |
5726 | 0 | afterTriggers.state->all_isset = true; |
5727 | 0 | afterTriggers.state->all_isdeferred = stmt->deferred; |
5728 | 0 | } |
5729 | 0 | else |
5730 | 0 | { |
5731 | 0 | Relation conrel; |
5732 | 0 | Relation tgrel; |
5733 | 0 | List *conoidlist = NIL; |
5734 | 0 | List *tgoidlist = NIL; |
5735 | 0 | ListCell *lc; |
5736 | | |
5737 | | /* |
5738 | | * Handle SET CONSTRAINTS constraint-name [, ...] |
5739 | | * |
5740 | | * First, identify all the named constraints and make a list of their |
5741 | | * OIDs. Since, unlike the SQL spec, we allow multiple constraints of |
5742 | | * the same name within a schema, the specifications are not |
5743 | | * necessarily unique. Our strategy is to target all matching |
5744 | | * constraints within the first search-path schema that has any |
5745 | | * matches, but disregard matches in schemas beyond the first match. |
5746 | | * (This is a bit odd but it's the historical behavior.) |
5747 | | * |
5748 | | * A constraint in a partitioned table may have corresponding |
5749 | | * constraints in the partitions. Grab those too. |
5750 | | */ |
5751 | 0 | conrel = table_open(ConstraintRelationId, AccessShareLock); |
5752 | |
5753 | 0 | foreach(lc, stmt->constraints) |
5754 | 0 | { |
5755 | 0 | RangeVar *constraint = lfirst(lc); |
5756 | 0 | bool found; |
5757 | 0 | List *namespacelist; |
5758 | 0 | ListCell *nslc; |
5759 | |
5760 | 0 | if (constraint->catalogname) |
5761 | 0 | { |
5762 | 0 | if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0) |
5763 | 0 | ereport(ERROR, |
5764 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
5765 | 0 | errmsg("cross-database references are not implemented: \"%s.%s.%s\"", |
5766 | 0 | constraint->catalogname, constraint->schemaname, |
5767 | 0 | constraint->relname))); |
5768 | 0 | } |
5769 | | |
5770 | | /* |
5771 | | * If we're given the schema name with the constraint, look only |
5772 | | * in that schema. If given a bare constraint name, use the |
5773 | | * search path to find the first matching constraint. |
5774 | | */ |
5775 | 0 | if (constraint->schemaname) |
5776 | 0 | { |
5777 | 0 | Oid namespaceId = LookupExplicitNamespace(constraint->schemaname, |
5778 | 0 | false); |
5779 | |
5780 | 0 | namespacelist = list_make1_oid(namespaceId); |
5781 | 0 | } |
5782 | 0 | else |
5783 | 0 | { |
5784 | 0 | namespacelist = fetch_search_path(true); |
5785 | 0 | } |
5786 | |
5787 | 0 | found = false; |
5788 | 0 | foreach(nslc, namespacelist) |
5789 | 0 | { |
5790 | 0 | Oid namespaceId = lfirst_oid(nslc); |
5791 | 0 | SysScanDesc conscan; |
5792 | 0 | ScanKeyData skey[2]; |
5793 | 0 | HeapTuple tup; |
5794 | |
5795 | 0 | ScanKeyInit(&skey[0], |
5796 | 0 | Anum_pg_constraint_conname, |
5797 | 0 | BTEqualStrategyNumber, F_NAMEEQ, |
5798 | 0 | CStringGetDatum(constraint->relname)); |
5799 | 0 | ScanKeyInit(&skey[1], |
5800 | 0 | Anum_pg_constraint_connamespace, |
5801 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
5802 | 0 | ObjectIdGetDatum(namespaceId)); |
5803 | |
5804 | 0 | conscan = systable_beginscan(conrel, ConstraintNameNspIndexId, |
5805 | 0 | true, NULL, 2, skey); |
5806 | |
5807 | 0 | while (HeapTupleIsValid(tup = systable_getnext(conscan))) |
5808 | 0 | { |
5809 | 0 | Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup); |
5810 | |
5811 | 0 | if (con->condeferrable) |
5812 | 0 | conoidlist = lappend_oid(conoidlist, con->oid); |
5813 | 0 | else if (stmt->deferred) |
5814 | 0 | ereport(ERROR, |
5815 | 0 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
5816 | 0 | errmsg("constraint \"%s\" is not deferrable", |
5817 | 0 | constraint->relname))); |
5818 | 0 | found = true; |
5819 | 0 | } |
5820 | | |
5821 | 0 | systable_endscan(conscan); |
5822 | | |
5823 | | /* |
5824 | | * Once we've found a matching constraint we do not search |
5825 | | * later parts of the search path. |
5826 | | */ |
5827 | 0 | if (found) |
5828 | 0 | break; |
5829 | 0 | } |
5830 | | |
5831 | 0 | list_free(namespacelist); |
5832 | | |
5833 | | /* |
5834 | | * Not found?
5835 | | */ |
5836 | 0 | if (!found) |
5837 | 0 | ereport(ERROR, |
5838 | 0 | (errcode(ERRCODE_UNDEFINED_OBJECT), |
5839 | 0 | errmsg("constraint \"%s\" does not exist", |
5840 | 0 | constraint->relname))); |
5841 | 0 | } |
5842 | | |
5843 | | /* |
5844 | | * Scan for any possible descendants of the constraints. We append |
5845 | | * whatever we find to the same list that we're scanning; this has the |
5846 | | * effect that we create new scans for those, too, so if there are |
5847 | | * further descendants, we'll also catch them.
5848 | | */ |
5849 | 0 | foreach(lc, conoidlist) |
5850 | 0 | { |
5851 | 0 | Oid parent = lfirst_oid(lc); |
5852 | 0 | ScanKeyData key; |
5853 | 0 | SysScanDesc scan; |
5854 | 0 | HeapTuple tuple; |
5855 | |
5856 | 0 | ScanKeyInit(&key, |
5857 | 0 | Anum_pg_constraint_conparentid, |
5858 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
5859 | 0 | ObjectIdGetDatum(parent)); |
5860 | |
5861 | 0 | scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key); |
5862 | |
5863 | 0 | while (HeapTupleIsValid(tuple = systable_getnext(scan))) |
5864 | 0 | { |
5865 | 0 | Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple); |
5866 | |
5867 | 0 | conoidlist = lappend_oid(conoidlist, con->oid); |
5868 | 0 | } |
5869 | |
5870 | 0 | systable_endscan(scan); |
5871 | 0 | } |
5872 | |
5873 | 0 | table_close(conrel, AccessShareLock); |
5874 | | |
5875 | | /* |
5876 | | * Now, locate the trigger(s) implementing each of these constraints, |
5877 | | * and make a list of their OIDs. |
5878 | | */ |
5879 | 0 | tgrel = table_open(TriggerRelationId, AccessShareLock); |
5880 | |
5881 | 0 | foreach(lc, conoidlist) |
5882 | 0 | { |
5883 | 0 | Oid conoid = lfirst_oid(lc); |
5884 | 0 | ScanKeyData skey; |
5885 | 0 | SysScanDesc tgscan; |
5886 | 0 | HeapTuple htup; |
5887 | |
5888 | 0 | ScanKeyInit(&skey, |
5889 | 0 | Anum_pg_trigger_tgconstraint, |
5890 | 0 | BTEqualStrategyNumber, F_OIDEQ, |
5891 | 0 | ObjectIdGetDatum(conoid)); |
5892 | |
5893 | 0 | tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true, |
5894 | 0 | NULL, 1, &skey); |
5895 | |
5896 | 0 | while (HeapTupleIsValid(htup = systable_getnext(tgscan))) |
5897 | 0 | { |
5898 | 0 | Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup); |
5899 | | |
5900 | | /* |
5901 | | * Silently skip triggers that are marked as non-deferrable in |
5902 | | * pg_trigger. This is not an error condition, since a |
5903 | | * deferrable RI constraint may have some non-deferrable |
5904 | | * actions. |
5905 | | */ |
5906 | 0 | if (pg_trigger->tgdeferrable) |
5907 | 0 | tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid); |
5908 | 0 | } |
5909 | |
5910 | 0 | systable_endscan(tgscan); |
5911 | 0 | } |
5912 | |
5913 | 0 | table_close(tgrel, AccessShareLock); |
5914 | | |
5915 | | /* |
5916 | | * Now we can set the trigger states of individual triggers for this |
5917 | | * xact. |
5918 | | */ |
5919 | 0 | foreach(lc, tgoidlist) |
5920 | 0 | { |
5921 | 0 | Oid tgoid = lfirst_oid(lc); |
5922 | 0 | SetConstraintState state = afterTriggers.state; |
5923 | 0 | bool found = false; |
5924 | 0 | int i; |
5925 | |
5926 | 0 | for (i = 0; i < state->numstates; i++) |
5927 | 0 | { |
5928 | 0 | if (state->trigstates[i].sct_tgoid == tgoid) |
5929 | 0 | { |
5930 | 0 | state->trigstates[i].sct_tgisdeferred = stmt->deferred; |
5931 | 0 | found = true; |
5932 | 0 | break; |
5933 | 0 | } |
5934 | 0 | } |
5935 | 0 | if (!found) |
5936 | 0 | { |
5937 | 0 | afterTriggers.state = |
5938 | 0 | SetConstraintStateAddItem(state, tgoid, stmt->deferred); |
5939 | 0 | } |
5940 | 0 | } |
5941 | 0 | } |
5942 | | |
5943 | | /* |
5944 | | * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred |
5945 | | * checks against that constraint must be made when the SET CONSTRAINTS |
5946 | | * command is executed -- i.e. the effects of the SET CONSTRAINTS command |
5947 | | * apply retroactively. We've updated the constraints state, so scan the |
5948 | | * list of previously deferred events to fire any that have now become |
5949 | | * immediate. |
5950 | | * |
5951 | | * Obviously, if this was SET ... DEFERRED then it can't have converted |
5952 | | * any unfired events to immediate, so we need do nothing in that case. |
5953 | | */ |
5954 | 0 | if (!stmt->deferred) |
5955 | 0 | { |
5956 | 0 | AfterTriggerEventList *events = &afterTriggers.events; |
5957 | 0 | bool snapshot_set = false; |
5958 | |
5959 | 0 | while (afterTriggerMarkEvents(events, NULL, true)) |
5960 | 0 | { |
5961 | 0 | CommandId firing_id = afterTriggers.firing_counter++; |
5962 | | |
5963 | | /* |
5964 | | * Make sure a snapshot has been established in case trigger |
5965 | | * functions need one. Note that we avoid setting a snapshot if |
5966 | | * we don't find at least one trigger that has to be fired now. |
5967 | | * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION |
5968 | | * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are |
5969 | | * at the start of a transaction it's not possible for any trigger |
5970 | | * events to be queued yet.) |
5971 | | */ |
5972 | 0 | if (!snapshot_set) |
5973 | 0 | { |
5974 | 0 | PushActiveSnapshot(GetTransactionSnapshot()); |
5975 | 0 | snapshot_set = true; |
5976 | 0 | } |
5977 | | |
5978 | | /* |
5979 | | * We can delete fired events if we are at top transaction level, |
5980 | | * but we'd better not if inside a subtransaction, since the |
5981 | | * subtransaction could later get rolled back. |
5982 | | */ |
5983 | 0 | if (afterTriggerInvokeEvents(events, firing_id, NULL, |
5984 | 0 | !IsSubTransaction())) |
5985 | 0 | break; /* all fired */ |
5986 | 0 | } |
5987 | |
5988 | 0 | if (snapshot_set) |
5989 | 0 | PopActiveSnapshot(); |
5990 | 0 | } |
5991 | 0 | } |
5992 | | |
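/*
 * For illustration, the user-visible behavior implemented above (object
 * names are hypothetical):
 *
 *		CREATE TABLE parent (id int PRIMARY KEY);
 *		CREATE TABLE child  (pid int REFERENCES parent
 *							 DEFERRABLE INITIALLY IMMEDIATE);
 *
 *		BEGIN;
 *		SET CONSTRAINTS ALL DEFERRED;
 *		INSERT INTO child VALUES (1);	-- no matching parent; check is queued
 *		SET CONSTRAINTS ALL IMMEDIATE;	-- queued check fires here and errors
 *
 * Switching back to IMMEDIATE fires any already-queued deferred events right
 * away, which is the retroactive behavior described in the comment near the
 * end of the function.
 */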
5993 | | /* ---------- |
5994 | | * AfterTriggerPendingOnRel() |
5995 | | * Test to see if there are any pending after-trigger events for rel. |
5996 | | * |
5997 | | * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether |
5998 | | * it is unsafe to perform major surgery on a relation. Note that only |
5999 | | * local pending events are examined. We assume that having exclusive lock |
6000 | | * on a rel guarantees there are no unserviced events in other backends --- |
6001 | | * but having a lock does not prevent there being such events in our own. |
6002 | | * |
6003 | | * In some scenarios it'd be reasonable to remove pending events (more |
6004 | | * specifically, mark them DONE by the current subxact) but without a lot |
6005 | | * of knowledge of the trigger semantics we can't do this in general. |
6006 | | * ---------- |
6007 | | */ |
6008 | | bool |
6009 | | AfterTriggerPendingOnRel(Oid relid) |
6010 | 0 | { |
6011 | 0 | AfterTriggerEvent event; |
6012 | 0 | AfterTriggerEventChunk *chunk; |
6013 | 0 | int depth; |
6014 | | |
6015 | | /* Scan queued events */ |
6016 | 0 | for_each_event_chunk(event, chunk, afterTriggers.events) |
6017 | 0 | { |
6018 | 0 | AfterTriggerShared evtshared = GetTriggerSharedData(event); |
6019 | | |
6020 | | /* |
6021 | | * We can ignore completed events. (Even if a DONE flag is rolled |
6022 | | * back by subxact abort, it's OK because the effects of the TRUNCATE |
6023 | | * or whatever must get rolled back too.) |
6024 | | */ |
6025 | 0 | if (event->ate_flags & AFTER_TRIGGER_DONE) |
6026 | 0 | continue; |
6027 | | |
6028 | 0 | if (evtshared->ats_relid == relid) |
6029 | 0 | return true; |
6030 | 0 | } |
6031 | | |
6032 | | /* |
6033 | | * Also scan events queued by incomplete queries. This could only matter |
6034 | | * if TRUNCATE/etc is executed by a function or trigger within an updating |
6035 | | * query on the same relation, which is pretty perverse, but let's check. |
6036 | | */ |
6037 | 0 | for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++) |
6038 | 0 | { |
6039 | 0 | for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events) |
6040 | 0 | { |
6041 | 0 | AfterTriggerShared evtshared = GetTriggerSharedData(event); |
6042 | |
6043 | 0 | if (event->ate_flags & AFTER_TRIGGER_DONE) |
6044 | 0 | continue; |
6045 | | |
6046 | 0 | if (evtshared->ats_relid == relid) |
6047 | 0 | return true; |
6048 | 0 | } |
6049 | 0 | } |
6050 | | |
6051 | 0 | return false; |
6052 | 0 | } |
6053 | | |
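/*
 * Sketch of how a caller might use this guard before performing surgery on
 * a table (not a quotation of any actual call site; the message wording is
 * illustrative):
 *
 *		if (AfterTriggerPendingOnRel(RelationGetRelid(rel)))
 *			ereport(ERROR,
 *					(errcode(ERRCODE_OBJECT_IN_USE),
 *					 errmsg("cannot rewrite \"%s\" because it has pending trigger events",
 *							RelationGetRelationName(rel))));
 */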
6054 | | /* ---------- |
6055 | | * AfterTriggerSaveEvent() |
6056 | | * |
6057 | | * Called by ExecA[RS]...Triggers() to queue up the triggers that should |
6058 | | * be fired for an event. |
6059 | | * |
6060 | | * NOTE: this is called whenever there are any triggers associated with |
6061 | | * the event (even if they are disabled). This function decides which |
6062 | | * triggers actually need to be queued. It is also called after each row, |
6063 | | * even if there are no triggers for that event, if there are any AFTER |
6064 | | * STATEMENT triggers for the statement which use transition tables, so that |
6065 | | * the transition tuplestores can be built. Furthermore, if the transition |
6066 | | * capture is happening for UPDATEd rows being moved to another partition due |
6067 | | * to the partition-key being changed, then this function is called once when |
6068 | | * the row is deleted (to capture OLD row), and once when the row is inserted |
6069 | | * into another partition (to capture NEW row). This is done separately because |
6070 | | * DELETE and INSERT happen on different tables. |
6071 | | * |
6072 | | * Transition tuplestores are built now, rather than when events are pulled |
6073 | | * off of the queue because AFTER ROW triggers are allowed to select from the |
6074 | | * transition tables for the statement. |
6075 | | * |
6076 | | * This contains special support to queue the update events for the case where |
6077 | | * a partitioned table undergoing a cross-partition update may have foreign |
6078 | | * keys pointing into it. Normally, a partitioned table's row triggers are |
6079 | | * not fired because the leaf partition(s) which are modified as a result of |
6080 | | * the operation on the partitioned table contain the same triggers which are |
6081 | | * fired instead. But that general scheme can cause problematic behavior with |
6082 | | * foreign key triggers during cross-partition updates, which are implemented |
6083 | | * as DELETE on the source partition followed by INSERT into the destination |
6084 | | * partition. Specifically, firing DELETE triggers would lead to the wrong
6085 | | * foreign key action being enforced, considering that the original command is
6086 | | * UPDATE; in this case, this function is called with relinfo as the |
6087 | | * partitioned table, and src_partinfo and dst_partinfo referring to the |
6088 | | * source and target leaf partitions, respectively. |
6089 | | * |
6090 | | * is_crosspart_update is true either when a DELETE event is fired on the |
6091 | | * source partition (which is to be ignored) or an UPDATE event is fired on |
6092 | | * the root partitioned table. |
6093 | | * ---------- |
6094 | | */ |
6095 | | static void |
6096 | | AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, |
6097 | | ResultRelInfo *src_partinfo, |
6098 | | ResultRelInfo *dst_partinfo, |
6099 | | int event, bool row_trigger, |
6100 | | TupleTableSlot *oldslot, TupleTableSlot *newslot, |
6101 | | List *recheckIndexes, Bitmapset *modifiedCols, |
6102 | | TransitionCaptureState *transition_capture, |
6103 | | bool is_crosspart_update) |
6104 | 0 | { |
6105 | 0 | Relation rel = relinfo->ri_RelationDesc; |
6106 | 0 | TriggerDesc *trigdesc = relinfo->ri_TrigDesc; |
6107 | 0 | AfterTriggerEventData new_event; |
6108 | 0 | AfterTriggerSharedData new_shared; |
6109 | 0 | char relkind = rel->rd_rel->relkind; |
6110 | 0 | int tgtype_event; |
6111 | 0 | int tgtype_level; |
6112 | 0 | int i; |
6113 | 0 | Tuplestorestate *fdw_tuplestore = NULL; |
6114 | | |
6115 | | /* |
6116 | | * Check state. We use a normal test not Assert because it is possible to |
6117 | | * reach here in the wrong state given misconfigured RI triggers, in |
6118 | | * particular deferring a cascade action trigger. |
6119 | | */ |
6120 | 0 | if (afterTriggers.query_depth < 0) |
6121 | 0 | elog(ERROR, "AfterTriggerSaveEvent() called outside of query"); |
6122 | | |
6123 | | /* Be sure we have enough space to record events at this query depth. */ |
6124 | 0 | if (afterTriggers.query_depth >= afterTriggers.maxquerydepth) |
6125 | 0 | AfterTriggerEnlargeQueryState(); |
6126 | | |
6127 | | /* |
6128 | | * If the directly named relation has any triggers with transition tables, |
6129 | | * then we need to capture transition tuples. |
6130 | | */ |
6131 | 0 | if (row_trigger && transition_capture != NULL) |
6132 | 0 | { |
6133 | 0 | TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple; |
6134 | | |
6135 | | /* |
6136 | | * Capture the old tuple in the appropriate transition table based on |
6137 | | * the event. |
6138 | | */ |
6139 | 0 | if (!TupIsNull(oldslot)) |
6140 | 0 | { |
6141 | 0 | Tuplestorestate *old_tuplestore; |
6142 | |
6143 | 0 | old_tuplestore = GetAfterTriggersTransitionTable(event, |
6144 | 0 | oldslot, |
6145 | 0 | NULL, |
6146 | 0 | transition_capture); |
6147 | 0 | TransitionTableAddTuple(estate, transition_capture, relinfo, |
6148 | 0 | oldslot, NULL, old_tuplestore); |
6149 | 0 | } |
6150 | | |
6151 | | /* |
6152 | | * Capture the new tuple in the appropriate transition table based on |
6153 | | * the event. |
6154 | | */ |
6155 | 0 | if (!TupIsNull(newslot)) |
6156 | 0 | { |
6157 | 0 | Tuplestorestate *new_tuplestore; |
6158 | |
6159 | 0 | new_tuplestore = GetAfterTriggersTransitionTable(event, |
6160 | 0 | NULL, |
6161 | 0 | newslot, |
6162 | 0 | transition_capture); |
6163 | 0 | TransitionTableAddTuple(estate, transition_capture, relinfo, |
6164 | 0 | newslot, original_insert_tuple, new_tuplestore); |
6165 | 0 | } |
6166 | | |
6167 | | /* |
6168 | | * If transition tables are the only reason we're here, return. As |
6169 | | * mentioned above, we can also be here during update tuple routing in |
6170 | | * presence of transition tables, in which case this function is |
6171 | | * the presence of transition tables, in which case this function is
6172 | | * to be NULL. |
6173 | | */ |
6174 | 0 | if (trigdesc == NULL || |
6175 | 0 | (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) || |
6176 | 0 | (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) || |
6177 | 0 | (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) || |
6178 | 0 | (event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot)))) |
6179 | 0 | return; |
6180 | 0 | } |
6181 | | |
6182 | | /* |
6183 | | * We normally don't see partitioned tables here for row level triggers |
6184 | | * except in the special case of a cross-partition update. In that case, |
6185 | | * nodeModifyTable.c:ExecCrossPartitionUpdateForeignKey() calls here to |
6186 | | * queue an update event on the root target partitioned table, also |
6187 | | * passing the source and destination partitions and their tuples. |
6188 | | */ |
6189 | 0 | Assert(!row_trigger || |
6190 | 0 | rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE || |
6191 | 0 | (is_crosspart_update && |
6192 | 0 | TRIGGER_FIRED_BY_UPDATE(event) && |
6193 | 0 | src_partinfo != NULL && dst_partinfo != NULL)); |
6194 | | |
6195 | | /* |
6196 | | * Validate the event code and collect the associated tuple CTIDs. |
6197 | | * |
6198 | | * The event code will be used both as a bitmask and an array offset, so |
6199 | | * validation is important to make sure we don't walk off the edge of our |
6200 | | * arrays. |
6201 | | * |
6202 | | * Also, if we're considering statement-level triggers, check whether we |
6203 | | * already queued a set of them for this event, and cancel the prior set |
6204 | | * if so. This preserves the behavior that statement-level triggers fire |
6205 | | * just once per statement and fire after row-level triggers. |
6206 | | */ |
6207 | 0 | switch (event) |
6208 | 0 | { |
6209 | 0 | case TRIGGER_EVENT_INSERT: |
6210 | 0 | tgtype_event = TRIGGER_TYPE_INSERT; |
6211 | 0 | if (row_trigger) |
6212 | 0 | { |
6213 | 0 | Assert(oldslot == NULL); |
6214 | 0 | Assert(newslot != NULL); |
6215 | 0 | ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1)); |
6216 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid2)); |
6217 | 0 | } |
6218 | 0 | else |
6219 | 0 | { |
6220 | 0 | Assert(oldslot == NULL); |
6221 | 0 | Assert(newslot == NULL); |
6222 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid1)); |
6223 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid2)); |
6224 | 0 | cancel_prior_stmt_triggers(RelationGetRelid(rel), |
6225 | 0 | CMD_INSERT, event); |
6226 | 0 | } |
6227 | 0 | break; |
6228 | 0 | case TRIGGER_EVENT_DELETE: |
6229 | 0 | tgtype_event = TRIGGER_TYPE_DELETE; |
6230 | 0 | if (row_trigger) |
6231 | 0 | { |
6232 | 0 | Assert(oldslot != NULL); |
6233 | 0 | Assert(newslot == NULL); |
6234 | 0 | ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1)); |
6235 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid2)); |
6236 | 0 | } |
6237 | 0 | else |
6238 | 0 | { |
6239 | 0 | Assert(oldslot == NULL); |
6240 | 0 | Assert(newslot == NULL); |
6241 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid1)); |
6242 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid2)); |
6243 | 0 | cancel_prior_stmt_triggers(RelationGetRelid(rel), |
6244 | 0 | CMD_DELETE, event); |
6245 | 0 | } |
6246 | 0 | break; |
6247 | 0 | case TRIGGER_EVENT_UPDATE: |
6248 | 0 | tgtype_event = TRIGGER_TYPE_UPDATE; |
6249 | 0 | if (row_trigger) |
6250 | 0 | { |
6251 | 0 | Assert(oldslot != NULL); |
6252 | 0 | Assert(newslot != NULL); |
6253 | 0 | ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1)); |
6254 | 0 | ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2)); |
6255 | | |
6256 | | /* |
6257 | | * Also remember the OIDs of partitions to fetch these tuples |
6258 | | * out of later in AfterTriggerExecute(). |
6259 | | */ |
6260 | 0 | if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) |
6261 | 0 | { |
6262 | 0 | Assert(src_partinfo != NULL && dst_partinfo != NULL); |
6263 | 0 | new_event.ate_src_part = |
6264 | 0 | RelationGetRelid(src_partinfo->ri_RelationDesc); |
6265 | 0 | new_event.ate_dst_part = |
6266 | 0 | RelationGetRelid(dst_partinfo->ri_RelationDesc); |
6267 | 0 | } |
6268 | 0 | } |
6269 | 0 | else |
6270 | 0 | { |
6271 | 0 | Assert(oldslot == NULL); |
6272 | 0 | Assert(newslot == NULL); |
6273 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid1)); |
6274 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid2)); |
6275 | 0 | cancel_prior_stmt_triggers(RelationGetRelid(rel), |
6276 | 0 | CMD_UPDATE, event); |
6277 | 0 | } |
6278 | 0 | break; |
6279 | 0 | case TRIGGER_EVENT_TRUNCATE: |
6280 | 0 | tgtype_event = TRIGGER_TYPE_TRUNCATE; |
6281 | 0 | Assert(oldslot == NULL); |
6282 | 0 | Assert(newslot == NULL); |
6283 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid1)); |
6284 | 0 | ItemPointerSetInvalid(&(new_event.ate_ctid2)); |
6285 | 0 | break; |
6286 | 0 | default: |
6287 | 0 | elog(ERROR, "invalid after-trigger event code: %d", event); |
6288 | 0 | tgtype_event = 0; /* keep compiler quiet */ |
6289 | 0 | break; |
6290 | 0 | } |
6291 | | |
6292 | | /* Determine flags */ |
6293 | 0 | if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger)) |
6294 | 0 | { |
6295 | 0 | if (row_trigger && event == TRIGGER_EVENT_UPDATE) |
6296 | 0 | { |
6297 | 0 | if (relkind == RELKIND_PARTITIONED_TABLE) |
6298 | 0 | new_event.ate_flags = AFTER_TRIGGER_CP_UPDATE; |
6299 | 0 | else |
6300 | 0 | new_event.ate_flags = AFTER_TRIGGER_2CTID; |
6301 | 0 | } |
6302 | 0 | else |
6303 | 0 | new_event.ate_flags = AFTER_TRIGGER_1CTID; |
6304 | 0 | } |
6305 | | |
6306 | | /* else, we'll initialize ate_flags for each trigger */ |
6307 | |
6308 | 0 | tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT); |
6309 | | |
6310 | | /* |
6311 | | * Must convert/copy the source and destination partition tuples into the |
6312 | | * root partitioned table's format/slot, because the processing in the |
6313 | | * loop below expects both oldslot and newslot tuples to be in that form. |
6314 | | */ |
6315 | 0 | if (row_trigger && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) |
6316 | 0 | { |
6317 | 0 | TupleTableSlot *rootslot; |
6318 | 0 | TupleConversionMap *map; |
6319 | |
6320 | 0 | rootslot = ExecGetTriggerOldSlot(estate, relinfo); |
6321 | 0 | map = ExecGetChildToRootMap(src_partinfo); |
6322 | 0 | if (map) |
6323 | 0 | oldslot = execute_attr_map_slot(map->attrMap, |
6324 | 0 | oldslot, |
6325 | 0 | rootslot); |
6326 | 0 | else |
6327 | 0 | oldslot = ExecCopySlot(rootslot, oldslot); |
6328 | |
|
6329 | 0 | rootslot = ExecGetTriggerNewSlot(estate, relinfo); |
6330 | 0 | map = ExecGetChildToRootMap(dst_partinfo); |
6331 | 0 | if (map) |
6332 | 0 | newslot = execute_attr_map_slot(map->attrMap, |
6333 | 0 | newslot, |
6334 | 0 | rootslot); |
6335 | 0 | else |
6336 | 0 | newslot = ExecCopySlot(rootslot, newslot); |
6337 | 0 | } |
6338 | |
6339 | 0 | for (i = 0; i < trigdesc->numtriggers; i++) |
6340 | 0 | { |
6341 | 0 | Trigger *trigger = &trigdesc->triggers[i]; |
6342 | |
|
6343 | 0 | if (!TRIGGER_TYPE_MATCHES(trigger->tgtype, |
6344 | 0 | tgtype_level, |
6345 | 0 | TRIGGER_TYPE_AFTER, |
6346 | 0 | tgtype_event)) |
6347 | 0 | continue; |
6348 | 0 | if (!TriggerEnabled(estate, relinfo, trigger, event, |
6349 | 0 | modifiedCols, oldslot, newslot)) |
6350 | 0 | continue; |
6351 | | |
6352 | 0 | if (relkind == RELKIND_FOREIGN_TABLE && row_trigger) |
6353 | 0 | { |
6354 | 0 | if (fdw_tuplestore == NULL) |
6355 | 0 | { |
6356 | 0 | fdw_tuplestore = GetCurrentFDWTuplestore(); |
6357 | 0 | new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH; |
6358 | 0 | } |
6359 | 0 | else |
6360 | | /* subsequent event for the same tuple */ |
6361 | 0 | new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE; |
6362 | 0 | } |
6363 | | |
6364 | | /* |
6365 | | * If the trigger is a foreign key enforcement trigger, there are |
6366 | | * certain cases where we can skip queueing the event because we can |
6367 | | * tell by inspection that the FK constraint will still pass. There |
6368 | | * are also some cases during cross-partition updates of a partitioned |
6369 | | * table where queuing the event can be skipped. |
6370 | | */ |
6371 | 0 | if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event)) |
6372 | 0 | { |
6373 | 0 | switch (RI_FKey_trigger_type(trigger->tgfoid)) |
6374 | 0 | { |
6375 | 0 | case RI_TRIGGER_PK: |
6376 | | |
6377 | | /* |
6378 | | * For cross-partitioned updates of partitioned PK table, |
6379 | | * skip the event fired by the component delete on the |
6380 | | * source leaf partition unless the constraint originates |
6381 | | * in the partition itself (!tgisclone), because the |
6382 | | * update event that will be fired on the root |
6383 | | * (partitioned) target table will be used to perform the |
6384 | | * necessary foreign key enforcement action. |
6385 | | */ |
6386 | 0 | if (is_crosspart_update && |
6387 | 0 | TRIGGER_FIRED_BY_DELETE(event) && |
6388 | 0 | trigger->tgisclone) |
6389 | 0 | continue; |
6390 | | |
6391 | | /* Update or delete on trigger's PK table */ |
6392 | 0 | if (!RI_FKey_pk_upd_check_required(trigger, rel, |
6393 | 0 | oldslot, newslot)) |
6394 | 0 | { |
6395 | | /* skip queuing this event */ |
6396 | 0 | continue; |
6397 | 0 | } |
6398 | 0 | break; |
6399 | | |
6400 | 0 | case RI_TRIGGER_FK: |
6401 | | |
6402 | | /* |
6403 | | * Update on trigger's FK table. We can skip the update |
6404 | | * event fired on a partitioned table during a |
6405 | | * cross-partition update of that table, because the insert event
6406 | | * that is fired on the destination leaf partition would |
6407 | | * suffice to perform the necessary foreign key check. |
6408 | | * Moreover, RI_FKey_fk_upd_check_required() expects to be |
6409 | | * passed a tuple that contains system attributes, most of |
6410 | | * which are not present in the virtual slot belonging to |
6411 | | * a partitioned table. |
6412 | | */ |
6413 | 0 | if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE || |
6414 | 0 | !RI_FKey_fk_upd_check_required(trigger, rel, |
6415 | 0 | oldslot, newslot)) |
6416 | 0 | { |
6417 | | /* skip queuing this event */ |
6418 | 0 | continue; |
6419 | 0 | } |
6420 | 0 | break; |
6421 | | |
6422 | 0 | case RI_TRIGGER_NONE: |
6423 | | |
6424 | | /* |
6425 | | * Not an FK trigger. No need to queue the update event |
6426 | | * fired during a cross-partitioned update of a |
6427 | | * partitioned table, because the same row trigger must be |
6428 | | * present in the leaf partition(s) that are affected as |
6429 | | * part of this update and the events fired on them are |
6430 | | * queued instead. |
6431 | | */ |
6432 | 0 | if (row_trigger && |
6433 | 0 | rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) |
6434 | 0 | continue; |
6435 | 0 | break; |
6436 | 0 | } |
6437 | 0 | } |
6438 | | |
6439 | | /* |
6440 | | * If the trigger is a deferred unique constraint check trigger, only |
6441 | | * queue it if the unique constraint was potentially violated, which |
6442 | | * we know from index insertion time. |
6443 | | */ |
6444 | 0 | if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK) |
6445 | 0 | { |
6446 | 0 | if (!list_member_oid(recheckIndexes, trigger->tgconstrindid)) |
6447 | 0 | continue; /* Uniqueness definitely not violated */ |
6448 | 0 | } |
6449 | | |
6450 | | /* |
6451 | | * Fill in event structure and add it to the current query's queue. |
6452 | | * Note we set ats_table to NULL whenever this trigger doesn't use |
6453 | | * transition tables, to improve sharability of the shared event data. |
6454 | | */ |
6455 | 0 | new_shared.ats_event = |
6456 | 0 | (event & TRIGGER_EVENT_OPMASK) | |
6457 | 0 | (row_trigger ? TRIGGER_EVENT_ROW : 0) | |
6458 | 0 | (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) | |
6459 | 0 | (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0); |
6460 | 0 | new_shared.ats_tgoid = trigger->tgoid; |
6461 | 0 | new_shared.ats_relid = RelationGetRelid(rel); |
6462 | 0 | new_shared.ats_rolid = GetUserId(); |
6463 | 0 | new_shared.ats_firing_id = 0; |
6464 | 0 | if ((trigger->tgoldtable || trigger->tgnewtable) && |
6465 | 0 | transition_capture != NULL) |
6466 | 0 | new_shared.ats_table = transition_capture->tcs_private; |
6467 | 0 | else |
6468 | 0 | new_shared.ats_table = NULL; |
6469 | 0 | new_shared.ats_modifiedcols = modifiedCols; |
6470 | |
6471 | 0 | afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events, |
6472 | 0 | &new_event, &new_shared); |
6473 | 0 | } |
6474 | | |
6475 | | /* |
6476 | | * Finally, spool any foreign tuple(s). The tuplestore squashes them to |
6477 | | * minimal tuples, so this loses any system columns. The executor lost |
6478 | | * those columns before us, for an unrelated reason, so this is fine. |
6479 | | */ |
6480 | 0 | if (fdw_tuplestore) |
6481 | 0 | { |
6482 | 0 | if (oldslot != NULL) |
6483 | 0 | tuplestore_puttupleslot(fdw_tuplestore, oldslot); |
6484 | 0 | if (newslot != NULL) |
6485 | 0 | tuplestore_puttupleslot(fdw_tuplestore, newslot); |
6486 | 0 | } |
6487 | 0 | } |
6488 | | |
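/*
 * The cross-partition case described in the header comment can arise from
 * SQL along these lines (names are hypothetical):
 *
 *		CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a);
 *		CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (0) TO (10);
 *		CREATE TABLE pk2 PARTITION OF pk FOR VALUES FROM (10) TO (20);
 *		CREATE TABLE fk (a int REFERENCES pk);
 *
 *		UPDATE pk SET a = 15 WHERE a = 5;
 *
 * The row move is executed as a DELETE from pk1 plus an INSERT into pk2,
 * but the foreign-key enforcement event is queued against the root table
 * "pk" as an UPDATE, so rows in "fk" that reference the moved key are
 * handled with UPDATE semantics rather than DELETE semantics.
 */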
6489 | | /* |
6490 | | * Detect whether we already queued BEFORE STATEMENT triggers for the given |
6491 | | * relation + operation, and set the flag so the next call will report "true". |
6492 | | */ |
6493 | | static bool |
6494 | | before_stmt_triggers_fired(Oid relid, CmdType cmdType) |
6495 | 0 | { |
6496 | 0 | bool result; |
6497 | 0 | AfterTriggersTableData *table; |
6498 | | |
6499 | | /* Check state, like AfterTriggerSaveEvent. */ |
6500 | 0 | if (afterTriggers.query_depth < 0) |
6501 | 0 | elog(ERROR, "before_stmt_triggers_fired() called outside of query"); |
6502 | | |
6503 | | /* Be sure we have enough space to record events at this query depth. */ |
6504 | 0 | if (afterTriggers.query_depth >= afterTriggers.maxquerydepth) |
6505 | 0 | AfterTriggerEnlargeQueryState(); |
6506 | | |
6507 | | /* |
6508 | | * We keep this state in the AfterTriggersTableData that also holds |
6509 | | * transition tables for the relation + operation. In this way, if we are |
6510 | | * forced to make a new set of transition tables because more tuples get |
6511 | | * entered after we've already fired triggers, we will allow a new set of |
6512 | | * statement triggers to get queued. |
6513 | | */ |
6514 | 0 | table = GetAfterTriggersTableData(relid, cmdType); |
6515 | 0 | result = table->before_trig_done; |
6516 | 0 | table->before_trig_done = true; |
6517 | 0 | return result; |
6518 | 0 | } |
6519 | | |
6520 | | /* |
6521 | | * If we previously queued a set of AFTER STATEMENT triggers for the given |
6522 | | * relation + operation, and they've not been fired yet, cancel them. The |
6523 | | * caller will queue a fresh set that's after any row-level triggers that may |
6524 | | * have been queued by the current sub-statement, preserving (as much as |
6525 | | * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT |
6526 | | * triggers, and that the latter only fire once. This deals with the |
6527 | | * situation where several FK enforcement triggers sequentially queue triggers |
6528 | | * for the same table into the same trigger query level. We can't fully |
6529 | | * prevent odd behavior though: if there are AFTER ROW triggers taking |
6530 | | * transition tables, we don't want to change the transition tables once the |
6531 | | * first such trigger has seen them. In such a case, any additional events |
6532 | | * will result in creating new transition tables and allowing new firings of |
6533 | | * statement triggers. |
6534 | | * |
6535 | | * This also saves the current event list location so that a later invocation |
6536 | | * of this function can cheaply find the triggers we're about to queue and |
6537 | | * cancel them. |
6538 | | */ |
6539 | | static void |
6540 | | cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent) |
6541 | 0 | { |
6542 | 0 | AfterTriggersTableData *table; |
6543 | 0 | AfterTriggersQueryData *qs = &afterTriggers.query_stack[afterTriggers.query_depth]; |
6544 | | |
6545 | | /* |
6546 | | * We keep this state in the AfterTriggersTableData that also holds |
6547 | | * transition tables for the relation + operation. In this way, if we are |
6548 | | * forced to make a new set of transition tables because more tuples get |
6549 | | * entered after we've already fired triggers, we will allow a new set of |
6550 | | * statement triggers to get queued without canceling the old ones. |
6551 | | */ |
6552 | 0 | table = GetAfterTriggersTableData(relid, cmdType); |
6553 | |
6554 | 0 | if (table->after_trig_done) |
6555 | 0 | { |
6556 | | /* |
6557 | | * We want to start scanning from the tail location that existed just |
6558 | | * before we inserted any statement triggers. But the events list |
6559 | | * might've been entirely empty then, in which case scan from the |
6560 | | * current head. |
6561 | | */ |
6562 | 0 | AfterTriggerEvent event; |
6563 | 0 | AfterTriggerEventChunk *chunk; |
6564 | |
6565 | 0 | if (table->after_trig_events.tail) |
6566 | 0 | { |
6567 | 0 | chunk = table->after_trig_events.tail; |
6568 | 0 | event = (AfterTriggerEvent) table->after_trig_events.tailfree; |
6569 | 0 | } |
6570 | 0 | else |
6571 | 0 | { |
6572 | 0 | chunk = qs->events.head; |
6573 | 0 | event = NULL; |
6574 | 0 | } |
6575 | |
6576 | 0 | for_each_chunk_from(chunk) |
6577 | 0 | { |
6578 | 0 | if (event == NULL) |
6579 | 0 | event = (AfterTriggerEvent) CHUNK_DATA_START(chunk); |
6580 | 0 | for_each_event_from(event, chunk) |
6581 | 0 | { |
6582 | 0 | AfterTriggerShared evtshared = GetTriggerSharedData(event); |
6583 | | |
6584 | | /* |
6585 | | * Exit loop when we reach events that aren't AFTER STATEMENT
6586 | | * triggers for the target relation.
6587 | | */ |
6588 | 0 | if (evtshared->ats_relid != relid) |
6589 | 0 | goto done; |
6590 | 0 | if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) != tgevent) |
6591 | 0 | goto done; |
6592 | 0 | if (!TRIGGER_FIRED_FOR_STATEMENT(evtshared->ats_event)) |
6593 | 0 | goto done; |
6594 | 0 | if (!TRIGGER_FIRED_AFTER(evtshared->ats_event)) |
6595 | 0 | goto done; |
6596 | | /* OK, mark it DONE */ |
6597 | 0 | event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS; |
6598 | 0 | event->ate_flags |= AFTER_TRIGGER_DONE; |
6599 | 0 | } |
6600 | | /* signal we must reinitialize event ptr for next chunk */ |
6601 | 0 | event = NULL; |
6602 | 0 | } |
6603 | 0 | } |
6604 | 0 | done: |
6605 | | |
6606 | | /* In any case, save current insertion point for next time */ |
6607 | 0 | table->after_trig_done = true; |
6608 | 0 | table->after_trig_events = qs->events; |
6609 | 0 | } |
6610 | | |
6611 | | /* |
6612 | | * GUC assign_hook for session_replication_role |
6613 | | */ |
6614 | | void |
6615 | | assign_session_replication_role(int newval, void *extra) |
6616 | 0 | { |
6617 | | /* |
6618 | | * Must flush the plan cache when changing replication role; but don't |
6619 | | * flush unnecessarily. |
6620 | | */ |
6621 | 0 | if (SessionReplicationRole != newval) |
6622 | 0 | ResetPlanCache(); |
6623 | 0 | } |
6624 | | |
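/*
 * For illustration, the user-visible setting behind this hook:
 *
 *		SET session_replication_role = replica;
 *		-- ordinarily ENABLEd triggers (and rules) no longer fire; only
 *		-- those marked ENABLE REPLICA or ENABLE ALWAYS do
 *		SET session_replication_role = origin;
 *
 * Because the setting can affect how cached plans behave, the hook resets
 * the plan cache whenever the value actually changes.
 */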
6625 | | /* |
6626 | | * SQL function pg_trigger_depth() |
6627 | | */ |
6628 | | Datum |
6629 | | pg_trigger_depth(PG_FUNCTION_ARGS) |
6630 | 0 | { |
6631 | 0 | PG_RETURN_INT32(MyTriggerDepth); |
6632 | 0 | } |
6633 | | |
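/*
 * Typical SQL-level use is to break trigger recursion, for example inside a
 * PL/pgSQL trigger function (illustrative):
 *
 *		IF pg_trigger_depth() > 1 THEN
 *			RETURN NEW;		-- fired from inside another trigger; do nothing
 *		END IF;
 *
 * or, e.g., in a trigger's WHEN clause: WHEN (pg_trigger_depth() = 0).
 */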
6634 | | /* |
6635 | | * Check whether a trigger modified a virtual generated column and replace the |
6636 | | * value with null if so. |
6637 | | * |
6638 | | * We need to check this so that we don't end up storing a non-null value in a |
6639 | | * virtual generated column. |
6640 | | * |
6641 | | * We don't need to check for stored generated columns, since those will be |
6642 | | * overwritten later anyway. |
6643 | | */ |
6644 | | static HeapTuple |
6645 | | check_modified_virtual_generated(TupleDesc tupdesc, HeapTuple tuple) |
6646 | 0 | { |
6647 | 0 | if (!(tupdesc->constr && tupdesc->constr->has_generated_virtual)) |
6648 | 0 | return tuple; |
6649 | | |
6650 | 0 | for (int i = 0; i < tupdesc->natts; i++) |
6651 | 0 | { |
6652 | 0 | if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL) |
6653 | 0 | { |
6654 | 0 | if (!heap_attisnull(tuple, i + 1, tupdesc)) |
6655 | 0 | { |
6656 | 0 | int replCol = i + 1; |
6657 | 0 | Datum replValue = 0; |
6658 | 0 | bool replIsnull = true; |
6659 | |
6660 | 0 | tuple = heap_modify_tuple_by_cols(tuple, tupdesc, 1, &replCol, &replValue, &replIsnull); |
6661 | 0 | } |
6662 | 0 | } |
6663 | 0 | } |
6664 | |
6665 | 0 | return tuple; |
6666 | 0 | } |
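/*
 * The case being defended against, in SQL terms (illustrative; uses the
 * virtual generated column syntax):
 *
 *		CREATE TABLE t (a int, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
 *
 * A BEFORE ROW trigger that assigns a value to NEW.b would otherwise leave
 * a concrete datum in a column that must never be stored; this function
 * replaces any such value with null before the tuple is used further.
 */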