/src/postgres/src/backend/executor/nodeModifyTable.c
Line | Count | Source |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * nodeModifyTable.c |
4 | | * routines to handle ModifyTable nodes. |
5 | | * |
6 | | * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group |
7 | | * Portions Copyright (c) 1994, Regents of the University of California |
8 | | * |
9 | | * |
10 | | * IDENTIFICATION |
11 | | * src/backend/executor/nodeModifyTable.c |
12 | | * |
13 | | *------------------------------------------------------------------------- |
14 | | */ |
15 | | /* INTERFACE ROUTINES |
16 | | * ExecInitModifyTable - initialize the ModifyTable node |
17 | | * ExecModifyTable - retrieve the next tuple from the node |
18 | | * ExecEndModifyTable - shut down the ModifyTable node |
19 | | * ExecReScanModifyTable - rescan the ModifyTable node |
20 | | * |
21 | | * NOTES |
22 | | * The ModifyTable node receives input from its outerPlan, which is |
23 | | * the data to insert for INSERT cases, the changed columns' new |
24 | | * values plus row-locating info for UPDATE and MERGE cases, or just the |
25 | | * row-locating info for DELETE cases. |
26 | | * |
27 | | * The relation to modify can be an ordinary table, a foreign table, or a |
28 | | * view. If it's a view, either it has sufficient INSTEAD OF triggers or |
29 | | * this node executes only MERGE ... DO NOTHING. If the original MERGE |
30 | | * targeted a view not in one of those two categories, earlier processing |
31 | | * already pointed the ModifyTable result relation to an underlying |
32 | | * relation of that other view. This node does process |
33 | | * ri_WithCheckOptions, which may have expressions from those other, |
34 | | * automatically updatable views. |
35 | | * |
36 | | * MERGE runs a join between the source relation and the target table. |
37 | | * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join |
38 | | * is an outer join that might output tuples without a matching target |
39 | | * tuple. In this case, any unmatched target tuples will have NULL |
40 | | * row-locating info, and only INSERT can be run. But for matched target |
41 | | * tuples, the row-locating info is used to determine the tuple to UPDATE |
42 | | * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY |
43 | | * SOURCE, all tuples produced by the join will include a matching target |
44 | | * tuple, so all tuples contain row-locating info. |
45 | | * |
46 | | * If the query specifies RETURNING, then the ModifyTable returns a |
47 | | * RETURNING tuple after completing each row insert, update, or delete. |
48 | | * It must be called again to continue the operation. Without RETURNING, |
49 | | * we just loop within the node until all the work is done, then |
50 | | * return NULL. This avoids useless call/return overhead. |
51 | | */ |
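The calling convention described in the NOTES can be pictured from the caller's side. The sketch below is illustrative only and is not part of nodeModifyTable.c; the helper name is invented, but ExecProcNode() and TupIsNull() are the executor interfaces actually used to drive a plan node such as ModifyTable (the real driver loop lives in ExecutePlan() in execMain.c).

static void
DrainModifyTableSketch(ModifyTableState *mtstate)
{
	for (;;)
	{
		TupleTableSlot *slot = ExecProcNode(&mtstate->ps);

		/*
		 * Without RETURNING, the first call performs all of the inserts,
		 * updates, deletes, or merge actions internally and returns an
		 * empty result; with RETURNING, each call yields one projected
		 * result row and must be repeated until the node is exhausted.
		 */
		if (TupIsNull(slot))
			break;
	}
}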
52 | | |
53 | | #include "postgres.h" |
54 | | |
55 | | #include "access/htup_details.h" |
56 | | #include "access/tableam.h" |
57 | | #include "access/xact.h" |
58 | | #include "commands/trigger.h" |
59 | | #include "executor/execPartition.h" |
60 | | #include "executor/executor.h" |
61 | | #include "executor/nodeModifyTable.h" |
62 | | #include "foreign/fdwapi.h" |
63 | | #include "miscadmin.h" |
64 | | #include "nodes/nodeFuncs.h" |
65 | | #include "optimizer/optimizer.h" |
66 | | #include "rewrite/rewriteHandler.h" |
67 | | #include "rewrite/rewriteManip.h" |
68 | | #include "storage/lmgr.h" |
69 | | #include "utils/builtins.h" |
70 | | #include "utils/datum.h" |
71 | | #include "utils/rel.h" |
72 | | #include "utils/snapmgr.h" |
73 | | |
74 | | |
75 | | typedef struct MTTargetRelLookup |
76 | | { |
77 | | Oid relationOid; /* hash key, must be first */ |
78 | | int relationIndex; /* rel's index in resultRelInfo[] array */ |
79 | | } MTTargetRelLookup; |
80 | | |
81 | | /* |
82 | | * Context struct for a ModifyTable operation, containing basic execution |
83 | | * state and some output variables populated by ExecUpdateAct() and |
84 | | * ExecDeleteAct() to report the result of their actions to callers. |
85 | | */ |
86 | | typedef struct ModifyTableContext |
87 | | { |
88 | | /* Operation state */ |
89 | | ModifyTableState *mtstate; |
90 | | EPQState *epqstate; |
91 | | EState *estate; |
92 | | |
93 | | /* |
94 | | * Slot containing tuple obtained from ModifyTable's subplan. Used to |
95 | | * access "junk" columns that are not going to be stored. |
96 | | */ |
97 | | TupleTableSlot *planSlot; |
98 | | |
99 | | /* |
100 | | * Information about the changes that were made concurrently to a tuple |
101 | | * being updated or deleted |
102 | | */ |
103 | | TM_FailureData tmfd; |
104 | | |
105 | | /* |
106 | | * The tuple deleted when doing a cross-partition UPDATE with a RETURNING |
107 | | * clause that refers to OLD columns (converted to the root's tuple |
108 | | * descriptor). |
109 | | */ |
110 | | TupleTableSlot *cpDeletedSlot; |
111 | | |
112 | | /* |
113 | | * The tuple projected by the INSERT's RETURNING clause, when doing a |
114 | | * cross-partition UPDATE |
115 | | */ |
116 | | TupleTableSlot *cpUpdateReturningSlot; |
117 | | } ModifyTableContext; |
118 | | |
119 | | /* |
120 | | * Context struct containing output data specific to UPDATE operations. |
121 | | */ |
122 | | typedef struct UpdateContext |
123 | | { |
124 | | bool crossPartUpdate; /* was it a cross-partition update? */ |
125 | | TU_UpdateIndexes updateIndexes; /* Which index updates are required? */ |
126 | | |
127 | | /* |
128 | | * Lock mode to acquire on the latest tuple version before performing |
129 | | * EvalPlanQual on it |
130 | | */ |
131 | | LockTupleMode lockmode; |
132 | | } UpdateContext; |
133 | | |
134 | | |
135 | | static void ExecBatchInsert(ModifyTableState *mtstate, |
136 | | ResultRelInfo *resultRelInfo, |
137 | | TupleTableSlot **slots, |
138 | | TupleTableSlot **planSlots, |
139 | | int numSlots, |
140 | | EState *estate, |
141 | | bool canSetTag); |
142 | | static void ExecPendingInserts(EState *estate); |
143 | | static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context, |
144 | | ResultRelInfo *sourcePartInfo, |
145 | | ResultRelInfo *destPartInfo, |
146 | | ItemPointer tupleid, |
147 | | TupleTableSlot *oldslot, |
148 | | TupleTableSlot *newslot); |
149 | | static bool ExecOnConflictUpdate(ModifyTableContext *context, |
150 | | ResultRelInfo *resultRelInfo, |
151 | | ItemPointer conflictTid, |
152 | | TupleTableSlot *excludedSlot, |
153 | | bool canSetTag, |
154 | | TupleTableSlot **returning); |
155 | | static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate, |
156 | | EState *estate, |
157 | | PartitionTupleRouting *proute, |
158 | | ResultRelInfo *targetRelInfo, |
159 | | TupleTableSlot *slot, |
160 | | ResultRelInfo **partRelInfo); |
161 | | |
162 | | static TupleTableSlot *ExecMerge(ModifyTableContext *context, |
163 | | ResultRelInfo *resultRelInfo, |
164 | | ItemPointer tupleid, |
165 | | HeapTuple oldtuple, |
166 | | bool canSetTag); |
167 | | static void ExecInitMerge(ModifyTableState *mtstate, EState *estate); |
168 | | static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context, |
169 | | ResultRelInfo *resultRelInfo, |
170 | | ItemPointer tupleid, |
171 | | HeapTuple oldtuple, |
172 | | bool canSetTag, |
173 | | bool *matched); |
174 | | static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context, |
175 | | ResultRelInfo *resultRelInfo, |
176 | | bool canSetTag); |
177 | | |
178 | | |
179 | | /* |
180 | | * Verify that the tuples to be produced by INSERT match the |
181 | | * target relation's rowtype |
182 | | * |
183 | | * We do this to guard against stale plans. If plan invalidation is |
184 | | * functioning properly then we should never get a failure here, but better |
185 | | * safe than sorry. Note that this is called after we have obtained lock |
186 | | * on the target rel, so the rowtype can't change underneath us. |
187 | | * |
188 | | * The plan output is represented by its targetlist, because that makes |
189 | | * handling the dropped-column case easier. |
190 | | * |
191 | | * We used to use this for UPDATE as well, but now the equivalent checks |
192 | | * are done in ExecBuildUpdateProjection. |
193 | | */ |
194 | | static void |
195 | | ExecCheckPlanOutput(Relation resultRel, List *targetList) |
196 | 0 | { |
197 | 0 | TupleDesc resultDesc = RelationGetDescr(resultRel); |
198 | 0 | int attno = 0; |
199 | 0 | ListCell *lc; |
200 | |
|
201 | 0 | foreach(lc, targetList) |
202 | 0 | { |
203 | 0 | TargetEntry *tle = (TargetEntry *) lfirst(lc); |
204 | 0 | Form_pg_attribute attr; |
205 | |
|
206 | 0 | Assert(!tle->resjunk); /* caller removed junk items already */ |
207 | |
|
208 | 0 | if (attno >= resultDesc->natts) |
209 | 0 | ereport(ERROR, |
210 | 0 | (errcode(ERRCODE_DATATYPE_MISMATCH), |
211 | 0 | errmsg("table row type and query-specified row type do not match"), |
212 | 0 | errdetail("Query has too many columns."))); |
213 | 0 | attr = TupleDescAttr(resultDesc, attno); |
214 | 0 | attno++; |
215 | | |
216 | | /* |
217 | | * Special cases here should match planner's expand_insert_targetlist. |
218 | | */ |
219 | 0 | if (attr->attisdropped) |
220 | 0 | { |
221 | | /* |
222 | | * For a dropped column, we can't check atttypid (it's likely 0). |
223 | | * In any case the planner has most likely inserted an INT4 null. |
224 | | * What we insist on is just *some* NULL constant. |
225 | | */ |
226 | 0 | if (!IsA(tle->expr, Const) || |
227 | 0 | !((Const *) tle->expr)->constisnull) |
228 | 0 | ereport(ERROR, |
229 | 0 | (errcode(ERRCODE_DATATYPE_MISMATCH), |
230 | 0 | errmsg("table row type and query-specified row type do not match"), |
231 | 0 | errdetail("Query provides a value for a dropped column at ordinal position %d.", |
232 | 0 | attno))); |
233 | 0 | } |
234 | 0 | else if (attr->attgenerated) |
235 | 0 | { |
236 | | /* |
237 | | * For a generated column, the planner will have inserted a null |
238 | | * of the column's base type (to avoid possibly failing on domain |
239 | | * not-null constraints). It doesn't seem worth insisting on that |
240 | | * exact type though, since a null value is type-independent. As |
241 | | * above, just insist on *some* NULL constant. |
242 | | */ |
243 | 0 | if (!IsA(tle->expr, Const) || |
244 | 0 | !((Const *) tle->expr)->constisnull) |
245 | 0 | ereport(ERROR, |
246 | 0 | (errcode(ERRCODE_DATATYPE_MISMATCH), |
247 | 0 | errmsg("table row type and query-specified row type do not match"), |
248 | 0 | errdetail("Query provides a value for a generated column at ordinal position %d.", |
249 | 0 | attno))); |
250 | 0 | } |
251 | 0 | else |
252 | 0 | { |
253 | | /* Normal case: demand type match */ |
254 | 0 | if (exprType((Node *) tle->expr) != attr->atttypid) |
255 | 0 | ereport(ERROR, |
256 | 0 | (errcode(ERRCODE_DATATYPE_MISMATCH), |
257 | 0 | errmsg("table row type and query-specified row type do not match"), |
258 | 0 | errdetail("Table has type %s at ordinal position %d, but query expects %s.", |
259 | 0 | format_type_be(attr->atttypid), |
260 | 0 | attno, |
261 | 0 | format_type_be(exprType((Node *) tle->expr))))); |
262 | 0 | } |
263 | 0 | } |
264 | 0 | if (attno != resultDesc->natts) |
265 | 0 | ereport(ERROR, |
266 | 0 | (errcode(ERRCODE_DATATYPE_MISMATCH), |
267 | 0 | errmsg("table row type and query-specified row type do not match"), |
268 | 0 | errdetail("Query has too few columns."))); |
269 | 0 | } |
270 | | |
271 | | /* |
272 | | * ExecProcessReturning --- evaluate a RETURNING list |
273 | | * |
274 | | * context: context for the ModifyTable operation |
275 | | * resultRelInfo: current result rel |
276 | | * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE) |
277 | | * oldSlot: slot holding old tuple deleted or updated |
278 | | * newSlot: slot holding new tuple inserted or updated |
279 | | * planSlot: slot holding tuple returned by top subplan node |
280 | | * |
281 | | * Note: If oldSlot and newSlot are NULL, the FDW should have already provided |
282 | | * econtext's scan tuple and its old & new tuples are not needed (FDW direct- |
283 | | * modify is disabled if the RETURNING list refers to any OLD/NEW values). |
284 | | * |
285 | | * Returns a slot holding the result tuple |
286 | | */ |
287 | | static TupleTableSlot * |
288 | | ExecProcessReturning(ModifyTableContext *context, |
289 | | ResultRelInfo *resultRelInfo, |
290 | | CmdType cmdType, |
291 | | TupleTableSlot *oldSlot, |
292 | | TupleTableSlot *newSlot, |
293 | | TupleTableSlot *planSlot) |
294 | 0 | { |
295 | 0 | EState *estate = context->estate; |
296 | 0 | ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning; |
297 | 0 | ExprContext *econtext = projectReturning->pi_exprContext; |
298 | | |
299 | | /* Make tuple and any needed join variables available to ExecProject */ |
300 | 0 | switch (cmdType) |
301 | 0 | { |
302 | 0 | case CMD_INSERT: |
303 | 0 | case CMD_UPDATE: |
304 | | /* return new tuple by default */ |
305 | 0 | if (newSlot) |
306 | 0 | econtext->ecxt_scantuple = newSlot; |
307 | 0 | break; |
308 | | |
309 | 0 | case CMD_DELETE: |
310 | | /* return old tuple by default */ |
311 | 0 | if (oldSlot) |
312 | 0 | econtext->ecxt_scantuple = oldSlot; |
313 | 0 | break; |
314 | | |
315 | 0 | default: |
316 | 0 | elog(ERROR, "unrecognized commandType: %d", (int) cmdType); |
317 | 0 | } |
318 | 0 | econtext->ecxt_outertuple = planSlot; |
319 | | |
320 | | /* Make old/new tuples available to ExecProject, if required */ |
321 | 0 | if (oldSlot) |
322 | 0 | econtext->ecxt_oldtuple = oldSlot; |
323 | 0 | else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) |
324 | 0 | econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo); |
325 | 0 | else |
326 | 0 | econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */ |
327 | |
|
328 | 0 | if (newSlot) |
329 | 0 | econtext->ecxt_newtuple = newSlot; |
330 | 0 | else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) |
331 | 0 | econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo); |
332 | 0 | else |
333 | 0 | econtext->ecxt_newtuple = NULL; /* No references to NEW columns */ |
334 | | |
335 | | /* |
336 | | * Tell ExecProject whether or not the OLD/NEW rows actually exist. This |
337 | | * information is required to evaluate ReturningExpr nodes and also in |
338 | | * ExecEvalSysVar() and ExecEvalWholeRowVar(). |
339 | | */ |
340 | 0 | if (oldSlot == NULL) |
341 | 0 | projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL; |
342 | 0 | else |
343 | 0 | projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL; |
344 | |
|
345 | 0 | if (newSlot == NULL) |
346 | 0 | projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL; |
347 | 0 | else |
348 | 0 | projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL; |
349 | | |
350 | | /* Compute the RETURNING expressions */ |
351 | 0 | return ExecProject(projectReturning); |
352 | 0 | } |
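For orientation, the slot arguments to ExecProcessReturning() differ by command type. The fragment below is a hedged sketch, not code from this file, of how a DELETE path (which appears later in the file) would call it: only the old tuple exists, so NULL is passed for the new tuple and any NEW.* references in the RETURNING list evaluate to NULL per the flag handling above. The helper name is invented.

static TupleTableSlot *
ProcessReturningForDeleteSketch(ModifyTableContext *context,
								ResultRelInfo *resultRelInfo,
								TupleTableSlot *oldSlot)
{
	/* DELETE returns the old tuple; there is no new tuple to pass */
	return ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
								oldSlot, NULL, context->planSlot);
}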
353 | | |
354 | | /* |
355 | | * ExecCheckTupleVisible -- verify tuple is visible |
356 | | * |
357 | | * It would not be consistent with guarantees of the higher isolation levels to |
358 | | * proceed with avoiding insertion (taking speculative insertion's alternative |
359 | | * path) on the basis of another tuple that is not visible to the MVCC snapshot. |
360 | | * Check for the need to raise a serialization failure, and do so as necessary. |
361 | | */ |
362 | | static void |
363 | | ExecCheckTupleVisible(EState *estate, |
364 | | Relation rel, |
365 | | TupleTableSlot *slot) |
366 | 0 | { |
367 | 0 | if (!IsolationUsesXactSnapshot()) |
368 | 0 | return; |
369 | | |
370 | 0 | if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot)) |
371 | 0 | { |
372 | 0 | Datum xminDatum; |
373 | 0 | TransactionId xmin; |
374 | 0 | bool isnull; |
375 | |
|
376 | 0 | xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull); |
377 | 0 | Assert(!isnull); |
378 | 0 | xmin = DatumGetTransactionId(xminDatum); |
379 | | |
380 | | /* |
381 | | * We should not raise a serialization failure if the conflict is |
382 | | * against a tuple inserted by our own transaction, even if it's not |
383 | | * visible to our snapshot. (This would happen, for example, if |
384 | | * conflicting keys are proposed for insertion in a single command.) |
385 | | */ |
386 | 0 | if (!TransactionIdIsCurrentTransactionId(xmin)) |
387 | 0 | ereport(ERROR, |
388 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
389 | 0 | errmsg("could not serialize access due to concurrent update"))); |
390 | 0 | } |
391 | 0 | } |
392 | | |
393 | | /* |
394 | | * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible() |
395 | | */ |
396 | | static void |
397 | | ExecCheckTIDVisible(EState *estate, |
398 | | ResultRelInfo *relinfo, |
399 | | ItemPointer tid, |
400 | | TupleTableSlot *tempSlot) |
401 | 0 | { |
402 | 0 | Relation rel = relinfo->ri_RelationDesc; |
403 | | |
404 | | /* Redundantly check isolation level */ |
405 | 0 | if (!IsolationUsesXactSnapshot()) |
406 | 0 | return; |
407 | | |
408 | 0 | if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot)) |
409 | 0 | elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT"); |
410 | 0 | ExecCheckTupleVisible(estate, rel, tempSlot); |
411 | 0 | ExecClearTuple(tempSlot); |
412 | 0 | } |
413 | | |
414 | | /* |
415 | | * Initialize generated columns handling for a tuple |
416 | | * |
417 | | * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or |
418 | | * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype. |
419 | | * This is used only for stored generated columns. |
420 | | * |
421 | | * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too. |
422 | | * This is used by both stored and virtual generated columns. |
423 | | * |
424 | | * Note: usually, a given query would need only one of ri_GeneratedExprsI and |
425 | | * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can |
426 | | * cross-partition UPDATEs, since a partition might be the target of both |
427 | | * UPDATE and INSERT actions. |
428 | | */ |
429 | | void |
430 | | ExecInitGenerated(ResultRelInfo *resultRelInfo, |
431 | | EState *estate, |
432 | | CmdType cmdtype) |
433 | 0 | { |
434 | 0 | Relation rel = resultRelInfo->ri_RelationDesc; |
435 | 0 | TupleDesc tupdesc = RelationGetDescr(rel); |
436 | 0 | int natts = tupdesc->natts; |
437 | 0 | ExprState **ri_GeneratedExprs; |
438 | 0 | int ri_NumGeneratedNeeded; |
439 | 0 | Bitmapset *updatedCols; |
440 | 0 | MemoryContext oldContext; |
441 | | |
442 | | /* Nothing to do if no generated columns */ |
443 | 0 | if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual))) |
444 | 0 | return; |
445 | | |
446 | | /* |
447 | | * In an UPDATE, we can skip computing any generated columns that do not |
448 | | * depend on any UPDATE target column. But if there is a BEFORE ROW |
449 | | * UPDATE trigger, we cannot skip because the trigger might change more |
450 | | * columns. |
451 | | */ |
452 | 0 | if (cmdtype == CMD_UPDATE && |
453 | 0 | !(rel->trigdesc && rel->trigdesc->trig_update_before_row)) |
454 | 0 | updatedCols = ExecGetUpdatedCols(resultRelInfo, estate); |
455 | 0 | else |
456 | 0 | updatedCols = NULL; |
457 | | |
458 | | /* |
459 | | * Make sure these data structures are built in the per-query memory |
460 | | * context so they'll survive throughout the query. |
461 | | */ |
462 | 0 | oldContext = MemoryContextSwitchTo(estate->es_query_cxt); |
463 | |
|
464 | 0 | ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *)); |
465 | 0 | ri_NumGeneratedNeeded = 0; |
466 | |
|
467 | 0 | for (int i = 0; i < natts; i++) |
468 | 0 | { |
469 | 0 | char attgenerated = TupleDescAttr(tupdesc, i)->attgenerated; |
470 | |
|
471 | 0 | if (attgenerated) |
472 | 0 | { |
473 | 0 | Expr *expr; |
474 | | |
475 | | /* Fetch the GENERATED AS expression tree */ |
476 | 0 | expr = (Expr *) build_column_default(rel, i + 1); |
477 | 0 | if (expr == NULL) |
478 | 0 | elog(ERROR, "no generation expression found for column number %d of table \"%s\"", |
479 | 0 | i + 1, RelationGetRelationName(rel)); |
480 | | |
481 | | /* |
482 | | * If it's an update with a known set of update target columns, |
483 | | * see if we can skip the computation. |
484 | | */ |
485 | 0 | if (updatedCols) |
486 | 0 | { |
487 | 0 | Bitmapset *attrs_used = NULL; |
488 | |
|
489 | 0 | pull_varattnos((Node *) expr, 1, &attrs_used); |
490 | |
|
491 | 0 | if (!bms_overlap(updatedCols, attrs_used)) |
492 | 0 | continue; /* need not update this column */ |
493 | 0 | } |
494 | | |
495 | | /* No luck, so prepare the expression for execution */ |
496 | 0 | if (attgenerated == ATTRIBUTE_GENERATED_STORED) |
497 | 0 | { |
498 | 0 | ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate); |
499 | 0 | ri_NumGeneratedNeeded++; |
500 | 0 | } |
501 | | |
502 | | /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */ |
503 | 0 | if (cmdtype == CMD_UPDATE) |
504 | 0 | resultRelInfo->ri_extraUpdatedCols = |
505 | 0 | bms_add_member(resultRelInfo->ri_extraUpdatedCols, |
506 | 0 | i + 1 - FirstLowInvalidHeapAttributeNumber); |
507 | 0 | } |
508 | 0 | } |
509 | | |
510 | 0 | if (ri_NumGeneratedNeeded == 0) |
511 | 0 | { |
512 | | /* didn't need it after all */ |
513 | 0 | pfree(ri_GeneratedExprs); |
514 | 0 | ri_GeneratedExprs = NULL; |
515 | 0 | } |
516 | | |
517 | | /* Save in appropriate set of fields */ |
518 | 0 | if (cmdtype == CMD_UPDATE) |
519 | 0 | { |
520 | | /* Don't call twice */ |
521 | 0 | Assert(resultRelInfo->ri_GeneratedExprsU == NULL); |
522 | |
|
523 | 0 | resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs; |
524 | 0 | resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded; |
525 | |
|
526 | 0 | resultRelInfo->ri_extraUpdatedCols_valid = true; |
527 | 0 | } |
528 | 0 | else |
529 | 0 | { |
530 | | /* Don't call twice */ |
531 | 0 | Assert(resultRelInfo->ri_GeneratedExprsI == NULL); |
532 | |
|
533 | 0 | resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs; |
534 | 0 | resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded; |
535 | 0 | } |
536 | |
|
537 | 0 | MemoryContextSwitchTo(oldContext); |
538 | 0 | } |
539 | | |
540 | | /* |
541 | | * Compute stored generated columns for a tuple |
542 | | */ |
543 | | void |
544 | | ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo, |
545 | | EState *estate, TupleTableSlot *slot, |
546 | | CmdType cmdtype) |
547 | 0 | { |
548 | 0 | Relation rel = resultRelInfo->ri_RelationDesc; |
549 | 0 | TupleDesc tupdesc = RelationGetDescr(rel); |
550 | 0 | int natts = tupdesc->natts; |
551 | 0 | ExprContext *econtext = GetPerTupleExprContext(estate); |
552 | 0 | ExprState **ri_GeneratedExprs; |
553 | 0 | MemoryContext oldContext; |
554 | 0 | Datum *values; |
555 | 0 | bool *nulls; |
556 | | |
557 | | /* We should not be called unless this is true */ |
558 | 0 | Assert(tupdesc->constr && tupdesc->constr->has_generated_stored); |
559 | | |
560 | | /* |
561 | | * Initialize the expressions if we didn't already, and check whether we |
562 | | * can exit early because nothing needs to be computed. |
563 | | */ |
564 | 0 | if (cmdtype == CMD_UPDATE) |
565 | 0 | { |
566 | 0 | if (resultRelInfo->ri_GeneratedExprsU == NULL) |
567 | 0 | ExecInitGenerated(resultRelInfo, estate, cmdtype); |
568 | 0 | if (resultRelInfo->ri_NumGeneratedNeededU == 0) |
569 | 0 | return; |
570 | 0 | ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU; |
571 | 0 | } |
572 | 0 | else |
573 | 0 | { |
574 | 0 | if (resultRelInfo->ri_GeneratedExprsI == NULL) |
575 | 0 | ExecInitGenerated(resultRelInfo, estate, cmdtype); |
576 | | /* Early exit is impossible given the prior Assert */ |
577 | 0 | Assert(resultRelInfo->ri_NumGeneratedNeededI > 0); |
578 | 0 | ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI; |
579 | 0 | } |
580 | | |
581 | 0 | oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); |
582 | |
|
583 | 0 | values = palloc(sizeof(*values) * natts); |
584 | 0 | nulls = palloc(sizeof(*nulls) * natts); |
585 | |
|
586 | 0 | slot_getallattrs(slot); |
587 | 0 | memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts); |
588 | |
|
589 | 0 | for (int i = 0; i < natts; i++) |
590 | 0 | { |
591 | 0 | CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i); |
592 | |
|
593 | 0 | if (ri_GeneratedExprs[i]) |
594 | 0 | { |
595 | 0 | Datum val; |
596 | 0 | bool isnull; |
597 | |
|
598 | 0 | Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED); |
599 | |
|
600 | 0 | econtext->ecxt_scantuple = slot; |
601 | |
|
602 | 0 | val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull); |
603 | | |
604 | | /* |
605 | | * We must make a copy of val as we have no guarantees about where |
606 | | * memory for a pass-by-reference Datum is located. |
607 | | */ |
608 | 0 | if (!isnull) |
609 | 0 | val = datumCopy(val, attr->attbyval, attr->attlen); |
610 | |
|
611 | 0 | values[i] = val; |
612 | 0 | nulls[i] = isnull; |
613 | 0 | } |
614 | 0 | else |
615 | 0 | { |
616 | 0 | if (!nulls[i]) |
617 | 0 | values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen); |
618 | 0 | } |
619 | 0 | } |
620 | |
|
621 | 0 | ExecClearTuple(slot); |
622 | 0 | memcpy(slot->tts_values, values, sizeof(*values) * natts); |
623 | 0 | memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts); |
624 | 0 | ExecStoreVirtualTuple(slot); |
625 | 0 | ExecMaterializeSlot(slot); |
626 | |
|
627 | 0 | MemoryContextSwitchTo(oldContext); |
628 | 0 | } |
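As a usage reference, the pattern below mirrors how the INSERT and UPDATE paths later in this file guard the call: stored generated columns are computed only when the relation's constraint descriptor says it has any. The wrapper name is invented for illustration.

static void
ComputeStoredGeneratedIfNeededSketch(ResultRelInfo *resultRelInfo,
									 EState *estate,
									 TupleTableSlot *slot,
									 CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;

	if (rel->rd_att->constr &&
		rel->rd_att->constr->has_generated_stored)
		ExecComputeStoredGenerated(resultRelInfo, estate, slot, cmdtype);
}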
629 | | |
630 | | /* |
631 | | * ExecInitInsertProjection |
632 | | * Do one-time initialization of projection data for INSERT tuples. |
633 | | * |
634 | | * INSERT queries may need a projection to filter out junk attrs in the tlist. |
635 | | * |
636 | | * This is also a convenient place to verify that the |
637 | | * output of an INSERT matches the target table. |
638 | | */ |
639 | | static void |
640 | | ExecInitInsertProjection(ModifyTableState *mtstate, |
641 | | ResultRelInfo *resultRelInfo) |
642 | 0 | { |
643 | 0 | ModifyTable *node = (ModifyTable *) mtstate->ps.plan; |
644 | 0 | Plan *subplan = outerPlan(node); |
645 | 0 | EState *estate = mtstate->ps.state; |
646 | 0 | List *insertTargetList = NIL; |
647 | 0 | bool need_projection = false; |
648 | 0 | ListCell *l; |
649 | | |
650 | | /* Extract non-junk columns of the subplan's result tlist. */ |
651 | 0 | foreach(l, subplan->targetlist) |
652 | 0 | { |
653 | 0 | TargetEntry *tle = (TargetEntry *) lfirst(l); |
654 | |
|
655 | 0 | if (!tle->resjunk) |
656 | 0 | insertTargetList = lappend(insertTargetList, tle); |
657 | 0 | else |
658 | 0 | need_projection = true; |
659 | 0 | } |
660 | | |
661 | | /* |
662 | | * The junk-free list must produce a tuple suitable for the result |
663 | | * relation. |
664 | | */ |
665 | 0 | ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList); |
666 | | |
667 | | /* We'll need a slot matching the table's format. */ |
668 | 0 | resultRelInfo->ri_newTupleSlot = |
669 | 0 | table_slot_create(resultRelInfo->ri_RelationDesc, |
670 | 0 | &estate->es_tupleTable); |
671 | | |
672 | | /* Build ProjectionInfo if needed (it probably isn't). */ |
673 | 0 | if (need_projection) |
674 | 0 | { |
675 | 0 | TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc); |
676 | | |
677 | | /* need an expression context to do the projection */ |
678 | 0 | if (mtstate->ps.ps_ExprContext == NULL) |
679 | 0 | ExecAssignExprContext(estate, &mtstate->ps); |
680 | |
|
681 | 0 | resultRelInfo->ri_projectNew = |
682 | 0 | ExecBuildProjectionInfo(insertTargetList, |
683 | 0 | mtstate->ps.ps_ExprContext, |
684 | 0 | resultRelInfo->ri_newTupleSlot, |
685 | 0 | &mtstate->ps, |
686 | 0 | relDesc); |
687 | 0 | } |
688 | |
|
689 | 0 | resultRelInfo->ri_projectNewInfoValid = true; |
690 | 0 | } |
691 | | |
692 | | /* |
693 | | * ExecInitUpdateProjection |
694 | | * Do one-time initialization of projection data for UPDATE tuples. |
695 | | * |
696 | | * UPDATE always needs a projection, because (1) there's always some junk |
697 | | * attrs, and (2) we may need to merge values of not-updated columns from |
698 | | * the old tuple into the final tuple. In UPDATE, the tuple arriving from |
699 | | * the subplan contains only new values for the changed columns, plus row |
700 | | * identity info in the junk attrs. |
701 | | * |
702 | | * This is "one-time" for any given result rel, but we might touch more than |
703 | | * one result rel in the course of an inherited UPDATE, and each one needs |
704 | | * its own projection due to possible column order variation. |
705 | | * |
706 | | * This is also a convenient place to verify that the output of an UPDATE |
707 | | * matches the target table (ExecBuildUpdateProjection does that). |
708 | | */ |
709 | | static void |
710 | | ExecInitUpdateProjection(ModifyTableState *mtstate, |
711 | | ResultRelInfo *resultRelInfo) |
712 | 0 | { |
713 | 0 | ModifyTable *node = (ModifyTable *) mtstate->ps.plan; |
714 | 0 | Plan *subplan = outerPlan(node); |
715 | 0 | EState *estate = mtstate->ps.state; |
716 | 0 | TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc); |
717 | 0 | int whichrel; |
718 | 0 | List *updateColnos; |
719 | | |
720 | | /* |
721 | | * Usually, mt_lastResultIndex matches the target rel. If it happens not |
722 | | * to, we can get the index the hard way with an integer division. |
723 | | */ |
724 | 0 | whichrel = mtstate->mt_lastResultIndex; |
725 | 0 | if (resultRelInfo != mtstate->resultRelInfo + whichrel) |
726 | 0 | { |
727 | 0 | whichrel = resultRelInfo - mtstate->resultRelInfo; |
728 | 0 | Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels); |
729 | 0 | } |
730 | |
|
731 | 0 | updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel); |
732 | | |
733 | | /* |
734 | | * For UPDATE, we use the old tuple to fill up missing values in the tuple |
735 | | * produced by the subplan to get the new tuple. We need two slots, both |
736 | | * matching the table's desired format. |
737 | | */ |
738 | 0 | resultRelInfo->ri_oldTupleSlot = |
739 | 0 | table_slot_create(resultRelInfo->ri_RelationDesc, |
740 | 0 | &estate->es_tupleTable); |
741 | 0 | resultRelInfo->ri_newTupleSlot = |
742 | 0 | table_slot_create(resultRelInfo->ri_RelationDesc, |
743 | 0 | &estate->es_tupleTable); |
744 | | |
745 | | /* need an expression context to do the projection */ |
746 | 0 | if (mtstate->ps.ps_ExprContext == NULL) |
747 | 0 | ExecAssignExprContext(estate, &mtstate->ps); |
748 | |
|
749 | 0 | resultRelInfo->ri_projectNew = |
750 | 0 | ExecBuildUpdateProjection(subplan->targetlist, |
751 | 0 | false, /* subplan did the evaluation */ |
752 | 0 | updateColnos, |
753 | 0 | relDesc, |
754 | 0 | mtstate->ps.ps_ExprContext, |
755 | 0 | resultRelInfo->ri_newTupleSlot, |
756 | 0 | &mtstate->ps); |
757 | |
|
758 | 0 | resultRelInfo->ri_projectNewInfoValid = true; |
759 | 0 | } |
760 | | |
761 | | /* |
762 | | * ExecGetInsertNewTuple |
763 | | * This prepares a "new" tuple ready to be inserted into given result |
764 | | * relation, by removing any junk columns of the plan's output tuple |
765 | | * and (if necessary) coercing the tuple to the right tuple format. |
766 | | */ |
767 | | static TupleTableSlot * |
768 | | ExecGetInsertNewTuple(ResultRelInfo *relinfo, |
769 | | TupleTableSlot *planSlot) |
770 | 0 | { |
771 | 0 | ProjectionInfo *newProj = relinfo->ri_projectNew; |
772 | 0 | ExprContext *econtext; |
773 | | |
774 | | /* |
775 | | * If there's no projection to be done, just make sure the slot is of the |
776 | | * right type for the target rel. If the planSlot is the right type we |
777 | | * can use it as-is, else copy the data into ri_newTupleSlot. |
778 | | */ |
779 | 0 | if (newProj == NULL) |
780 | 0 | { |
781 | 0 | if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops) |
782 | 0 | { |
783 | 0 | ExecCopySlot(relinfo->ri_newTupleSlot, planSlot); |
784 | 0 | return relinfo->ri_newTupleSlot; |
785 | 0 | } |
786 | 0 | else |
787 | 0 | return planSlot; |
788 | 0 | } |
789 | | |
790 | | /* |
791 | | * Else project; since the projection output slot is ri_newTupleSlot, this |
792 | | * will also fix any slot-type problem. |
793 | | * |
794 | | * Note: currently, this is dead code, because INSERT cases don't receive |
795 | | * any junk columns so there's never a projection to be done. |
796 | | */ |
797 | 0 | econtext = newProj->pi_exprContext; |
798 | 0 | econtext->ecxt_outertuple = planSlot; |
799 | 0 | return ExecProject(newProj); |
800 | 0 | } |
801 | | |
802 | | /* |
803 | | * ExecGetUpdateNewTuple |
804 | | * This prepares a "new" tuple by combining an UPDATE subplan's output |
805 | | * tuple (which contains values of changed columns) with unchanged |
806 | | * columns taken from the old tuple. |
807 | | * |
808 | | * The subplan tuple might also contain junk columns, which are ignored. |
809 | | * Note that the projection also ensures we have a slot of the right type. |
810 | | */ |
811 | | TupleTableSlot * |
812 | | ExecGetUpdateNewTuple(ResultRelInfo *relinfo, |
813 | | TupleTableSlot *planSlot, |
814 | | TupleTableSlot *oldSlot) |
815 | 0 | { |
816 | 0 | ProjectionInfo *newProj = relinfo->ri_projectNew; |
817 | 0 | ExprContext *econtext; |
818 | | |
819 | | /* Use a few extra Asserts to protect against outside callers */ |
820 | 0 | Assert(relinfo->ri_projectNewInfoValid); |
821 | 0 | Assert(planSlot != NULL && !TTS_EMPTY(planSlot)); |
822 | 0 | Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot)); |
823 | |
|
824 | 0 | econtext = newProj->pi_exprContext; |
825 | 0 | econtext->ecxt_outertuple = planSlot; |
826 | 0 | econtext->ecxt_scantuple = oldSlot; |
827 | 0 | return ExecProject(newProj); |
828 | 0 | } |
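A hedged sketch of typical UPDATE-path usage (the real call sites are in ExecModifyTable and the update/merge code beyond this excerpt): fetch the old row into ri_oldTupleSlot, then let the projection merge it with the subplan's changed-column values. The helper name is illustrative.

static TupleTableSlot *
BuildUpdateNewTupleSketch(ResultRelInfo *resultRelInfo,
						  ItemPointer tupleid,
						  TupleTableSlot *planSlot)
{
	TupleTableSlot *oldSlot = resultRelInfo->ri_oldTupleSlot;

	/* Fetch the most recent version of the row being updated */
	if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
									   tupleid, SnapshotAny, oldSlot))
		elog(ERROR, "failed to fetch tuple being updated");

	return ExecGetUpdateNewTuple(resultRelInfo, planSlot, oldSlot);
}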
829 | | |
830 | | /* ---------------------------------------------------------------- |
831 | | * ExecInsert |
832 | | * |
833 | | * For INSERT, we have to insert the tuple into the target relation |
834 | | * (or partition thereof) and insert appropriate tuples into the index |
835 | | * relations. |
836 | | * |
837 | | * slot contains the new tuple value to be stored. |
838 | | * |
839 | | * Returns RETURNING result if any, otherwise NULL. |
840 | | * *inserted_tuple is the tuple that's effectively inserted; |
841 | | * *insert_destrel is the relation where it was inserted. |
842 | | * These are only set on success. |
843 | | * |
844 | | * This may change the currently active tuple conversion map in |
845 | | * mtstate->mt_transition_capture, so the callers must take care to |
846 | | * save the previous value to avoid losing track of it. |
847 | | * ---------------------------------------------------------------- |
848 | | */ |
849 | | static TupleTableSlot * |
850 | | ExecInsert(ModifyTableContext *context, |
851 | | ResultRelInfo *resultRelInfo, |
852 | | TupleTableSlot *slot, |
853 | | bool canSetTag, |
854 | | TupleTableSlot **inserted_tuple, |
855 | | ResultRelInfo **insert_destrel) |
856 | 0 | { |
857 | 0 | ModifyTableState *mtstate = context->mtstate; |
858 | 0 | EState *estate = context->estate; |
859 | 0 | Relation resultRelationDesc; |
860 | 0 | List *recheckIndexes = NIL; |
861 | 0 | TupleTableSlot *planSlot = context->planSlot; |
862 | 0 | TupleTableSlot *result = NULL; |
863 | 0 | TransitionCaptureState *ar_insert_trig_tcs; |
864 | 0 | ModifyTable *node = (ModifyTable *) mtstate->ps.plan; |
865 | 0 | OnConflictAction onconflict = node->onConflictAction; |
866 | 0 | PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; |
867 | 0 | MemoryContext oldContext; |
868 | | |
869 | | /* |
870 | | * If the input result relation is a partitioned table, find the leaf |
871 | | * partition to insert the tuple into. |
872 | | */ |
873 | 0 | if (proute) |
874 | 0 | { |
875 | 0 | ResultRelInfo *partRelInfo; |
876 | |
|
877 | 0 | slot = ExecPrepareTupleRouting(mtstate, estate, proute, |
878 | 0 | resultRelInfo, slot, |
879 | 0 | &partRelInfo); |
880 | 0 | resultRelInfo = partRelInfo; |
881 | 0 | } |
882 | |
|
883 | 0 | ExecMaterializeSlot(slot); |
884 | |
|
885 | 0 | resultRelationDesc = resultRelInfo->ri_RelationDesc; |
886 | | |
887 | | /* |
888 | | * Open the table's indexes, if we have not done so already, so that we |
889 | | * can add new index entries for the inserted tuple. |
890 | | */ |
891 | 0 | if (resultRelationDesc->rd_rel->relhasindex && |
892 | 0 | resultRelInfo->ri_IndexRelationDescs == NULL) |
893 | 0 | ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE); |
894 | | |
895 | | /* |
896 | | * BEFORE ROW INSERT Triggers. |
897 | | * |
898 | | * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an |
899 | | * INSERT ... ON CONFLICT statement. We cannot check for constraint |
900 | | * violations before firing these triggers, because they can change the |
901 | | * values to insert. Also, they can run arbitrary user-defined code with |
902 | | * side-effects that we can't cancel by just not inserting the tuple. |
903 | | */ |
904 | 0 | if (resultRelInfo->ri_TrigDesc && |
905 | 0 | resultRelInfo->ri_TrigDesc->trig_insert_before_row) |
906 | 0 | { |
907 | | /* Flush any pending inserts, so rows are visible to the triggers */ |
908 | 0 | if (estate->es_insert_pending_result_relations != NIL) |
909 | 0 | ExecPendingInserts(estate); |
910 | |
|
911 | 0 | if (!ExecBRInsertTriggers(estate, resultRelInfo, slot)) |
912 | 0 | return NULL; /* "do nothing" */ |
913 | 0 | } |
914 | | |
915 | | /* INSTEAD OF ROW INSERT Triggers */ |
916 | 0 | if (resultRelInfo->ri_TrigDesc && |
917 | 0 | resultRelInfo->ri_TrigDesc->trig_insert_instead_row) |
918 | 0 | { |
919 | 0 | if (!ExecIRInsertTriggers(estate, resultRelInfo, slot)) |
920 | 0 | return NULL; /* "do nothing" */ |
921 | 0 | } |
922 | 0 | else if (resultRelInfo->ri_FdwRoutine) |
923 | 0 | { |
924 | | /* |
925 | | * GENERATED expressions might reference the tableoid column, so |
926 | | * (re-)initialize tts_tableOid before evaluating them. |
927 | | */ |
928 | 0 | slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); |
929 | | |
930 | | /* |
931 | | * Compute stored generated columns |
932 | | */ |
933 | 0 | if (resultRelationDesc->rd_att->constr && |
934 | 0 | resultRelationDesc->rd_att->constr->has_generated_stored) |
935 | 0 | ExecComputeStoredGenerated(resultRelInfo, estate, slot, |
936 | 0 | CMD_INSERT); |
937 | | |
938 | | /* |
939 | | * If the FDW supports batching, and batching is requested, accumulate |
940 | | * rows and insert them in batches. Otherwise use the per-row inserts. |
941 | | */ |
942 | 0 | if (resultRelInfo->ri_BatchSize > 1) |
943 | 0 | { |
944 | 0 | bool flushed = false; |
945 | | |
946 | | /* |
947 | | * When we've reached the desired batch size, perform the |
948 | | * insertion. |
949 | | */ |
950 | 0 | if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize) |
951 | 0 | { |
952 | 0 | ExecBatchInsert(mtstate, resultRelInfo, |
953 | 0 | resultRelInfo->ri_Slots, |
954 | 0 | resultRelInfo->ri_PlanSlots, |
955 | 0 | resultRelInfo->ri_NumSlots, |
956 | 0 | estate, canSetTag); |
957 | 0 | flushed = true; |
958 | 0 | } |
959 | |
|
960 | 0 | oldContext = MemoryContextSwitchTo(estate->es_query_cxt); |
961 | |
|
962 | 0 | if (resultRelInfo->ri_Slots == NULL) |
963 | 0 | { |
964 | 0 | resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) * |
965 | 0 | resultRelInfo->ri_BatchSize); |
966 | 0 | resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) * |
967 | 0 | resultRelInfo->ri_BatchSize); |
968 | 0 | } |
969 | | |
970 | | /* |
971 | | * Initialize the batch slots. We don't know how many slots will |
972 | | * be needed, so we initialize them as the batch grows, and we |
973 | | * keep them across batches. To mitigate an inefficiency in how |
974 | | * resource owner handles objects with many references (as with |
975 | | * many slots all referencing the same tuple descriptor) we copy |
976 | | * the appropriate tuple descriptor for each slot. |
977 | | */ |
978 | 0 | if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized) |
979 | 0 | { |
980 | 0 | TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor); |
981 | 0 | TupleDesc plan_tdesc = |
982 | 0 | CreateTupleDescCopy(planSlot->tts_tupleDescriptor); |
983 | |
|
984 | 0 | resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] = |
985 | 0 | MakeSingleTupleTableSlot(tdesc, slot->tts_ops); |
986 | |
|
987 | 0 | resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] = |
988 | 0 | MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops); |
989 | | |
990 | | /* remember how many batch slots we initialized */ |
991 | 0 | resultRelInfo->ri_NumSlotsInitialized++; |
992 | 0 | } |
993 | |
|
994 | 0 | ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots], |
995 | 0 | slot); |
996 | |
|
997 | 0 | ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots], |
998 | 0 | planSlot); |
999 | | |
1000 | | /* |
1001 | | * If these are the first tuples stored in the buffers, add the |
1002 | | * target rel and the mtstate to the |
1003 | | * es_insert_pending_result_relations and |
1004 | | * es_insert_pending_modifytables lists respectively, except in |
1005 | | * the case where flushing was done above, in which case they |
1006 | | * would already have been added to the lists, so no need to do |
1007 | | * this. |
1008 | | */ |
1009 | 0 | if (resultRelInfo->ri_NumSlots == 0 && !flushed) |
1010 | 0 | { |
1011 | 0 | Assert(!list_member_ptr(estate->es_insert_pending_result_relations, |
1012 | 0 | resultRelInfo)); |
1013 | 0 | estate->es_insert_pending_result_relations = |
1014 | 0 | lappend(estate->es_insert_pending_result_relations, |
1015 | 0 | resultRelInfo); |
1016 | 0 | estate->es_insert_pending_modifytables = |
1017 | 0 | lappend(estate->es_insert_pending_modifytables, mtstate); |
1018 | 0 | } |
1019 | 0 | Assert(list_member_ptr(estate->es_insert_pending_result_relations, |
1020 | 0 | resultRelInfo)); |
1021 | |
|
1022 | 0 | resultRelInfo->ri_NumSlots++; |
1023 | |
|
1024 | 0 | MemoryContextSwitchTo(oldContext); |
1025 | |
|
1026 | 0 | return NULL; |
1027 | 0 | } |
1028 | | |
1029 | | /* |
1030 | | * insert into foreign table: let the FDW do it |
1031 | | */ |
1032 | 0 | slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate, |
1033 | 0 | resultRelInfo, |
1034 | 0 | slot, |
1035 | 0 | planSlot); |
1036 | |
|
1037 | 0 | if (slot == NULL) /* "do nothing" */ |
1038 | 0 | return NULL; |
1039 | | |
1040 | | /* |
1041 | | * AFTER ROW Triggers or RETURNING expressions might reference the |
1042 | | * tableoid column, so (re-)initialize tts_tableOid before evaluating |
1043 | | * them. (This covers the case where the FDW replaced the slot.) |
1044 | | */ |
1045 | 0 | slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); |
1046 | 0 | } |
1047 | 0 | else |
1048 | 0 | { |
1049 | 0 | WCOKind wco_kind; |
1050 | | |
1051 | | /* |
1052 | | * Constraints and GENERATED expressions might reference the tableoid |
1053 | | * column, so (re-)initialize tts_tableOid before evaluating them. |
1054 | | */ |
1055 | 0 | slot->tts_tableOid = RelationGetRelid(resultRelationDesc); |
1056 | | |
1057 | | /* |
1058 | | * Compute stored generated columns |
1059 | | */ |
1060 | 0 | if (resultRelationDesc->rd_att->constr && |
1061 | 0 | resultRelationDesc->rd_att->constr->has_generated_stored) |
1062 | 0 | ExecComputeStoredGenerated(resultRelInfo, estate, slot, |
1063 | 0 | CMD_INSERT); |
1064 | | |
1065 | | /* |
1066 | | * Check any RLS WITH CHECK policies. |
1067 | | * |
1068 | | * Normally we should check INSERT policies. But if the insert is the |
1069 | | * result of a partition key update that moved the tuple to a new |
1070 | | * partition, we should instead check UPDATE policies, because we are |
1071 | | * executing policies defined on the target table, and not those |
1072 | | * defined on the child partitions. |
1073 | | * |
1074 | | * If we're running MERGE, we refer to the action that we're executing |
1075 | | * to know if we're doing an INSERT or UPDATE to a partition table. |
1076 | | */ |
1077 | 0 | if (mtstate->operation == CMD_UPDATE) |
1078 | 0 | wco_kind = WCO_RLS_UPDATE_CHECK; |
1079 | 0 | else if (mtstate->operation == CMD_MERGE) |
1080 | 0 | wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ? |
1081 | 0 | WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK; |
1082 | 0 | else |
1083 | 0 | wco_kind = WCO_RLS_INSERT_CHECK; |
1084 | | |
1085 | | /* |
1086 | | * ExecWithCheckOptions() will skip any WCOs which are not of the kind |
1087 | | * we are looking for at this point. |
1088 | | */ |
1089 | 0 | if (resultRelInfo->ri_WithCheckOptions != NIL) |
1090 | 0 | ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate); |
1091 | | |
1092 | | /* |
1093 | | * Check the constraints of the tuple. |
1094 | | */ |
1095 | 0 | if (resultRelationDesc->rd_att->constr) |
1096 | 0 | ExecConstraints(resultRelInfo, slot, estate); |
1097 | | |
1098 | | /* |
1099 | | * Also check the tuple against the partition constraint, if there is |
1100 | | * one; except that if we got here via tuple-routing, we don't need to |
1101 | | * if there's no BR trigger defined on the partition. |
1102 | | */ |
1103 | 0 | if (resultRelationDesc->rd_rel->relispartition && |
1104 | 0 | (resultRelInfo->ri_RootResultRelInfo == NULL || |
1105 | 0 | (resultRelInfo->ri_TrigDesc && |
1106 | 0 | resultRelInfo->ri_TrigDesc->trig_insert_before_row))) |
1107 | 0 | ExecPartitionCheck(resultRelInfo, slot, estate, true); |
1108 | |
|
1109 | 0 | if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0) |
1110 | 0 | { |
1111 | | /* Perform a speculative insertion. */ |
1112 | 0 | uint32 specToken; |
1113 | 0 | ItemPointerData conflictTid; |
1114 | 0 | ItemPointerData invalidItemPtr; |
1115 | 0 | bool specConflict; |
1116 | 0 | List *arbiterIndexes; |
1117 | |
|
1118 | 0 | ItemPointerSetInvalid(&invalidItemPtr); |
1119 | 0 | arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes; |
1120 | | |
1121 | | /* |
1122 | | * Do a non-conclusive check for conflicts first. |
1123 | | * |
1124 | | * We're not holding any locks yet, so this doesn't guarantee that |
1125 | | * the later insert won't conflict. But it avoids leaving behind |
1126 | | * a lot of canceled speculative insertions, if you run a lot of |
1127 | | * INSERT ON CONFLICT statements that do conflict. |
1128 | | * |
1129 | | * We loop back here if we find a conflict below, either during |
1130 | | * the pre-check, or when we re-check after inserting the tuple |
1131 | | * speculatively. Better allow interrupts in case some bug makes |
1132 | | * this an infinite loop. |
1133 | | */ |
1134 | 0 | vlock: |
1135 | 0 | CHECK_FOR_INTERRUPTS(); |
1136 | 0 | specConflict = false; |
1137 | 0 | if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate, |
1138 | 0 | &conflictTid, &invalidItemPtr, |
1139 | 0 | arbiterIndexes)) |
1140 | 0 | { |
1141 | | /* committed conflict tuple found */ |
1142 | 0 | if (onconflict == ONCONFLICT_UPDATE) |
1143 | 0 | { |
1144 | | /* |
1145 | | * In case of ON CONFLICT DO UPDATE, execute the UPDATE |
1146 | | * part. Be prepared to retry if the UPDATE fails because |
1147 | | * of another concurrent UPDATE/DELETE to the conflict |
1148 | | * tuple. |
1149 | | */ |
1150 | 0 | TupleTableSlot *returning = NULL; |
1151 | |
|
1152 | 0 | if (ExecOnConflictUpdate(context, resultRelInfo, |
1153 | 0 | &conflictTid, slot, canSetTag, |
1154 | 0 | &returning)) |
1155 | 0 | { |
1156 | 0 | InstrCountTuples2(&mtstate->ps, 1); |
1157 | 0 | return returning; |
1158 | 0 | } |
1159 | 0 | else |
1160 | 0 | goto vlock; |
1161 | 0 | } |
1162 | 0 | else |
1163 | 0 | { |
1164 | | /* |
1165 | | * In case of ON CONFLICT DO NOTHING, do nothing. However, |
1166 | | * verify that the tuple is visible to the executor's MVCC |
1167 | | * snapshot at higher isolation levels. |
1168 | | * |
1169 | | * Using ExecGetReturningSlot() to store the tuple for the |
1170 | | * recheck isn't that pretty, but we can't trivially use |
1171 | | * the input slot, because it might not be of a compatible |
1172 | | * type. As there's no conflicting usage of |
1173 | | * ExecGetReturningSlot() in the DO NOTHING case... |
1174 | | */ |
1175 | 0 | Assert(onconflict == ONCONFLICT_NOTHING); |
1176 | 0 | ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid, |
1177 | 0 | ExecGetReturningSlot(estate, resultRelInfo)); |
1178 | 0 | InstrCountTuples2(&mtstate->ps, 1); |
1179 | 0 | return NULL; |
1180 | 0 | } |
1181 | 0 | } |
1182 | | |
1183 | | /* |
1184 | | * Before we start insertion proper, acquire our "speculative |
1185 | | * insertion lock". Others can use that to wait for us to decide |
1186 | | * if we're going to go ahead with the insertion, instead of |
1187 | | * waiting for the whole transaction to complete. |
1188 | | */ |
1189 | 0 | specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId()); |
1190 | | |
1191 | | /* insert the tuple, with the speculative token */ |
1192 | 0 | table_tuple_insert_speculative(resultRelationDesc, slot, |
1193 | 0 | estate->es_output_cid, |
1194 | 0 | 0, |
1195 | 0 | NULL, |
1196 | 0 | specToken); |
1197 | | |
1198 | | /* insert index entries for tuple */ |
1199 | 0 | recheckIndexes = ExecInsertIndexTuples(resultRelInfo, |
1200 | 0 | slot, estate, false, true, |
1201 | 0 | &specConflict, |
1202 | 0 | arbiterIndexes, |
1203 | 0 | false); |
1204 | | |
1205 | | /* adjust the tuple's state accordingly */ |
1206 | 0 | table_tuple_complete_speculative(resultRelationDesc, slot, |
1207 | 0 | specToken, !specConflict); |
1208 | | |
1209 | | /* |
1210 | | * Wake up anyone waiting for our decision. They will re-check |
1211 | | * the tuple, see that it's no longer speculative, and wait on our |
1212 | | * XID as if this was a regularly inserted tuple all along. Or if |
1213 | | * we killed the tuple, they will see it's dead, and proceed as if |
1214 | | * the tuple never existed. |
1215 | | */ |
1216 | 0 | SpeculativeInsertionLockRelease(GetCurrentTransactionId()); |
1217 | | |
1218 | | /* |
1219 | | * If there was a conflict, start from the beginning. We'll do |
1220 | | * the pre-check again, which will now find the conflicting tuple |
1221 | | * (unless it aborts before we get there). |
1222 | | */ |
1223 | 0 | if (specConflict) |
1224 | 0 | { |
1225 | 0 | list_free(recheckIndexes); |
1226 | 0 | goto vlock; |
1227 | 0 | } |
1228 | | |
1229 | | /* Since there was no insertion conflict, we're done */ |
1230 | 0 | } |
1231 | 0 | else |
1232 | 0 | { |
1233 | | /* insert the tuple normally */ |
1234 | 0 | table_tuple_insert(resultRelationDesc, slot, |
1235 | 0 | estate->es_output_cid, |
1236 | 0 | 0, NULL); |
1237 | | |
1238 | | /* insert index entries for tuple */ |
1239 | 0 | if (resultRelInfo->ri_NumIndices > 0) |
1240 | 0 | recheckIndexes = ExecInsertIndexTuples(resultRelInfo, |
1241 | 0 | slot, estate, false, |
1242 | 0 | false, NULL, NIL, |
1243 | 0 | false); |
1244 | 0 | } |
1245 | 0 | } |
1246 | | |
1247 | 0 | if (canSetTag) |
1248 | 0 | (estate->es_processed)++; |
1249 | | |
1250 | | /* |
1251 | | * If this insert is the result of a partition key update that moved the |
1252 | | * tuple to a new partition, put this row into the transition NEW TABLE, |
1253 | | * if there is one. We need to do this separately for DELETE and INSERT |
1254 | | * because they happen on different tables. |
1255 | | */ |
1256 | 0 | ar_insert_trig_tcs = mtstate->mt_transition_capture; |
1257 | 0 | if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture |
1258 | 0 | && mtstate->mt_transition_capture->tcs_update_new_table) |
1259 | 0 | { |
1260 | 0 | ExecARUpdateTriggers(estate, resultRelInfo, |
1261 | 0 | NULL, NULL, |
1262 | 0 | NULL, |
1263 | 0 | NULL, |
1264 | 0 | slot, |
1265 | 0 | NULL, |
1266 | 0 | mtstate->mt_transition_capture, |
1267 | 0 | false); |
1268 | | |
1269 | | /* |
1270 | | * We've already captured the NEW TABLE row, so make sure any AR |
1271 | | * INSERT trigger fired below doesn't capture it again. |
1272 | | */ |
1273 | 0 | ar_insert_trig_tcs = NULL; |
1274 | 0 | } |
1275 | | |
1276 | | /* AFTER ROW INSERT Triggers */ |
1277 | 0 | ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes, |
1278 | 0 | ar_insert_trig_tcs); |
1279 | |
|
1280 | 0 | list_free(recheckIndexes); |
1281 | | |
1282 | | /* |
1283 | | * Check any WITH CHECK OPTION constraints from parent views. We are |
1284 | | * required to do this after testing all constraints and uniqueness |
1285 | | * violations per the SQL spec, so we do it after actually inserting the |
1286 | | * record into the heap and all indexes. |
1287 | | * |
1288 | | * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the |
1289 | | * tuple will never be seen, if it violates the WITH CHECK OPTION. |
1290 | | * |
1291 | | * ExecWithCheckOptions() will skip any WCOs which are not of the kind we |
1292 | | * are looking for at this point. |
1293 | | */ |
1294 | 0 | if (resultRelInfo->ri_WithCheckOptions != NIL) |
1295 | 0 | ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate); |
1296 | | |
1297 | | /* Process RETURNING if present */ |
1298 | 0 | if (resultRelInfo->ri_projectReturning) |
1299 | 0 | { |
1300 | 0 | TupleTableSlot *oldSlot = NULL; |
1301 | | |
1302 | | /* |
1303 | | * If this is part of a cross-partition UPDATE, and the RETURNING list |
1304 | | * refers to any OLD columns, ExecDelete() will have saved the tuple |
1305 | | * deleted from the original partition, which we must use here to |
1306 | | * compute the OLD column values. Otherwise, all OLD column values |
1307 | | * will be NULL. |
1308 | | */ |
1309 | 0 | if (context->cpDeletedSlot) |
1310 | 0 | { |
1311 | 0 | TupleConversionMap *tupconv_map; |
1312 | | |
1313 | | /* |
1314 | | * Convert the OLD tuple to the new partition's format/slot, if |
1315 | | * needed. Note that ExecDelete() already converted it to the |
1316 | | * root's partition's format/slot. |
1317 | | */ |
1318 | 0 | oldSlot = context->cpDeletedSlot; |
1319 | 0 | tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate); |
1320 | 0 | if (tupconv_map != NULL) |
1321 | 0 | { |
1322 | 0 | oldSlot = execute_attr_map_slot(tupconv_map->attrMap, |
1323 | 0 | oldSlot, |
1324 | 0 | ExecGetReturningSlot(estate, |
1325 | 0 | resultRelInfo)); |
1326 | |
|
1327 | 0 | oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid; |
1328 | 0 | ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid); |
1329 | 0 | } |
1330 | 0 | } |
1331 | |
|
1332 | 0 | result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT, |
1333 | 0 | oldSlot, slot, planSlot); |
1334 | | |
1335 | | /* |
1336 | | * For a cross-partition UPDATE, release the old tuple, first making |
1337 | | * sure that the result slot has a local copy of any pass-by-reference |
1338 | | * values. |
1339 | | */ |
1340 | 0 | if (context->cpDeletedSlot) |
1341 | 0 | { |
1342 | 0 | ExecMaterializeSlot(result); |
1343 | 0 | ExecClearTuple(oldSlot); |
1344 | 0 | if (context->cpDeletedSlot != oldSlot) |
1345 | 0 | ExecClearTuple(context->cpDeletedSlot); |
1346 | 0 | context->cpDeletedSlot = NULL; |
1347 | 0 | } |
1348 | 0 | } |
1349 | |
|
1350 | 0 | if (inserted_tuple) |
1351 | 0 | *inserted_tuple = slot; |
1352 | 0 | if (insert_destrel) |
1353 | 0 | *insert_destrel = resultRelInfo; |
1354 | |
|
1355 | 0 | return result; |
1356 | 0 | } |
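Because the ON CONFLICT handling above is interleaved with the DO UPDATE and DO NOTHING branches, the speculative-insertion protocol itself can be hard to follow in one pass. The function below is a condensed, illustrative restatement of just that protocol, with call signatures copied from ExecInsert above; it is a sketch, not a drop-in replacement, and omits the conflict-resolution branches.

static void
SpeculativeInsertSketch(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
						EState *estate, List *arbiterIndexes)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	ItemPointerData conflictTid;
	ItemPointerData invalidItemPtr;
	List	   *recheckIndexes;
	uint32		specToken;
	bool		specConflict;

	ItemPointerSetInvalid(&invalidItemPtr);

retry:
	CHECK_FOR_INTERRUPTS();
	specConflict = false;

	/* 1. Pre-check the arbiter indexes without holding any lock yet */
	if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
								   &conflictTid, &invalidItemPtr,
								   arbiterIndexes))
		return;					/* conflict found; the DO NOTHING / DO UPDATE
								 * branches take over here */

	/* 2. Advertise the speculative insertion, then insert the tuple */
	specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
	table_tuple_insert_speculative(rel, slot, estate->es_output_cid,
								   0, NULL, specToken);

	/* 3. Insert index entries; the arbiter indexes report any conflict */
	recheckIndexes = ExecInsertIndexTuples(resultRelInfo, slot, estate,
										   false, true, &specConflict,
										   arbiterIndexes, false);

	/* 4. Confirm or kill the tuple, then wake up anyone waiting on us */
	table_tuple_complete_speculative(rel, slot, specToken, !specConflict);
	SpeculativeInsertionLockRelease(GetCurrentTransactionId());

	/* 5. On conflict the tuple was killed; start over from the pre-check */
	list_free(recheckIndexes);
	if (specConflict)
		goto retry;
}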
1357 | | |
1358 | | /* ---------------------------------------------------------------- |
1359 | | * ExecBatchInsert |
1360 | | * |
1361 | | * Insert multiple tuples in an efficient way. |
1362 | | * Currently, this handles inserting into a foreign table without a |
1363 | | * RETURNING clause. |
1364 | | * ---------------------------------------------------------------- |
1365 | | */ |
1366 | | static void |
1367 | | ExecBatchInsert(ModifyTableState *mtstate, |
1368 | | ResultRelInfo *resultRelInfo, |
1369 | | TupleTableSlot **slots, |
1370 | | TupleTableSlot **planSlots, |
1371 | | int numSlots, |
1372 | | EState *estate, |
1373 | | bool canSetTag) |
1374 | 0 | { |
1375 | 0 | int i; |
1376 | 0 | int numInserted = numSlots; |
1377 | 0 | TupleTableSlot *slot = NULL; |
1378 | 0 | TupleTableSlot **rslots; |
1379 | | |
1380 | | /* |
1381 | | * insert into foreign table: let the FDW do it |
1382 | | */ |
1383 | 0 | rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate, |
1384 | 0 | resultRelInfo, |
1385 | 0 | slots, |
1386 | 0 | planSlots, |
1387 | 0 | &numInserted); |
1388 | |
|
1389 | 0 | for (i = 0; i < numInserted; i++) |
1390 | 0 | { |
1391 | 0 | slot = rslots[i]; |
1392 | | |
1393 | | /* |
1394 | | * AFTER ROW Triggers might reference the tableoid column, so |
1395 | | * (re-)initialize tts_tableOid before evaluating them. |
1396 | | */ |
1397 | 0 | slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); |
1398 | | |
1399 | | /* AFTER ROW INSERT Triggers */ |
1400 | 0 | ExecARInsertTriggers(estate, resultRelInfo, slot, NIL, |
1401 | 0 | mtstate->mt_transition_capture); |
1402 | | |
1403 | | /* |
1404 | | * Check any WITH CHECK OPTION constraints from parent views. See the |
1405 | | * comment in ExecInsert. |
1406 | | */ |
1407 | 0 | if (resultRelInfo->ri_WithCheckOptions != NIL) |
1408 | 0 | ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate); |
1409 | 0 | } |
1410 | |
|
1411 | 0 | if (canSetTag && numInserted > 0) |
1412 | 0 | estate->es_processed += numInserted; |
1413 | | |
1414 | | /* Clean up all the slots, ready for the next batch */ |
1415 | 0 | for (i = 0; i < numSlots; i++) |
1416 | 0 | { |
1417 | 0 | ExecClearTuple(slots[i]); |
1418 | 0 | ExecClearTuple(planSlots[i]); |
1419 | 0 | } |
1420 | 0 | resultRelInfo->ri_NumSlots = 0; |
1421 | 0 | } |
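| |
| | /* |
| |  * Note: ExecForeignBatchInsert() may insert fewer rows than it is |
| |  * handed; numInserted is updated to the count actually inserted. As |
| |  * the loop above shows, only the returned slots get AFTER ROW INSERT |
| |  * triggers and WITH CHECK OPTION checks, and only that many rows are |
| |  * added to es_processed. |
| |  */ |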
1422 | | |
1423 | | /* |
1424 | | * ExecPendingInserts -- flushes all pending inserts to the foreign tables |
1425 | | */ |
1426 | | static void |
1427 | | ExecPendingInserts(EState *estate) |
1428 | 0 | { |
1429 | 0 | ListCell *l1, |
1430 | 0 | *l2; |
1431 | |
|
1432 | 0 | forboth(l1, estate->es_insert_pending_result_relations, |
1433 | 0 | l2, estate->es_insert_pending_modifytables) |
1434 | 0 | { |
1435 | 0 | ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1); |
1436 | 0 | ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2); |
1437 | |
|
1438 | 0 | Assert(mtstate); |
1439 | 0 | ExecBatchInsert(mtstate, resultRelInfo, |
1440 | 0 | resultRelInfo->ri_Slots, |
1441 | 0 | resultRelInfo->ri_PlanSlots, |
1442 | 0 | resultRelInfo->ri_NumSlots, |
1443 | 0 | estate, mtstate->canSetTag); |
1444 | 0 | } |
1445 | |
|
1446 | 0 | list_free(estate->es_insert_pending_result_relations); |
1447 | 0 | list_free(estate->es_insert_pending_modifytables); |
1448 | 0 | estate->es_insert_pending_result_relations = NIL; |
1449 | 0 | estate->es_insert_pending_modifytables = NIL; |
1450 | 0 | } |
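| |
| | /* |
| |  * Note: batched inserts are flushed whenever their rows must become |
| |  * visible before other work proceeds; in this file that happens before |
| |  * BEFORE ROW DELETE and UPDATE triggers are fired, see the calls to |
| |  * ExecPendingInserts() in ExecDeletePrologue() and ExecUpdatePrologue() |
| |  * below. |
| |  */ |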
1451 | | |
1452 | | /* |
1453 | | * ExecDeletePrologue -- subroutine for ExecDelete |
1454 | | * |
1455 | | * Prepare executor state for DELETE. Actually, the only thing we have to do |
1456 | | * here is execute BEFORE ROW triggers. We return false if one of them makes |
1457 | | * the delete a no-op; otherwise, return true. |
1458 | | */ |
1459 | | static bool |
1460 | | ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
1461 | | ItemPointer tupleid, HeapTuple oldtuple, |
1462 | | TupleTableSlot **epqreturnslot, TM_Result *result) |
1463 | 0 | { |
1464 | 0 | if (result) |
1465 | 0 | *result = TM_Ok; |
1466 | | |
1467 | | /* BEFORE ROW DELETE triggers */ |
1468 | 0 | if (resultRelInfo->ri_TrigDesc && |
1469 | 0 | resultRelInfo->ri_TrigDesc->trig_delete_before_row) |
1470 | 0 | { |
1471 | | /* Flush any pending inserts, so rows are visible to the triggers */ |
1472 | 0 | if (context->estate->es_insert_pending_result_relations != NIL) |
1473 | 0 | ExecPendingInserts(context->estate); |
1474 | |
|
1475 | 0 | return ExecBRDeleteTriggers(context->estate, context->epqstate, |
1476 | 0 | resultRelInfo, tupleid, oldtuple, |
1477 | 0 | epqreturnslot, result, &context->tmfd); |
1478 | 0 | } |
1479 | | |
1480 | 0 | return true; |
1481 | 0 | } |
1482 | | |
1483 | | /* |
1484 | | * ExecDeleteAct -- subroutine for ExecDelete |
1485 | | * |
1486 | | * Actually delete the tuple from a plain table. |
1487 | | * |
1488 | | * Caller is in charge of doing EvalPlanQual as necessary |
1489 | | */ |
1490 | | static TM_Result |
1491 | | ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
1492 | | ItemPointer tupleid, bool changingPart) |
1493 | 0 | { |
1494 | 0 | EState *estate = context->estate; |
1495 | |
|
1496 | 0 | return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid, |
1497 | 0 | estate->es_output_cid, |
1498 | 0 | estate->es_snapshot, |
1499 | 0 | estate->es_crosscheck_snapshot, |
1500 | 0 | true /* wait for commit */ , |
1501 | 0 | &context->tmfd, |
1502 | 0 | changingPart); |
1503 | 0 | } |
1504 | | |
1505 | | /* |
1506 | | * ExecDeleteEpilogue -- subroutine for ExecDelete |
1507 | | * |
1508 | | * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers, |
1509 | | * including the UPDATE triggers if the deletion is being done as part of a |
1510 | | * cross-partition tuple move. |
1511 | | */ |
1512 | | static void |
1513 | | ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
1514 | | ItemPointer tupleid, HeapTuple oldtuple, bool changingPart) |
1515 | 0 | { |
1516 | 0 | ModifyTableState *mtstate = context->mtstate; |
1517 | 0 | EState *estate = context->estate; |
1518 | 0 | TransitionCaptureState *ar_delete_trig_tcs; |
1519 | | |
1520 | | /* |
1521 | | * If this delete is the result of a partition key update that moved the |
1522 | | * tuple to a new partition, put this row into the transition OLD TABLE, |
1523 | | * if there is one. We need to do this separately for DELETE and INSERT |
1524 | | * because they happen on different tables. |
1525 | | */ |
1526 | 0 | ar_delete_trig_tcs = mtstate->mt_transition_capture; |
1527 | 0 | if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture && |
1528 | 0 | mtstate->mt_transition_capture->tcs_update_old_table) |
1529 | 0 | { |
1530 | 0 | ExecARUpdateTriggers(estate, resultRelInfo, |
1531 | 0 | NULL, NULL, |
1532 | 0 | tupleid, oldtuple, |
1533 | 0 | NULL, NULL, mtstate->mt_transition_capture, |
1534 | 0 | false); |
1535 | | |
1536 | | /* |
1537 | | * We've already captured the OLD TABLE row, so make sure any AR |
1538 | | * DELETE trigger fired below doesn't capture it again. |
1539 | | */ |
1540 | 0 | ar_delete_trig_tcs = NULL; |
1541 | 0 | } |
1542 | | |
1543 | | /* AFTER ROW DELETE Triggers */ |
1544 | 0 | ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple, |
1545 | 0 | ar_delete_trig_tcs, changingPart); |
1546 | 0 | } |
1547 | | |
1548 | | /* ---------------------------------------------------------------- |
1549 | | * ExecDelete |
1550 | | * |
1551 | | * DELETE is like UPDATE, except that we delete the tuple and no |
1552 | | * index modifications are needed. |
1553 | | * |
1554 | | * When deleting from a table, tupleid identifies the tuple to delete and |
1555 | | * oldtuple is NULL. When deleting through a view INSTEAD OF trigger, |
1556 | | * oldtuple is passed to the triggers and identifies what to delete, and |
1557 | | * tupleid is invalid. When deleting from a foreign table, tupleid is |
1558 | | * invalid; the FDW has to figure out which row to delete using data from |
1559 | | * the planSlot. oldtuple is passed to foreign table triggers; it is |
1560 | | * NULL when the foreign table has no relevant triggers. We use |
1561 | | * tupleDeleted to indicate whether the tuple is actually deleted; |
1562 | | * callers can use it to decide whether to continue the operation. |
1563 | | * When this DELETE is part of a partition-key UPDATE, the slot |
1564 | | * returned by EvalPlanQual() is passed back using the output |
1565 | | * parameter epqreturnslot. |
1566 | | * |
1567 | | * Returns RETURNING result if any, otherwise NULL. |
1568 | | * ---------------------------------------------------------------- |
1569 | | */ |
1570 | | static TupleTableSlot * |
1571 | | ExecDelete(ModifyTableContext *context, |
1572 | | ResultRelInfo *resultRelInfo, |
1573 | | ItemPointer tupleid, |
1574 | | HeapTuple oldtuple, |
1575 | | bool processReturning, |
1576 | | bool changingPart, |
1577 | | bool canSetTag, |
1578 | | TM_Result *tmresult, |
1579 | | bool *tupleDeleted, |
1580 | | TupleTableSlot **epqreturnslot) |
1581 | 0 | { |
1582 | 0 | EState *estate = context->estate; |
1583 | 0 | Relation resultRelationDesc = resultRelInfo->ri_RelationDesc; |
1584 | 0 | TupleTableSlot *slot = NULL; |
1585 | 0 | TM_Result result; |
1586 | 0 | bool saveOld; |
1587 | |
|
1588 | 0 | if (tupleDeleted) |
1589 | 0 | *tupleDeleted = false; |
1590 | | |
1591 | | /* |
1592 | | * Prepare for the delete. This includes BEFORE ROW triggers, so we're |
1593 | | * done if it says we are. |
1594 | | */ |
1595 | 0 | if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple, |
1596 | 0 | epqreturnslot, tmresult)) |
1597 | 0 | return NULL; |
1598 | | |
1599 | | /* INSTEAD OF ROW DELETE Triggers */ |
1600 | 0 | if (resultRelInfo->ri_TrigDesc && |
1601 | 0 | resultRelInfo->ri_TrigDesc->trig_delete_instead_row) |
1602 | 0 | { |
1603 | 0 | bool dodelete; |
1604 | |
|
1605 | 0 | Assert(oldtuple != NULL); |
1606 | 0 | dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple); |
1607 | |
|
1608 | 0 | if (!dodelete) /* "do nothing" */ |
1609 | 0 | return NULL; |
1610 | 0 | } |
1611 | 0 | else if (resultRelInfo->ri_FdwRoutine) |
1612 | 0 | { |
1613 | | /* |
1614 | | * delete from foreign table: let the FDW do it |
1615 | | * |
1616 | | * We offer the returning slot as a place to store RETURNING data, |
1617 | | * although the FDW can return some other slot if it wants. |
1618 | | */ |
1619 | 0 | slot = ExecGetReturningSlot(estate, resultRelInfo); |
1620 | 0 | slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate, |
1621 | 0 | resultRelInfo, |
1622 | 0 | slot, |
1623 | 0 | context->planSlot); |
1624 | |
|
1625 | 0 | if (slot == NULL) /* "do nothing" */ |
1626 | 0 | return NULL; |
1627 | | |
1628 | | /* |
1629 | | * RETURNING expressions might reference the tableoid column, so |
1630 | | * (re)initialize tts_tableOid before evaluating them. |
1631 | | */ |
1632 | 0 | if (TTS_EMPTY(slot)) |
1633 | 0 | ExecStoreAllNullTuple(slot); |
1634 | |
|
1635 | 0 | slot->tts_tableOid = RelationGetRelid(resultRelationDesc); |
1636 | 0 | } |
1637 | 0 | else |
1638 | 0 | { |
1639 | | /* |
1640 | | * delete the tuple |
1641 | | * |
1642 | | * Note: if context->estate->es_crosscheck_snapshot isn't |
1643 | | * InvalidSnapshot, we check that the row to be deleted is visible to |
1644 | | * that snapshot, and throw a can't-serialize error if not. This is a |
1645 | | * special-case behavior needed for referential integrity updates in |
1646 | | * transaction-snapshot mode transactions. |
1647 | | */ |
1648 | 0 | ldelete: |
1649 | 0 | result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart); |
1650 | |
|
1651 | 0 | if (tmresult) |
1652 | 0 | *tmresult = result; |
1653 | |
|
1654 | 0 | switch (result) |
1655 | 0 | { |
1656 | 0 | case TM_SelfModified: |
1657 | | |
1658 | | /* |
1659 | | * The target tuple was already updated or deleted by the |
1660 | | * current command, or by a later command in the current |
1661 | | * transaction. The former case is possible in a join DELETE |
1662 | | * where multiple tuples join to the same target tuple. This |
1663 | | * is somewhat questionable, but Postgres has always allowed |
1664 | | * it: we just ignore additional deletion attempts. |
1665 | | * |
1666 | | * The latter case arises if the tuple is modified by a |
1667 | | * command in a BEFORE trigger, or perhaps by a command in a |
1668 | | * volatile function used in the query. In such situations we |
1669 | | * should not ignore the deletion, but it is equally unsafe to |
1670 | | * proceed. We don't want to discard the original DELETE |
1671 | | * while keeping the triggered actions based on its deletion; |
1672 | | * and it would be no better to allow the original DELETE |
1673 | | * while discarding updates that it triggered. The row update |
1674 | | * carries some information that might be important according |
1675 | | * to business rules; so throwing an error is the only safe |
1676 | | * course. |
1677 | | * |
1678 | | * If a trigger actually intends this type of interaction, it |
1679 | | * can re-execute the DELETE and then return NULL to cancel |
1680 | | * the outer delete. |
1681 | | */ |
1682 | 0 | if (context->tmfd.cmax != estate->es_output_cid) |
1683 | 0 | ereport(ERROR, |
1684 | 0 | (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), |
1685 | 0 | errmsg("tuple to be deleted was already modified by an operation triggered by the current command"), |
1686 | 0 | errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); |
1687 | | |
1688 | | /* Else, already deleted by self; nothing to do */ |
1689 | 0 | return NULL; |
1690 | | |
1691 | 0 | case TM_Ok: |
1692 | 0 | break; |
1693 | | |
1694 | 0 | case TM_Updated: |
1695 | 0 | { |
1696 | 0 | TupleTableSlot *inputslot; |
1697 | 0 | TupleTableSlot *epqslot; |
1698 | |
|
1699 | 0 | if (IsolationUsesXactSnapshot()) |
1700 | 0 | ereport(ERROR, |
1701 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
1702 | 0 | errmsg("could not serialize access due to concurrent update"))); |
1703 | | |
1704 | | /* |
1705 | | * Already know that we're going to need to do EPQ, so |
1706 | | * fetch tuple directly into the right slot. |
1707 | | */ |
1708 | 0 | EvalPlanQualBegin(context->epqstate); |
1709 | 0 | inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc, |
1710 | 0 | resultRelInfo->ri_RangeTableIndex); |
1711 | |
|
1712 | 0 | result = table_tuple_lock(resultRelationDesc, tupleid, |
1713 | 0 | estate->es_snapshot, |
1714 | 0 | inputslot, estate->es_output_cid, |
1715 | 0 | LockTupleExclusive, LockWaitBlock, |
1716 | 0 | TUPLE_LOCK_FLAG_FIND_LAST_VERSION, |
1717 | 0 | &context->tmfd); |
1718 | |
|
1719 | 0 | switch (result) |
1720 | 0 | { |
1721 | 0 | case TM_Ok: |
1722 | 0 | Assert(context->tmfd.traversed); |
1723 | 0 | epqslot = EvalPlanQual(context->epqstate, |
1724 | 0 | resultRelationDesc, |
1725 | 0 | resultRelInfo->ri_RangeTableIndex, |
1726 | 0 | inputslot); |
1727 | 0 | if (TupIsNull(epqslot)) |
1728 | | /* Tuple not passing quals anymore, exiting... */ |
1729 | 0 | return NULL; |
1730 | | |
1731 | | /* |
1732 | | * If requested, skip delete and pass back the |
1733 | | * updated row. |
1734 | | */ |
1735 | 0 | if (epqreturnslot) |
1736 | 0 | { |
1737 | 0 | *epqreturnslot = epqslot; |
1738 | 0 | return NULL; |
1739 | 0 | } |
1740 | 0 | else |
1741 | 0 | goto ldelete; |
1742 | | |
1743 | 0 | case TM_SelfModified: |
1744 | | |
1745 | | /* |
1746 | | * This can be reached when following an update |
1747 | | * chain from a tuple updated by another session, |
1748 | | * reaching a tuple that was already updated in |
1749 | | * this transaction. If previously updated by this |
1750 | | * command, ignore the delete, otherwise error |
1751 | | * out. |
1752 | | * |
1753 | | * See also TM_SelfModified response to |
1754 | | * table_tuple_delete() above. |
1755 | | */ |
1756 | 0 | if (context->tmfd.cmax != estate->es_output_cid) |
1757 | 0 | ereport(ERROR, |
1758 | 0 | (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), |
1759 | 0 | errmsg("tuple to be deleted was already modified by an operation triggered by the current command"), |
1760 | 0 | errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); |
1761 | 0 | return NULL; |
1762 | | |
1763 | 0 | case TM_Deleted: |
1764 | | /* tuple already deleted; nothing to do */ |
1765 | 0 | return NULL; |
1766 | | |
1767 | 0 | default: |
1768 | | |
1769 | | /* |
1770 | | * TM_Invisible should be impossible because we're |
1771 | | * waiting for updated row versions, and would |
1772 | | * already have errored out if the first version |
1773 | | * is invisible. |
1774 | | * |
1775 | | * TM_Updated should be impossible, because we're |
1776 | | * locking the latest version via |
1777 | | * TUPLE_LOCK_FLAG_FIND_LAST_VERSION. |
1778 | | */ |
1779 | 0 | elog(ERROR, "unexpected table_tuple_lock status: %u", |
1780 | 0 | result); |
1781 | 0 | return NULL; |
1782 | 0 | } |
1783 | | |
1784 | 0 | Assert(false); |
1785 | 0 | break; |
1786 | 0 | } |
1787 | | |
1788 | 0 | case TM_Deleted: |
1789 | 0 | if (IsolationUsesXactSnapshot()) |
1790 | 0 | ereport(ERROR, |
1791 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
1792 | 0 | errmsg("could not serialize access due to concurrent delete"))); |
1793 | | /* tuple already deleted; nothing to do */ |
1794 | 0 | return NULL; |
1795 | | |
1796 | 0 | default: |
1797 | 0 | elog(ERROR, "unrecognized table_tuple_delete status: %u", |
1798 | 0 | result); |
1799 | 0 | return NULL; |
1800 | 0 | } |
1801 | | |
1802 | | /* |
1803 | | * Note: Normally one would think that we have to delete index tuples |
1804 | | * associated with the heap tuple now... |
1805 | | * |
1806 | | * ... but in POSTGRES, we have no need to do this because VACUUM will |
1807 | | * take care of it later. We can't delete index tuples immediately |
1808 | | * anyway, since the tuple is still visible to other transactions. |
1809 | | */ |
1810 | 0 | } |
1811 | | |
1812 | 0 | if (canSetTag) |
1813 | 0 | (estate->es_processed)++; |
1814 | | |
1815 | | /* Tell caller that the delete actually happened. */ |
1816 | 0 | if (tupleDeleted) |
1817 | 0 | *tupleDeleted = true; |
1818 | |
|
1819 | 0 | ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart); |
1820 | | |
1821 | | /* |
1822 | | * Process RETURNING if present and if requested. |
1823 | | * |
1824 | | * If this is part of a cross-partition UPDATE, and the RETURNING list |
1825 | | * refers to any OLD column values, save the old tuple here for later |
1826 | | * processing of the RETURNING list by ExecInsert(). |
1827 | | */ |
1828 | 0 | saveOld = changingPart && resultRelInfo->ri_projectReturning && |
1829 | 0 | resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD; |
1830 | |
|
1831 | 0 | if (resultRelInfo->ri_projectReturning && (processReturning || saveOld)) |
1832 | 0 | { |
1833 | | /* |
1834 | | * We have to put the target tuple into a slot, which means first we |
1835 | | * gotta fetch it. We can use the trigger tuple slot. |
1836 | | */ |
1837 | 0 | TupleTableSlot *rslot; |
1838 | |
|
1839 | 0 | if (resultRelInfo->ri_FdwRoutine) |
1840 | 0 | { |
1841 | | /* FDW must have provided a slot containing the deleted row */ |
1842 | 0 | Assert(!TupIsNull(slot)); |
1843 | 0 | } |
1844 | 0 | else |
1845 | 0 | { |
1846 | 0 | slot = ExecGetReturningSlot(estate, resultRelInfo); |
1847 | 0 | if (oldtuple != NULL) |
1848 | 0 | { |
1849 | 0 | ExecForceStoreHeapTuple(oldtuple, slot, false); |
1850 | 0 | } |
1851 | 0 | else |
1852 | 0 | { |
1853 | 0 | if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid, |
1854 | 0 | SnapshotAny, slot)) |
1855 | 0 | elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); |
1856 | 0 | } |
1857 | 0 | } |
1858 | | |
1859 | | /* |
1860 | | * If required, save the old tuple for later processing of the |
1861 | | * RETURNING list by ExecInsert(). |
1862 | | */ |
1863 | 0 | if (saveOld) |
1864 | 0 | { |
1865 | 0 | TupleConversionMap *tupconv_map; |
1866 | | |
1867 | | /* |
1868 | | * Convert the tuple into the root partition's format/slot, if |
1869 | | * needed. ExecInsert() will then convert it to the new |
1870 | | * partition's format/slot, if necessary. |
1871 | | */ |
1872 | 0 | tupconv_map = ExecGetChildToRootMap(resultRelInfo); |
1873 | 0 | if (tupconv_map != NULL) |
1874 | 0 | { |
1875 | 0 | ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo; |
1876 | 0 | TupleTableSlot *oldSlot = slot; |
1877 | |
|
1878 | 0 | slot = execute_attr_map_slot(tupconv_map->attrMap, |
1879 | 0 | slot, |
1880 | 0 | ExecGetReturningSlot(estate, |
1881 | 0 | rootRelInfo)); |
1882 | |
|
1883 | 0 | slot->tts_tableOid = oldSlot->tts_tableOid; |
1884 | 0 | ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid); |
1885 | 0 | } |
1886 | |
|
1887 | 0 | context->cpDeletedSlot = slot; |
1888 | |
|
1889 | 0 | return NULL; |
1890 | 0 | } |
1891 | | |
1892 | 0 | rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE, |
1893 | 0 | slot, NULL, context->planSlot); |
1894 | | |
1895 | | /* |
1896 | | * Before releasing the target tuple again, make sure rslot has a |
1897 | | * local copy of any pass-by-reference values. |
1898 | | */ |
1899 | 0 | ExecMaterializeSlot(rslot); |
1900 | |
|
1901 | 0 | ExecClearTuple(slot); |
1902 | |
|
1903 | 0 | return rslot; |
1904 | 0 | } |
1905 | | |
1906 | 0 | return NULL; |
1907 | 0 | } |
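| |
| | /* |
| |  * For illustration (hypothetical schema): in a join DELETE such as |
| |  * |
| |  *     DELETE FROM target USING source WHERE target.k = source.k; |
| |  * |
| |  * several source rows can join to the same target row; the first match |
| |  * deletes it and later matches hit the TM_SelfModified branch above and |
| |  * are ignored. If the row was instead modified by a BEFORE trigger or a |
| |  * volatile function within the same command, that branch raises the |
| |  * "already modified by an operation triggered by the current command" |
| |  * error. |
| |  */ |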
1908 | | |
1909 | | /* |
1910 | | * ExecCrossPartitionUpdate --- Move an updated tuple to another partition. |
1911 | | * |
1912 | | * This works by first deleting the old tuple from the current partition, |
1913 | | * followed by inserting the new tuple into the root parent table, that is, |
1914 | | * mtstate->rootResultRelInfo. It will be re-routed from there to the |
1915 | | * correct partition. |
1916 | | * |
1917 | | * Returns true if the tuple has been successfully moved, or if it's found |
1918 | | * that the tuple was concurrently deleted so there's nothing more to do |
1919 | | * for the caller. |
1920 | | * |
1921 | | * False is returned if the tuple we're trying to move is found to have been |
1922 | | * concurrently updated. In that case, the caller must check if the updated |
1923 | | * tuple that's returned in *retry_slot still needs to be re-routed, and call |
1924 | | * this function again or perform a regular update accordingly. For MERGE, |
1925 | | * the updated tuple is not returned in *retry_slot; it has its own retry |
1926 | | * logic. |
1927 | | */ |
1928 | | static bool |
1929 | | ExecCrossPartitionUpdate(ModifyTableContext *context, |
1930 | | ResultRelInfo *resultRelInfo, |
1931 | | ItemPointer tupleid, HeapTuple oldtuple, |
1932 | | TupleTableSlot *slot, |
1933 | | bool canSetTag, |
1934 | | UpdateContext *updateCxt, |
1935 | | TM_Result *tmresult, |
1936 | | TupleTableSlot **retry_slot, |
1937 | | TupleTableSlot **inserted_tuple, |
1938 | | ResultRelInfo **insert_destrel) |
1939 | 0 | { |
1940 | 0 | ModifyTableState *mtstate = context->mtstate; |
1941 | 0 | EState *estate = mtstate->ps.state; |
1942 | 0 | TupleConversionMap *tupconv_map; |
1943 | 0 | bool tuple_deleted; |
1944 | 0 | TupleTableSlot *epqslot = NULL; |
1945 | |
|
1946 | 0 | context->cpDeletedSlot = NULL; |
1947 | 0 | context->cpUpdateReturningSlot = NULL; |
1948 | 0 | *retry_slot = NULL; |
1949 | | |
1950 | | /* |
1951 | | * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row |
1952 | | * to migrate to a different partition. Maybe this can be implemented |
1953 | | * some day, but it seems a fringe feature with little redeeming value. |
1954 | | */ |
1955 | 0 | if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE) |
1956 | 0 | ereport(ERROR, |
1957 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
1958 | 0 | errmsg("invalid ON UPDATE specification"), |
1959 | 0 | errdetail("The result tuple would appear in a different partition than the original tuple."))); |
1960 | | |
1961 | | /* |
1962 | | * When an UPDATE is run directly on a leaf partition, simply fail with a |
1963 | | * partition constraint violation error. |
1964 | | */ |
1965 | 0 | if (resultRelInfo == mtstate->rootResultRelInfo) |
1966 | 0 | ExecPartitionCheckEmitError(resultRelInfo, slot, estate); |
1967 | | |
1968 | | /* Initialize tuple routing info if not already done. */ |
1969 | 0 | if (mtstate->mt_partition_tuple_routing == NULL) |
1970 | 0 | { |
1971 | 0 | Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc; |
1972 | 0 | MemoryContext oldcxt; |
1973 | | |
1974 | | /* Things built here have to last for the query duration. */ |
1975 | 0 | oldcxt = MemoryContextSwitchTo(estate->es_query_cxt); |
1976 | |
|
1977 | 0 | mtstate->mt_partition_tuple_routing = |
1978 | 0 | ExecSetupPartitionTupleRouting(estate, rootRel); |
1979 | | |
1980 | | /* |
1981 | | * Before a partition's tuple can be re-routed, it must first be |
1982 | | * converted to the root's format, so we'll need a slot for storing |
1983 | | * such tuples. |
1984 | | */ |
1985 | 0 | Assert(mtstate->mt_root_tuple_slot == NULL); |
1986 | 0 | mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL); |
1987 | |
|
1988 | 0 | MemoryContextSwitchTo(oldcxt); |
1989 | 0 | } |
1990 | | |
1991 | | /* |
1992 | | * Row movement, part 1. Delete the tuple, but skip RETURNING processing. |
1993 | | * We want to return rows from INSERT. |
1994 | | */ |
1995 | 0 | ExecDelete(context, resultRelInfo, |
1996 | 0 | tupleid, oldtuple, |
1997 | 0 | false, /* processReturning */ |
1998 | 0 | true, /* changingPart */ |
1999 | 0 | false, /* canSetTag */ |
2000 | 0 | tmresult, &tuple_deleted, &epqslot); |
2001 | | |
2002 | | /* |
2003 | | * If for some reason the DELETE didn't happen (e.g. a trigger prevented it, or |
2004 | | * it was already deleted by self, or it was concurrently deleted by |
2005 | | * another transaction), then we should skip the insert as well; |
2006 | | * otherwise, an UPDATE could cause an increase in the total number of |
2007 | | * rows across all partitions, which is clearly wrong. |
2008 | | * |
2009 | | * For a normal UPDATE, the case where the tuple has been the subject of a |
2010 | | * concurrent UPDATE or DELETE would be handled by the EvalPlanQual |
2011 | | * machinery, but for an UPDATE that we've translated into a DELETE from |
2012 | | * this partition and an INSERT into some other partition, that's not |
2013 | | * available, because CTID chains can't span relation boundaries. We |
2014 | | * mimic the semantics to a limited extent by skipping the INSERT if the |
2015 | | * DELETE fails to find a tuple. This ensures that two concurrent |
2016 | | * attempts to UPDATE the same tuple at the same time can't turn one tuple |
2017 | | * into two, and that an UPDATE of a just-deleted tuple can't resurrect |
2018 | | * it. |
2019 | | */ |
2020 | 0 | if (!tuple_deleted) |
2021 | 0 | { |
2022 | | /* |
2023 | | * epqslot will typically be NULL. But when ExecDelete() finds that |
2024 | | * another transaction has concurrently updated the same row, it |
2025 | | * re-fetches the row, skips the delete, and epqslot is set to the |
2026 | | * re-fetched tuple slot. In that case, we need to do all the checks |
2027 | | * again. For MERGE, we leave everything to the caller (it must do |
2028 | | * additional rechecking, and might end up executing a different |
2029 | | * action entirely). |
2030 | | */ |
2031 | 0 | if (mtstate->operation == CMD_MERGE) |
2032 | 0 | return *tmresult == TM_Ok; |
2033 | 0 | else if (TupIsNull(epqslot)) |
2034 | 0 | return true; |
2035 | 0 | else |
2036 | 0 | { |
2037 | | /* Fetch the most recent version of old tuple. */ |
2038 | 0 | TupleTableSlot *oldSlot; |
2039 | | |
2040 | | /* ... but first, make sure ri_oldTupleSlot is initialized. */ |
2041 | 0 | if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) |
2042 | 0 | ExecInitUpdateProjection(mtstate, resultRelInfo); |
2043 | 0 | oldSlot = resultRelInfo->ri_oldTupleSlot; |
2044 | 0 | if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc, |
2045 | 0 | tupleid, |
2046 | 0 | SnapshotAny, |
2047 | 0 | oldSlot)) |
2048 | 0 | elog(ERROR, "failed to fetch tuple being updated"); |
2049 | | /* and project the new tuple to retry the UPDATE with */ |
2050 | 0 | *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot, |
2051 | 0 | oldSlot); |
2052 | 0 | return false; |
2053 | 0 | } |
2054 | 0 | } |
2055 | | |
2056 | | /* |
2057 | | * resultRelInfo is one of the per-relation resultRelInfos. So we should |
2058 | | * convert the tuple into root's tuple descriptor if needed, since |
2059 | | * ExecInsert() starts the search from root. |
2060 | | */ |
2061 | 0 | tupconv_map = ExecGetChildToRootMap(resultRelInfo); |
2062 | 0 | if (tupconv_map != NULL) |
2063 | 0 | slot = execute_attr_map_slot(tupconv_map->attrMap, |
2064 | 0 | slot, |
2065 | 0 | mtstate->mt_root_tuple_slot); |
2066 | | |
2067 | | /* Tuple routing starts from the root table. */ |
2068 | 0 | context->cpUpdateReturningSlot = |
2069 | 0 | ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag, |
2070 | 0 | inserted_tuple, insert_destrel); |
2071 | | |
2072 | | /* |
2073 | | * Reset the transition state that may possibly have been written by |
2074 | | * INSERT. |
2075 | | */ |
2076 | 0 | if (mtstate->mt_transition_capture) |
2077 | 0 | mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; |
2078 | | |
2079 | | /* We're done moving. */ |
2080 | 0 | return true; |
2081 | 0 | } |
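| |
| | /* |
| |  * For illustration (hypothetical table "parted" partitioned by column |
| |  * p): an UPDATE such as |
| |  * |
| |  *     UPDATE parted SET p = <value stored in another partition> ...; |
| |  * |
| |  * is carried out as ExecDelete() on the source partition followed by |
| |  * ExecInsert() on the root, which re-routes the row to the destination |
| |  * partition. If the DELETE finds nothing (a trigger suppressed it, or |
| |  * the row was deleted concurrently) the INSERT is skipped, so the total |
| |  * row count cannot grow; a concurrent update instead triggers a retry |
| |  * via *retry_slot, or is handed back to MERGE. ON CONFLICT DO UPDATE |
| |  * is not allowed to move a row across partitions and errors out above. |
| |  */ |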
2082 | | |
2083 | | /* |
2084 | | * ExecUpdatePrologue -- subroutine for ExecUpdate |
2085 | | * |
2086 | | * Prepare executor state for UPDATE. This includes running BEFORE ROW |
2087 | | * triggers. We return false if one of them makes the update a no-op; |
2088 | | * otherwise, return true. |
2089 | | */ |
2090 | | static bool |
2091 | | ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
2092 | | ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, |
2093 | | TM_Result *result) |
2094 | 0 | { |
2095 | 0 | Relation resultRelationDesc = resultRelInfo->ri_RelationDesc; |
2096 | |
|
2097 | 0 | if (result) |
2098 | 0 | *result = TM_Ok; |
2099 | |
|
2100 | 0 | ExecMaterializeSlot(slot); |
2101 | | |
2102 | | /* |
2103 | | * Open the table's indexes, if we have not done so already, so that we |
2104 | | * can add new index entries for the updated tuple. |
2105 | | */ |
2106 | 0 | if (resultRelationDesc->rd_rel->relhasindex && |
2107 | 0 | resultRelInfo->ri_IndexRelationDescs == NULL) |
2108 | 0 | ExecOpenIndices(resultRelInfo, false); |
2109 | | |
2110 | | /* BEFORE ROW UPDATE triggers */ |
2111 | 0 | if (resultRelInfo->ri_TrigDesc && |
2112 | 0 | resultRelInfo->ri_TrigDesc->trig_update_before_row) |
2113 | 0 | { |
2114 | | /* Flush any pending inserts, so rows are visible to the triggers */ |
2115 | 0 | if (context->estate->es_insert_pending_result_relations != NIL) |
2116 | 0 | ExecPendingInserts(context->estate); |
2117 | |
|
2118 | 0 | return ExecBRUpdateTriggers(context->estate, context->epqstate, |
2119 | 0 | resultRelInfo, tupleid, oldtuple, slot, |
2120 | 0 | result, &context->tmfd); |
2121 | 0 | } |
2122 | | |
2123 | 0 | return true; |
2124 | 0 | } |
2125 | | |
2126 | | /* |
2127 | | * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct |
2128 | | * |
2129 | | * Apply the final modifications to the tuple slot before the update. |
2130 | | * (This is split out because we also need it in the foreign-table code path.) |
2131 | | */ |
2132 | | static void |
2133 | | ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo, |
2134 | | TupleTableSlot *slot, |
2135 | | EState *estate) |
2136 | 0 | { |
2137 | 0 | Relation resultRelationDesc = resultRelInfo->ri_RelationDesc; |
2138 | | |
2139 | | /* |
2140 | | * Constraints and GENERATED expressions might reference the tableoid |
2141 | | * column, so (re-)initialize tts_tableOid before evaluating them. |
2142 | | */ |
2143 | 0 | slot->tts_tableOid = RelationGetRelid(resultRelationDesc); |
2144 | | |
2145 | | /* |
2146 | | * Compute stored generated columns |
2147 | | */ |
2148 | 0 | if (resultRelationDesc->rd_att->constr && |
2149 | 0 | resultRelationDesc->rd_att->constr->has_generated_stored) |
2150 | 0 | ExecComputeStoredGenerated(resultRelInfo, estate, slot, |
2151 | 0 | CMD_UPDATE); |
2152 | 0 | } |
2153 | | |
2154 | | /* |
2155 | | * ExecUpdateAct -- subroutine for ExecUpdate |
2156 | | * |
2157 | | * Actually update the tuple, when operating on a plain table. If the |
2158 | | * table is a partition, and the command was called referencing an ancestor |
2159 | | * partitioned table, this routine migrates the resulting tuple to another |
2160 | | * partition. |
2161 | | * |
2162 | | * The caller is in charge of keeping indexes current as necessary. The |
2163 | | * caller is also in charge of doing EvalPlanQual if the tuple is found to |
2164 | | * be concurrently updated. However, in case of a cross-partition update, |
2165 | | * this routine does it. |
2166 | | */ |
2167 | | static TM_Result |
2168 | | ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
2169 | | ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, |
2170 | | bool canSetTag, UpdateContext *updateCxt) |
2171 | 0 | { |
2172 | 0 | EState *estate = context->estate; |
2173 | 0 | Relation resultRelationDesc = resultRelInfo->ri_RelationDesc; |
2174 | 0 | bool partition_constraint_failed; |
2175 | 0 | TM_Result result; |
2176 | |
|
2177 | 0 | updateCxt->crossPartUpdate = false; |
2178 | | |
2179 | | /* |
2180 | | * If we move the tuple to a new partition, we loop back here to recompute |
2181 | | * GENERATED values (which are allowed to be different across partitions) |
2182 | | * and recheck any RLS policies and constraints. We do not fire any |
2183 | | * BEFORE triggers of the new partition, however. |
2184 | | */ |
2185 | 0 | lreplace: |
2186 | | /* Fill in GENERATEd columns */ |
2187 | 0 | ExecUpdatePrepareSlot(resultRelInfo, slot, estate); |
2188 | | |
2189 | | /* ensure slot is independent, consider e.g. EPQ */ |
2190 | 0 | ExecMaterializeSlot(slot); |
2191 | | |
2192 | | /* |
2193 | | * If partition constraint fails, this row might get moved to another |
2194 | | * partition, in which case we should check the RLS CHECK policy just |
2195 | | * before inserting into the new partition, rather than doing it here. |
2196 | | * This is because a trigger on that partition might again change the row. |
2197 | | * So skip the WCO checks if the partition constraint fails. |
2198 | | */ |
2199 | 0 | partition_constraint_failed = |
2200 | 0 | resultRelationDesc->rd_rel->relispartition && |
2201 | 0 | !ExecPartitionCheck(resultRelInfo, slot, estate, false); |
2202 | | |
2203 | | /* Check any RLS UPDATE WITH CHECK policies */ |
2204 | 0 | if (!partition_constraint_failed && |
2205 | 0 | resultRelInfo->ri_WithCheckOptions != NIL) |
2206 | 0 | { |
2207 | | /* |
2208 | | * ExecWithCheckOptions() will skip any WCOs which are not of the kind |
2209 | | * we are looking for at this point. |
2210 | | */ |
2211 | 0 | ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK, |
2212 | 0 | resultRelInfo, slot, estate); |
2213 | 0 | } |
2214 | | |
2215 | | /* |
2216 | | * If a partition check failed, try to move the row into the right |
2217 | | * partition. |
2218 | | */ |
2219 | 0 | if (partition_constraint_failed) |
2220 | 0 | { |
2221 | 0 | TupleTableSlot *inserted_tuple, |
2222 | 0 | *retry_slot; |
2223 | 0 | ResultRelInfo *insert_destrel = NULL; |
2224 | | |
2225 | | /* |
2226 | | * ExecCrossPartitionUpdate will first DELETE the row from the |
2227 | | * partition it's currently in and then insert it back into the root |
2228 | | * table, which will re-route it to the correct partition. However, |
2229 | | * if the tuple has been concurrently updated, a retry is needed. |
2230 | | */ |
2231 | 0 | if (ExecCrossPartitionUpdate(context, resultRelInfo, |
2232 | 0 | tupleid, oldtuple, slot, |
2233 | 0 | canSetTag, updateCxt, |
2234 | 0 | &result, |
2235 | 0 | &retry_slot, |
2236 | 0 | &inserted_tuple, |
2237 | 0 | &insert_destrel)) |
2238 | 0 | { |
2239 | | /* success! */ |
2240 | 0 | updateCxt->crossPartUpdate = true; |
2241 | | |
2242 | | /* |
2243 | | * If the partitioned table being updated is referenced in foreign |
2244 | | * keys, queue up trigger events to check that none of them were |
2245 | | * violated. No special treatment is needed in |
2246 | | * non-cross-partition update situations, because the leaf |
2247 | | * partition's AR update triggers will take care of that. During |
2248 | | * cross-partition updates implemented as delete on the source |
2249 | | * partition followed by insert on the destination partition, |
2250 | | * AR-UPDATE triggers of the root table (that is, the table |
2251 | | * mentioned in the query) must be fired. |
2252 | | * |
2253 | | * NULL insert_destrel means that the move failed to occur, that |
2254 | | * is, the update failed, so there is nothing to do in that case. |
2255 | | */ |
2256 | 0 | if (insert_destrel && |
2257 | 0 | resultRelInfo->ri_TrigDesc && |
2258 | 0 | resultRelInfo->ri_TrigDesc->trig_update_after_row) |
2259 | 0 | ExecCrossPartitionUpdateForeignKey(context, |
2260 | 0 | resultRelInfo, |
2261 | 0 | insert_destrel, |
2262 | 0 | tupleid, slot, |
2263 | 0 | inserted_tuple); |
2264 | |
|
2265 | 0 | return TM_Ok; |
2266 | 0 | } |
2267 | | |
2268 | | /* |
2269 | | * No luck, a retry is needed. If running MERGE, we do not do so |
2270 | | * here; instead we let MERGE handle the retry under its own rules. |
2271 | | */ |
2272 | 0 | if (context->mtstate->operation == CMD_MERGE) |
2273 | 0 | return result; |
2274 | | |
2275 | | /* |
2276 | | * ExecCrossPartitionUpdate installed an updated version of the new |
2277 | | * tuple in the retry slot; start over. |
2278 | | */ |
2279 | 0 | slot = retry_slot; |
2280 | 0 | goto lreplace; |
2281 | 0 | } |
2282 | | |
2283 | | /* |
2284 | | * Check the constraints of the tuple. We've already checked the |
2285 | | * partition constraint above; however, we must still ensure the tuple |
2286 | | * passes all other constraints, so we will call ExecConstraints() and |
2287 | | * have it validate all remaining checks. |
2288 | | */ |
2289 | 0 | if (resultRelationDesc->rd_att->constr) |
2290 | 0 | ExecConstraints(resultRelInfo, slot, estate); |
2291 | | |
2292 | | /* |
2293 | | * replace the heap tuple |
2294 | | * |
2295 | | * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that |
2296 | | * the row to be updated is visible to that snapshot, and throw a |
2297 | | * can't-serialize error if not. This is a special-case behavior needed |
2298 | | * for referential integrity updates in transaction-snapshot mode |
2299 | | * transactions. |
2300 | | */ |
2301 | 0 | result = table_tuple_update(resultRelationDesc, tupleid, slot, |
2302 | 0 | estate->es_output_cid, |
2303 | 0 | estate->es_snapshot, |
2304 | 0 | estate->es_crosscheck_snapshot, |
2305 | 0 | true /* wait for commit */ , |
2306 | 0 | &context->tmfd, &updateCxt->lockmode, |
2307 | 0 | &updateCxt->updateIndexes); |
2308 | |
|
2309 | 0 | return result; |
2310 | 0 | } |
2311 | | |
2312 | | /* |
2313 | | * ExecUpdateEpilogue -- subroutine for ExecUpdate |
2314 | | * |
2315 | | * Closing steps of updating a tuple. Must be called if ExecUpdateAct |
2316 | | * returns indicating that the tuple was updated. |
2317 | | */ |
2318 | | static void |
2319 | | ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt, |
2320 | | ResultRelInfo *resultRelInfo, ItemPointer tupleid, |
2321 | | HeapTuple oldtuple, TupleTableSlot *slot) |
2322 | 0 | { |
2323 | 0 | ModifyTableState *mtstate = context->mtstate; |
2324 | 0 | List *recheckIndexes = NIL; |
2325 | | |
2326 | | /* insert index entries for tuple if necessary */ |
2327 | 0 | if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None)) |
2328 | 0 | recheckIndexes = ExecInsertIndexTuples(resultRelInfo, |
2329 | 0 | slot, context->estate, |
2330 | 0 | true, false, |
2331 | 0 | NULL, NIL, |
2332 | 0 | (updateCxt->updateIndexes == TU_Summarizing)); |
2333 | | |
2334 | | /* AFTER ROW UPDATE Triggers */ |
2335 | 0 | ExecARUpdateTriggers(context->estate, resultRelInfo, |
2336 | 0 | NULL, NULL, |
2337 | 0 | tupleid, oldtuple, slot, |
2338 | 0 | recheckIndexes, |
2339 | 0 | mtstate->operation == CMD_INSERT ? |
2340 | 0 | mtstate->mt_oc_transition_capture : |
2341 | 0 | mtstate->mt_transition_capture, |
2342 | 0 | false); |
2343 | |
|
2344 | 0 | list_free(recheckIndexes); |
2345 | | |
2346 | | /* |
2347 | | * Check any WITH CHECK OPTION constraints from parent views. We are |
2348 | | * required to do this after testing all constraints and uniqueness |
2349 | | * violations per the SQL spec, so we do it after actually updating the |
2350 | | * record in the heap and all indexes. |
2351 | | * |
2352 | | * ExecWithCheckOptions() will skip any WCOs which are not of the kind we |
2353 | | * are looking for at this point. |
2354 | | */ |
2355 | 0 | if (resultRelInfo->ri_WithCheckOptions != NIL) |
2356 | 0 | ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, |
2357 | 0 | slot, context->estate); |
2358 | 0 | } |
2359 | | |
2360 | | /* |
2361 | | * Queues up an update event using the target root partitioned table's |
2362 | | * trigger to check that a cross-partition update hasn't broken any foreign |
2363 | | * keys pointing into it. |
2364 | | */ |
2365 | | static void |
2366 | | ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context, |
2367 | | ResultRelInfo *sourcePartInfo, |
2368 | | ResultRelInfo *destPartInfo, |
2369 | | ItemPointer tupleid, |
2370 | | TupleTableSlot *oldslot, |
2371 | | TupleTableSlot *newslot) |
2372 | 0 | { |
2373 | 0 | ListCell *lc; |
2374 | 0 | ResultRelInfo *rootRelInfo; |
2375 | 0 | List *ancestorRels; |
2376 | |
|
2377 | 0 | rootRelInfo = sourcePartInfo->ri_RootResultRelInfo; |
2378 | 0 | ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo); |
2379 | | |
2380 | | /* |
2381 | | * For any foreign keys that point directly into a non-root ancestor of |
2382 | | * the source partition, we could in theory fire an update event to |
2383 | | * enforce those constraints using their triggers, if we could tell that |
2384 | | * both the source and the destination partitions are under the same |
2385 | | * ancestor. But for now, we simply report an error that those cannot be enforced. |
2386 | | */ |
2387 | 0 | foreach(lc, ancestorRels) |
2388 | 0 | { |
2389 | 0 | ResultRelInfo *rInfo = lfirst(lc); |
2390 | 0 | TriggerDesc *trigdesc = rInfo->ri_TrigDesc; |
2391 | 0 | bool has_noncloned_fkey = false; |
2392 | | |
2393 | | /* Root ancestor's triggers will be processed. */ |
2394 | 0 | if (rInfo == rootRelInfo) |
2395 | 0 | continue; |
2396 | | |
2397 | 0 | if (trigdesc && trigdesc->trig_update_after_row) |
2398 | 0 | { |
2399 | 0 | for (int i = 0; i < trigdesc->numtriggers; i++) |
2400 | 0 | { |
2401 | 0 | Trigger *trig = &trigdesc->triggers[i]; |
2402 | |
|
2403 | 0 | if (!trig->tgisclone && |
2404 | 0 | RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK) |
2405 | 0 | { |
2406 | 0 | has_noncloned_fkey = true; |
2407 | 0 | break; |
2408 | 0 | } |
2409 | 0 | } |
2410 | 0 | } |
2411 | |
|
2412 | 0 | if (has_noncloned_fkey) |
2413 | 0 | ereport(ERROR, |
2414 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
2415 | 0 | errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"), |
2416 | 0 | errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".", |
2417 | 0 | RelationGetRelationName(rInfo->ri_RelationDesc), |
2418 | 0 | RelationGetRelationName(rootRelInfo->ri_RelationDesc)), |
2419 | 0 | errhint("Consider defining the foreign key on table \"%s\".", |
2420 | 0 | RelationGetRelationName(rootRelInfo->ri_RelationDesc)))); |
2421 | 0 | } |
2422 | | |
2423 | | /* Perform the root table's triggers. */ |
2424 | 0 | ExecARUpdateTriggers(context->estate, |
2425 | 0 | rootRelInfo, sourcePartInfo, destPartInfo, |
2426 | 0 | tupleid, NULL, newslot, NIL, NULL, true); |
2427 | 0 | } |
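| |
| | /* |
| |  * Note: the error above is raised when a row is moved across partitions |
| |  * and a foreign key references a non-root ancestor in a multi-level |
| |  * partitioning hierarchy; only foreign keys referencing the root |
| |  * ancestor can be rechecked here, by queuing the root table's AFTER ROW |
| |  * UPDATE triggers at the end of the function. |
| |  */ |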
2428 | | |
2429 | | /* ---------------------------------------------------------------- |
2430 | | * ExecUpdate |
2431 | | * |
2432 | | * note: we can't run UPDATE queries with transactions |
2433 | | * off because UPDATEs are actually INSERTs and our |
2434 | | * scan will mistakenly loop forever, updating the tuple |
2435 | | * it just inserted. This should be fixed but until it |
2436 | | * is, we don't want to get stuck in an infinite loop |
2437 | | * which corrupts your database. |
2438 | | * |
2439 | | * When updating a table, tupleid identifies the tuple to update and |
2440 | | * oldtuple is NULL. When updating through a view INSTEAD OF trigger, |
2441 | | * oldtuple is passed to the triggers and identifies what to update, and |
2442 | | * tupleid is invalid. When updating a foreign table, tupleid is |
2443 | | * invalid; the FDW has to figure out which row to update using data from |
2444 | | * the planSlot. oldtuple is passed to foreign table triggers; it is |
2445 | | * NULL when the foreign table has no relevant triggers. |
2446 | | * |
2447 | | * oldSlot contains the old tuple value. |
2448 | | * slot contains the new tuple value to be stored. |
2449 | | * planSlot is the output of the ModifyTable's subplan; we use it |
2450 | | * to access values from other input tables (for RETURNING), |
2451 | | * row-ID junk columns, etc. |
2452 | | * |
2453 | | * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid |
2454 | | * had identified the tuple to update, it will identify the tuple |
2455 | | * actually updated after EvalPlanQual. |
2456 | | * ---------------------------------------------------------------- |
2457 | | */ |
2458 | | static TupleTableSlot * |
2459 | | ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
2460 | | ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot, |
2461 | | TupleTableSlot *slot, bool canSetTag) |
2462 | 0 | { |
2463 | 0 | EState *estate = context->estate; |
2464 | 0 | Relation resultRelationDesc = resultRelInfo->ri_RelationDesc; |
2465 | 0 | UpdateContext updateCxt = {0}; |
2466 | 0 | TM_Result result; |
2467 | | |
2468 | | /* |
2469 | | * abort the operation if not running transactions |
2470 | | */ |
2471 | 0 | if (IsBootstrapProcessingMode()) |
2472 | 0 | elog(ERROR, "cannot UPDATE during bootstrap"); |
2473 | | |
2474 | | /* |
2475 | | * Prepare for the update. This includes BEFORE ROW triggers, so we're |
2476 | | * done if it says we are. |
2477 | | */ |
2478 | 0 | if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL)) |
2479 | 0 | return NULL; |
2480 | | |
2481 | | /* INSTEAD OF ROW UPDATE Triggers */ |
2482 | 0 | if (resultRelInfo->ri_TrigDesc && |
2483 | 0 | resultRelInfo->ri_TrigDesc->trig_update_instead_row) |
2484 | 0 | { |
2485 | 0 | if (!ExecIRUpdateTriggers(estate, resultRelInfo, |
2486 | 0 | oldtuple, slot)) |
2487 | 0 | return NULL; /* "do nothing" */ |
2488 | 0 | } |
2489 | 0 | else if (resultRelInfo->ri_FdwRoutine) |
2490 | 0 | { |
2491 | | /* Fill in GENERATEd columns */ |
2492 | 0 | ExecUpdatePrepareSlot(resultRelInfo, slot, estate); |
2493 | | |
2494 | | /* |
2495 | | * update in foreign table: let the FDW do it |
2496 | | */ |
2497 | 0 | slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate, |
2498 | 0 | resultRelInfo, |
2499 | 0 | slot, |
2500 | 0 | context->planSlot); |
2501 | |
|
2502 | 0 | if (slot == NULL) /* "do nothing" */ |
2503 | 0 | return NULL; |
2504 | | |
2505 | | /* |
2506 | | * AFTER ROW Triggers or RETURNING expressions might reference the |
2507 | | * tableoid column, so (re-)initialize tts_tableOid before evaluating |
2508 | | * them. (This covers the case where the FDW replaced the slot.) |
2509 | | */ |
2510 | 0 | slot->tts_tableOid = RelationGetRelid(resultRelationDesc); |
2511 | 0 | } |
2512 | 0 | else |
2513 | 0 | { |
2514 | 0 | ItemPointerData lockedtid; |
2515 | | |
2516 | | /* |
2517 | | * If we generate a new candidate tuple after EvalPlanQual testing, we |
2518 | | * must loop back here to try again. (We don't need to redo triggers, |
2519 | | * however. If there are any BEFORE triggers then trigger.c will have |
2520 | | * done table_tuple_lock to lock the correct tuple, so there's no need |
2521 | | * to do them again.) |
2522 | | */ |
2523 | 0 | redo_act: |
2524 | 0 | lockedtid = *tupleid; |
2525 | 0 | result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot, |
2526 | 0 | canSetTag, &updateCxt); |
2527 | | |
2528 | | /* |
2529 | | * If ExecUpdateAct reports that a cross-partition update was done, |
2530 | | * then the RETURNING tuple (if any) has been projected and there's |
2531 | | * nothing else for us to do. |
2532 | | */ |
2533 | 0 | if (updateCxt.crossPartUpdate) |
2534 | 0 | return context->cpUpdateReturningSlot; |
2535 | | |
2536 | 0 | switch (result) |
2537 | 0 | { |
2538 | 0 | case TM_SelfModified: |
2539 | | |
2540 | | /* |
2541 | | * The target tuple was already updated or deleted by the |
2542 | | * current command, or by a later command in the current |
2543 | | * transaction. The former case is possible in a join UPDATE |
2544 | | * where multiple tuples join to the same target tuple. This |
2545 | | * is pretty questionable, but Postgres has always allowed it: |
2546 | | * we just execute the first update action and ignore |
2547 | | * additional update attempts. |
2548 | | * |
2549 | | * The latter case arises if the tuple is modified by a |
2550 | | * command in a BEFORE trigger, or perhaps by a command in a |
2551 | | * volatile function used in the query. In such situations we |
2552 | | * should not ignore the update, but it is equally unsafe to |
2553 | | * proceed. We don't want to discard the original UPDATE |
2554 | | * while keeping the triggered actions based on it; and we |
2555 | | * have no principled way to merge this update with the |
2556 | | * previous ones. So throwing an error is the only safe |
2557 | | * course. |
2558 | | * |
2559 | | * If a trigger actually intends this type of interaction, it |
2560 | | * can re-execute the UPDATE (assuming it can figure out how) |
2561 | | * and then return NULL to cancel the outer update. |
2562 | | */ |
2563 | 0 | if (context->tmfd.cmax != estate->es_output_cid) |
2564 | 0 | ereport(ERROR, |
2565 | 0 | (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), |
2566 | 0 | errmsg("tuple to be updated was already modified by an operation triggered by the current command"), |
2567 | 0 | errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); |
2568 | | |
2569 | | /* Else, already updated by self; nothing to do */ |
2570 | 0 | return NULL; |
2571 | | |
2572 | 0 | case TM_Ok: |
2573 | 0 | break; |
2574 | | |
2575 | 0 | case TM_Updated: |
2576 | 0 | { |
2577 | 0 | TupleTableSlot *inputslot; |
2578 | 0 | TupleTableSlot *epqslot; |
2579 | |
|
2580 | 0 | if (IsolationUsesXactSnapshot()) |
2581 | 0 | ereport(ERROR, |
2582 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
2583 | 0 | errmsg("could not serialize access due to concurrent update"))); |
2584 | | |
2585 | | /* |
2586 | | * Already know that we're going to need to do EPQ, so |
2587 | | * fetch tuple directly into the right slot. |
2588 | | */ |
2589 | 0 | inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc, |
2590 | 0 | resultRelInfo->ri_RangeTableIndex); |
2591 | |
|
2592 | 0 | result = table_tuple_lock(resultRelationDesc, tupleid, |
2593 | 0 | estate->es_snapshot, |
2594 | 0 | inputslot, estate->es_output_cid, |
2595 | 0 | updateCxt.lockmode, LockWaitBlock, |
2596 | 0 | TUPLE_LOCK_FLAG_FIND_LAST_VERSION, |
2597 | 0 | &context->tmfd); |
2598 | |
|
2599 | 0 | switch (result) |
2600 | 0 | { |
2601 | 0 | case TM_Ok: |
2602 | 0 | Assert(context->tmfd.traversed); |
2603 | |
|
2604 | 0 | epqslot = EvalPlanQual(context->epqstate, |
2605 | 0 | resultRelationDesc, |
2606 | 0 | resultRelInfo->ri_RangeTableIndex, |
2607 | 0 | inputslot); |
2608 | 0 | if (TupIsNull(epqslot)) |
2609 | | /* Tuple not passing quals anymore, exiting... */ |
2610 | 0 | return NULL; |
2611 | | |
2612 | | /* Make sure ri_oldTupleSlot is initialized. */ |
2613 | 0 | if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) |
2614 | 0 | ExecInitUpdateProjection(context->mtstate, |
2615 | 0 | resultRelInfo); |
2616 | |
|
2617 | 0 | if (resultRelInfo->ri_needLockTagTuple) |
2618 | 0 | { |
2619 | 0 | UnlockTuple(resultRelationDesc, |
2620 | 0 | &lockedtid, InplaceUpdateTupleLock); |
2621 | 0 | LockTuple(resultRelationDesc, |
2622 | 0 | tupleid, InplaceUpdateTupleLock); |
2623 | 0 | } |
2624 | | |
2625 | | /* Fetch the most recent version of old tuple. */ |
2626 | 0 | oldSlot = resultRelInfo->ri_oldTupleSlot; |
2627 | 0 | if (!table_tuple_fetch_row_version(resultRelationDesc, |
2628 | 0 | tupleid, |
2629 | 0 | SnapshotAny, |
2630 | 0 | oldSlot)) |
2631 | 0 | elog(ERROR, "failed to fetch tuple being updated"); |
2632 | 0 | slot = ExecGetUpdateNewTuple(resultRelInfo, |
2633 | 0 | epqslot, oldSlot); |
2634 | 0 | goto redo_act; |
2635 | | |
2636 | 0 | case TM_Deleted: |
2637 | | /* tuple already deleted; nothing to do */ |
2638 | 0 | return NULL; |
2639 | | |
2640 | 0 | case TM_SelfModified: |
2641 | | |
2642 | | /* |
2643 | | * This can be reached when following an update |
2644 | | * chain from a tuple updated by another session, |
2645 | | * reaching a tuple that was already updated in |
2646 | | * this transaction. If previously modified by |
2647 | | * this command, ignore the redundant update, |
2648 | | * otherwise error out. |
2649 | | * |
2650 | | * See also TM_SelfModified response to |
2651 | | * table_tuple_update() above. |
2652 | | */ |
2653 | 0 | if (context->tmfd.cmax != estate->es_output_cid) |
2654 | 0 | ereport(ERROR, |
2655 | 0 | (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), |
2656 | 0 | errmsg("tuple to be updated was already modified by an operation triggered by the current command"), |
2657 | 0 | errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); |
2658 | 0 | return NULL; |
2659 | | |
2660 | 0 | default: |
2661 | | /* see table_tuple_lock call in ExecDelete() */ |
2662 | 0 | elog(ERROR, "unexpected table_tuple_lock status: %u", |
2663 | 0 | result); |
2664 | 0 | return NULL; |
2665 | 0 | } |
2666 | 0 | } |
2667 | | |
2668 | 0 | break; |
2669 | | |
2670 | 0 | case TM_Deleted: |
2671 | 0 | if (IsolationUsesXactSnapshot()) |
2672 | 0 | ereport(ERROR, |
2673 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
2674 | 0 | errmsg("could not serialize access due to concurrent delete"))); |
2675 | | /* tuple already deleted; nothing to do */ |
2676 | 0 | return NULL; |
2677 | | |
2678 | 0 | default: |
2679 | 0 | elog(ERROR, "unrecognized table_tuple_update status: %u", |
2680 | 0 | result); |
2681 | 0 | return NULL; |
2682 | 0 | } |
2683 | 0 | } |
2684 | | |
2685 | 0 | if (canSetTag) |
2686 | 0 | (estate->es_processed)++; |
2687 | |
|
2688 | 0 | ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple, |
2689 | 0 | slot); |
2690 | | |
2691 | | /* Process RETURNING if present */ |
2692 | 0 | if (resultRelInfo->ri_projectReturning) |
2693 | 0 | return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE, |
2694 | 0 | oldSlot, slot, context->planSlot); |
2695 | | |
2696 | 0 | return NULL; |
2697 | 0 | } |
2698 | | |
2699 | | /* |
2700 | | * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE |
2701 | | * |
2702 | | * Try to lock tuple for update as part of speculative insertion. If |
2703 | | * a qual originating from ON CONFLICT DO UPDATE is satisfied, update |
2704 | | * (but still lock the row, even though it may not satisfy estate's |
2705 | | * snapshot). |
2706 | | * |
2707 | | * Returns true if we're done (with or without an update), or false if |
2708 | | * the caller must retry the INSERT from scratch. |
2709 | | */ |
2710 | | static bool |
2711 | | ExecOnConflictUpdate(ModifyTableContext *context, |
2712 | | ResultRelInfo *resultRelInfo, |
2713 | | ItemPointer conflictTid, |
2714 | | TupleTableSlot *excludedSlot, |
2715 | | bool canSetTag, |
2716 | | TupleTableSlot **returning) |
2717 | 0 | { |
2718 | 0 | ModifyTableState *mtstate = context->mtstate; |
2719 | 0 | ExprContext *econtext = mtstate->ps.ps_ExprContext; |
2720 | 0 | Relation relation = resultRelInfo->ri_RelationDesc; |
2721 | 0 | ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause; |
2722 | 0 | TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing; |
2723 | 0 | TM_FailureData tmfd; |
2724 | 0 | LockTupleMode lockmode; |
2725 | 0 | TM_Result test; |
2726 | 0 | Datum xminDatum; |
2727 | 0 | TransactionId xmin; |
2728 | 0 | bool isnull; |
2729 | | |
2730 | | /* |
2731 | | * Parse analysis should have blocked ON CONFLICT for all system |
2732 | | * relations, which includes these. There's no fundamental obstacle to |
2733 | | * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other |
2734 | | * ExecUpdate() caller. |
2735 | | */ |
2736 | 0 | Assert(!resultRelInfo->ri_needLockTagTuple); |
2737 | | |
2738 | | /* Determine lock mode to use */ |
2739 | 0 | lockmode = ExecUpdateLockMode(context->estate, resultRelInfo); |
2740 | | |
2741 | | /* |
2742 | | * Lock tuple for update. Don't follow updates when tuple cannot be |
2743 | | * locked without doing so. A row locking conflict here means our |
2744 | | * previous conclusion that the tuple is conclusively committed is not |
2745 | | * true anymore. |
2746 | | */ |
2747 | 0 | test = table_tuple_lock(relation, conflictTid, |
2748 | 0 | context->estate->es_snapshot, |
2749 | 0 | existing, context->estate->es_output_cid, |
2750 | 0 | lockmode, LockWaitBlock, 0, |
2751 | 0 | &tmfd); |
2752 | 0 | switch (test) |
2753 | 0 | { |
2754 | 0 | case TM_Ok: |
2755 | | /* success! */ |
2756 | 0 | break; |
2757 | | |
2758 | 0 | case TM_Invisible: |
2759 | | |
2760 | | /* |
2761 | | * This can occur when a just-inserted tuple is updated again in |
2762 | | * the same command, e.g. because multiple rows with the same |
2763 | | * conflicting key values are inserted. |
2764 | | * |
2765 | | * This is somewhat similar to the ExecUpdate() TM_SelfModified |
2766 | | * case. We do not want to proceed because it would lead to the |
2767 | | * same row being updated a second time in some unspecified order, |
2768 | | * and in contrast to plain UPDATEs there's no historical behavior |
2769 | | * to break. |
2770 | | * |
2771 | | * It is the user's responsibility to prevent this situation from |
2772 | | * occurring. These problems are why the SQL standard similarly |
2773 | | * specifies that for SQL MERGE, an exception must be raised in |
2774 | | * the event of an attempt to update the same row twice. |
2775 | | */ |
2776 | 0 | xminDatum = slot_getsysattr(existing, |
2777 | 0 | MinTransactionIdAttributeNumber, |
2778 | 0 | &isnull); |
2779 | 0 | Assert(!isnull); |
2780 | 0 | xmin = DatumGetTransactionId(xminDatum); |
2781 | |
2782 | 0 | if (TransactionIdIsCurrentTransactionId(xmin)) |
2783 | 0 | ereport(ERROR, |
2784 | 0 | (errcode(ERRCODE_CARDINALITY_VIOLATION), |
2785 | | /* translator: %s is a SQL command name */ |
2786 | 0 | errmsg("%s command cannot affect row a second time", |
2787 | 0 | "ON CONFLICT DO UPDATE"), |
2788 | 0 | errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values."))); |
2789 | | |
2790 | | /* This shouldn't happen */ |
2791 | 0 | elog(ERROR, "attempted to lock invisible tuple"); |
2792 | 0 | break; |
2793 | | |
2794 | 0 | case TM_SelfModified: |
2795 | | |
2796 | | /* |
2797 | | * This state should never be reached. As a dirty snapshot is used |
2798 | | * to find conflicting tuples, speculative insertion wouldn't have |
2799 | | * seen this row to conflict with. |
2800 | | */ |
2801 | 0 | elog(ERROR, "unexpected self-updated tuple"); |
2802 | 0 | break; |
2803 | | |
2804 | 0 | case TM_Updated: |
2805 | 0 | if (IsolationUsesXactSnapshot()) |
2806 | 0 | ereport(ERROR, |
2807 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
2808 | 0 | errmsg("could not serialize access due to concurrent update"))); |
2809 | | |
2810 | | /* |
2811 | | * As long as we don't support an UPDATE of INSERT ON CONFLICT for |
2812 | | * a partitioned table, we shouldn't reach a case where the tuple |
2813 | | * to be locked has been moved to another partition due to a |
2814 | | * concurrent update of the partition key. |
2815 | | */ |
2816 | 0 | Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid)); |
2817 | | |
2818 | | /* |
2819 | | * Tell caller to try again from the very start. |
2820 | | * |
2821 | | * It does not make sense to use the usual EvalPlanQual() style |
2822 | | * loop here, as the new version of the row might not conflict |
2823 | | * anymore, or the conflicting tuple has actually been deleted. |
2824 | | */ |
2825 | 0 | ExecClearTuple(existing); |
2826 | 0 | return false; |
2827 | | |
2828 | 0 | case TM_Deleted: |
2829 | 0 | if (IsolationUsesXactSnapshot()) |
2830 | 0 | ereport(ERROR, |
2831 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
2832 | 0 | errmsg("could not serialize access due to concurrent delete"))); |
2833 | | |
2834 | | /* see TM_Updated case */ |
2835 | 0 | Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid)); |
2836 | 0 | ExecClearTuple(existing); |
2837 | 0 | return false; |
2838 | | |
2839 | 0 | default: |
2840 | 0 | elog(ERROR, "unrecognized table_tuple_lock status: %u", test); |
2841 | 0 | } |
2842 | | |
2843 | | /* Success, the tuple is locked. */ |
2844 | | |
2845 | | /* |
2846 | | * Verify that the tuple is visible to our MVCC snapshot if the current |
2847 | | * isolation level mandates that. |
2848 | | * |
2849 | | * It's not sufficient to rely on the check within ExecUpdate(), as e.g. |
2850 | | * the ON CONFLICT ... WHERE clause may prevent us from reaching it. |
2851 | | * |
2852 | | * This means we only ever continue when a new command in the current |
2853 | | * transaction could see the row, even though in READ COMMITTED mode the |
2854 | | * tuple will not be visible according to the current statement's |
2855 | | * snapshot. This is in line with the way UPDATE deals with newer tuple |
2856 | | * versions. |
2857 | | */ |
2858 | 0 | ExecCheckTupleVisible(context->estate, relation, existing); |
2859 | | |
2860 | | /* |
2861 | | * Make tuple and any needed join variables available to ExecQual and |
2862 | | * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while |
2863 | | * the target's existing tuple is installed in the scantuple. EXCLUDED |
2864 | | * has been made to reference INNER_VAR in setrefs.c, but there is no |
2865 | | * other redirection. |
2866 | | */ |
2867 | 0 | econtext->ecxt_scantuple = existing; |
2868 | 0 | econtext->ecxt_innertuple = excludedSlot; |
2869 | 0 | econtext->ecxt_outertuple = NULL; |
2870 | |
2871 | 0 | if (!ExecQual(onConflictSetWhere, econtext)) |
2872 | 0 | { |
2873 | 0 | ExecClearTuple(existing); /* see return below */ |
2874 | 0 | InstrCountFiltered1(&mtstate->ps, 1); |
2875 | 0 | return true; /* done with the tuple */ |
2876 | 0 | } |
2877 | | |
2878 | 0 | if (resultRelInfo->ri_WithCheckOptions != NIL) |
2879 | 0 | { |
2880 | | /* |
2881 | | * Check target's existing tuple against UPDATE-applicable USING |
2882 | | * security barrier quals (if any), enforced here as RLS checks/WCOs. |
2883 | | * |
2884 | | * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security |
2885 | | * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK, |
2886 | | * but that's almost the extent of its special handling for ON |
2887 | | * CONFLICT DO UPDATE. |
2888 | | * |
2889 | | * The rewriter will also have associated UPDATE applicable straight |
2890 | | * RLS checks/WCOs for the benefit of the ExecUpdate() call that |
2891 | | * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO |
2892 | | * kinds, so there is no danger of spurious over-enforcement in the |
2893 | | * INSERT or UPDATE path. |
2894 | | */ |
2895 | 0 | ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo, |
2896 | 0 | existing, |
2897 | 0 | mtstate->ps.state); |
2898 | 0 | } |
2899 | | |
2900 | | /* Project the new tuple version */ |
2901 | 0 | ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo); |
2902 | | |
2903 | | /* |
2904 | | * Note that it is possible that the target tuple has been modified in |
2905 | | * this session, after the above table_tuple_lock. We choose to not error |
2906 | | * out in that case, in line with ExecUpdate's treatment of similar cases. |
2907 | | * This can happen if an UPDATE is triggered from within ExecQual(), |
2908 | | * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a |
2909 | | * wCTE in the ON CONFLICT's SET. |
2910 | | */ |
2911 | | |
2912 | | /* Execute UPDATE with projection */ |
2913 | 0 | *returning = ExecUpdate(context, resultRelInfo, |
2914 | 0 | conflictTid, NULL, existing, |
2915 | 0 | resultRelInfo->ri_onConflict->oc_ProjSlot, |
2916 | 0 | canSetTag); |
2917 | | |
2918 | | /* |
2919 | | * Clear out existing tuple, as there might not be another conflict among |
2920 | | * the next input rows. Don't want to hold resources till the end of the |
2921 | | * query. First though, make sure that the returning slot, if any, has a |
2922 | | * local copy of any OLD pass-by-reference values, if it refers to any OLD |
2923 | | * columns. |
2924 | | */ |
2925 | 0 | if (*returning != NULL && |
2926 | 0 | resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) |
2927 | 0 | ExecMaterializeSlot(*returning); |
2928 | |
2929 | 0 | ExecClearTuple(existing); |
2930 | |
2931 | 0 | return true; |
2932 | 0 | } |
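
The control flow above is easier to follow in isolation. Below is a minimal standalone sketch, not part of this file, of the lock-recheck-update cycle that ExecOnConflictUpdate() implements; every type and function name in it is a hypothetical stand-in for the executor machinery (table_tuple_lock(), the ON CONFLICT ... WHERE qual, and the DO UPDATE SET projection).

    /*
     * Minimal, self-contained model of the ON CONFLICT DO UPDATE control
     * flow.  All names are hypothetical; they only mirror the shape of the
     * real executor code.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef enum
    {
        LOCK_OK,                    /* like TM_Ok */
        LOCK_UPDATED,               /* like TM_Updated */
        LOCK_DELETED,               /* like TM_Deleted */
        LOCK_SELF_MODIFIED          /* row already touched by this command */
    } LockResult;

    typedef struct
    {
        LockResult  (*lock_conflicting_row) (void); /* like table_tuple_lock() */
        bool        (*where_clause_passes) (void);  /* ON CONFLICT ... WHERE qual */
        void        (*apply_update) (void);         /* DO UPDATE SET projection */
    } ConflictOps;

    /*
     * Returns true when the conflict has been fully handled (row updated or
     * deliberately skipped); false means "retry the INSERT from scratch".
     */
    static bool
    on_conflict_update(const ConflictOps *ops)
    {
        switch (ops->lock_conflicting_row())
        {
            case LOCK_OK:
                break;
            case LOCK_UPDATED:
            case LOCK_DELETED:
                /* The new row version might not conflict at all: restart. */
                return false;
            case LOCK_SELF_MODIFIED:
                fprintf(stderr, "cannot affect row a second time\n");
                exit(1);
        }

        if (!ops->where_clause_passes())
            return true;            /* row stays locked but is left unchanged */

        ops->apply_update();
        return true;
    }

    static LockResult lock_ok(void) { return LOCK_OK; }
    static bool where_true(void) { return true; }
    static void do_update(void) { puts("row updated"); }

    int
    main(void)
    {
        ConflictOps ops = {lock_ok, where_true, do_update};

        while (!on_conflict_update(&ops))
            ;                       /* executor: redo the speculative insertion */
        return 0;
    }

The real function additionally rechecks MVCC visibility and RLS WITH CHECK OPTIONs on the locked row before projecting and applying the replacement tuple.
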
2933 | | |
2934 | | /* |
2935 | | * Perform MERGE. |
2936 | | */ |
2937 | | static TupleTableSlot * |
2938 | | ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
2939 | | ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag) |
2940 | 0 | { |
2941 | 0 | TupleTableSlot *rslot = NULL; |
2942 | 0 | bool matched; |
2943 | | |
2944 | | /*----- |
2945 | | * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is |
2946 | | * valid, depending on whether the result relation is a table or a view. |
2947 | | * We execute the first action for which the additional WHEN MATCHED AND |
2948 | | * quals pass. If an action without quals is found, that action is |
2949 | | * executed. |
2950 | | * |
2951 | | * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple |
2952 | | * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions |
2953 | | * in sequence until one passes. This is almost identical to the WHEN |
2954 | | * MATCHED case, and both cases are handled by ExecMergeMatched(). |
2955 | | * |
2956 | | * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and |
2957 | | * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY |
2958 | | * TARGET] actions in sequence until one passes. |
2959 | | * |
2960 | | * Things get interesting in case of concurrent update/delete of the |
2961 | | * target tuple. Such concurrent update/delete is detected while we are |
2962 | | * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action. |
2963 | | * |
2964 | | * A concurrent update can: |
2965 | | * |
2966 | | * 1. modify the target tuple so that the results from checking any |
2967 | | * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY |
2968 | | * SOURCE actions potentially change, but the result from the join |
2969 | | * quals does not change. |
2970 | | * |
2971 | | * In this case, we are still dealing with the same kind of match |
2972 | | * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of |
2973 | | * actions from the start and choose the first one that satisfies the |
2974 | | * new target tuple. |
2975 | | * |
2976 | | * 2. modify the target tuple in the WHEN MATCHED case so that the join |
2977 | | * quals no longer pass and hence the source and target tuples no |
2978 | | * longer match. |
2979 | | * |
2980 | | * In this case, we are now dealing with a NOT MATCHED case, and we |
2981 | | * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY |
2982 | | * TARGET] actions. First ExecMergeMatched() processes the list of |
2983 | | * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes, |
2984 | | * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY |
2985 | | * TARGET] actions in sequence until one passes. Thus we may execute |
2986 | | * two actions; one of each kind. |
2987 | | * |
2988 | | * Thus we support concurrent updates that turn MATCHED candidate rows |
2989 | | * into NOT MATCHED rows. However, we do not attempt to support cases |
2990 | | * that would turn NOT MATCHED rows into MATCHED rows, or which would |
2991 | | * cause a target row to match a different source row. |
2992 | | * |
2993 | | * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED |
2994 | | * [BY TARGET]. |
2995 | | * |
2996 | | * ExecMergeMatched() takes care of following the update chain and |
2997 | | * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE |
2998 | | * action, as long as the target tuple still exists. If the target tuple |
2999 | | * gets deleted or a concurrent update causes the join quals to fail, it |
3000 | | * returns a matched status of false and we call ExecMergeNotMatched(). |
3001 | | * Given that ExecMergeMatched() always makes progress by following the |
3002 | | * update chain and we never switch from ExecMergeNotMatched() to |
3003 | | * ExecMergeMatched(), there is no risk of a livelock. |
3004 | | */ |
3005 | 0 | matched = tupleid != NULL || oldtuple != NULL; |
3006 | 0 | if (matched) |
3007 | 0 | rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple, |
3008 | 0 | canSetTag, &matched); |
3009 | | |
3010 | | /* |
3011 | | * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the |
3012 | | * join, or a previously MATCHED tuple for which ExecMergeMatched() set |
3013 | | * "matched" to false, indicating that it no longer matches). |
3014 | | */ |
3015 | 0 | if (!matched) |
3016 | 0 | { |
3017 | | /* |
3018 | | * If a concurrent update turned a MATCHED case into a NOT MATCHED |
3019 | | * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT |
3020 | | * MATCHED [BY TARGET] actions, and there is a RETURNING clause, |
3021 | | * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY |
3022 | | * SOURCE action, and computed the row to return. If so, we cannot |
3023 | | * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as |
3024 | | * pending (to be processed on the next call to ExecModifyTable()). |
3025 | | * Otherwise, just process the action now. |
3026 | | */ |
3027 | 0 | if (rslot == NULL) |
3028 | 0 | rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag); |
3029 | 0 | else |
3030 | 0 | context->mtstate->mt_merge_pending_not_matched = context->planSlot; |
3031 | 0 | } |
3032 | |
3033 | 0 | return rslot; |
3034 | 0 | } |
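
A minimal sketch of the dispatch ExecMerge() performs, using hypothetical stand-in names: the presence of row-locating info decides whether the MATCHED / NOT MATCHED BY SOURCE path runs first, and a concurrent update or delete can clear the matched flag so that the NOT MATCHED [BY TARGET] path runs in the same cycle. The pending-RETURNING case is omitted here.

    /*
     * Hypothetical, simplified model of ExecMerge()'s dispatch between the
     * MATCHED / NOT MATCHED BY SOURCE path and the NOT MATCHED [BY TARGET]
     * path.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct
    {
        bool        have_target_row;    /* tupleid or oldtuple was supplied */
    } MergeInput;

    /* Stand-in for ExecMergeMatched(); may clear *matched if a concurrent
     * update or delete turned this into a NOT MATCHED case. */
    static const char *
    merge_matched(const MergeInput *in, bool *matched)
    {
        (void) in;
        *matched = true;                /* pretend the target row survived */
        return "ran a WHEN MATCHED action";
    }

    static const char *
    merge_not_matched(void)
    {
        return "ran a WHEN NOT MATCHED [BY TARGET] action";
    }

    static void
    exec_merge(const MergeInput *in)
    {
        bool        matched = in->have_target_row;
        const char *what = "no action qualified";

        if (matched)
            what = merge_matched(in, &matched);

        /* Fall through at most once if the row vanished or never matched. */
        if (!matched)
            what = merge_not_matched();

        printf("%s\n", what);
    }

    int
    main(void)
    {
        MergeInput  with_target = {true};
        MergeInput  without_target = {false};

        exec_merge(&with_target);
        exec_merge(&without_target);
        return 0;
    }
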
3035 | | |
3036 | | /* |
3037 | | * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE |
3038 | | * action, depending on whether the join quals are satisfied. If the target |
3039 | | * relation is a table, the current target tuple is identified by tupleid. |
3040 | | * Otherwise, if the target relation is a view, oldtuple is the current target |
3041 | | * tuple from the view. |
3042 | | * |
3043 | | * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action |
3044 | | * and check if the WHEN quals pass, if any. If the WHEN quals for the first |
3045 | | * action do not pass, we check the second, then the third and so on. If we |
3046 | | * reach the end without finding a qualifying action, we return NULL. |
3047 | | * Otherwise, we execute the qualifying action and return its RETURNING |
3048 | | * result, if any, or NULL. |
3049 | | * |
3050 | | * On entry, "*matched" is assumed to be true. If a concurrent update or |
3051 | | * delete is detected that causes the join quals to no longer pass, we set it |
3052 | | * to false, indicating that the caller should process any NOT MATCHED [BY |
3053 | | * TARGET] actions. |
3054 | | * |
3055 | | * After a concurrent update, we restart from the first action to look for a |
3056 | | * new qualifying action to execute. If the join quals originally passed, and |
3057 | | * the concurrent update caused them to no longer pass, then we switch from |
3058 | | * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting |
3059 | | * (and setting "*matched" to false). As a result we may execute a WHEN NOT |
3060 | | * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller |
3061 | | * to also execute a WHEN NOT MATCHED [BY TARGET] action. |
3062 | | */ |
3063 | | static TupleTableSlot * |
3064 | | ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
3065 | | ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag, |
3066 | | bool *matched) |
3067 | 0 | { |
3068 | 0 | ModifyTableState *mtstate = context->mtstate; |
3069 | 0 | List **mergeActions = resultRelInfo->ri_MergeActions; |
3070 | 0 | ItemPointerData lockedtid; |
3071 | 0 | List *actionStates; |
3072 | 0 | TupleTableSlot *newslot = NULL; |
3073 | 0 | TupleTableSlot *rslot = NULL; |
3074 | 0 | EState *estate = context->estate; |
3075 | 0 | ExprContext *econtext = mtstate->ps.ps_ExprContext; |
3076 | 0 | bool isNull; |
3077 | 0 | EPQState *epqstate = &mtstate->mt_epqstate; |
3078 | 0 | ListCell *l; |
3079 | | |
3080 | | /* Expect matched to be true on entry */ |
3081 | 0 | Assert(*matched); |
3082 | | |
3083 | | /* |
3084 | | * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we |
3085 | | * are done. |
3086 | | */ |
3087 | 0 | if (mergeActions[MERGE_WHEN_MATCHED] == NIL && |
3088 | 0 | mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL) |
3089 | 0 | return NULL; |
3090 | | |
3091 | | /* |
3092 | | * Make tuple and any needed join variables available to ExecQual and |
3093 | | * ExecProject. The target's existing tuple is installed in the scantuple. |
3094 | | * This target relation's slot is required only in the case of a MATCHED |
3095 | | * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions. |
3096 | | */ |
3097 | 0 | econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot; |
3098 | 0 | econtext->ecxt_innertuple = context->planSlot; |
3099 | 0 | econtext->ecxt_outertuple = NULL; |
3100 | | |
3101 | | /* |
3102 | | * This routine is only invoked for matched target rows, so we should |
3103 | | * either have the tupleid of the target row, or an old tuple from the |
3104 | | * target wholerow junk attr. |
3105 | | */ |
3106 | 0 | Assert(tupleid != NULL || oldtuple != NULL); |
3107 | 0 | ItemPointerSetInvalid(&lockedtid); |
3108 | 0 | if (oldtuple != NULL) |
3109 | 0 | { |
3110 | 0 | Assert(!resultRelInfo->ri_needLockTagTuple); |
3111 | 0 | ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot, |
3112 | 0 | false); |
3113 | 0 | } |
3114 | 0 | else |
3115 | 0 | { |
3116 | 0 | if (resultRelInfo->ri_needLockTagTuple) |
3117 | 0 | { |
3118 | | /* |
3119 | | * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples |
3120 | | * that don't match mas_whenqual. MERGE on system catalogs is a |
3121 | | * minor use case, so don't bother optimizing those. |
3122 | | */ |
3123 | 0 | LockTuple(resultRelInfo->ri_RelationDesc, tupleid, |
3124 | 0 | InplaceUpdateTupleLock); |
3125 | 0 | lockedtid = *tupleid; |
3126 | 0 | } |
3127 | 0 | if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc, |
3128 | 0 | tupleid, |
3129 | 0 | SnapshotAny, |
3130 | 0 | resultRelInfo->ri_oldTupleSlot)) |
3131 | 0 | elog(ERROR, "failed to fetch the target tuple"); |
3132 | 0 | } |
3133 | | |
3134 | | /* |
3135 | | * Test the join condition. If it's satisfied, perform a MATCHED action. |
3136 | | * Otherwise, perform a NOT MATCHED BY SOURCE action. |
3137 | | * |
3138 | | * Note that this join condition will be NULL if there are no NOT MATCHED |
3139 | | * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we |
3140 | | * need only consider MATCHED actions here. |
3141 | | */ |
3142 | 0 | if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext)) |
3143 | 0 | actionStates = mergeActions[MERGE_WHEN_MATCHED]; |
3144 | 0 | else |
3145 | 0 | actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE]; |
3146 | |
3147 | 0 | lmerge_matched: |
3148 | |
3149 | 0 | foreach(l, actionStates) |
3150 | 0 | { |
3151 | 0 | MergeActionState *relaction = (MergeActionState *) lfirst(l); |
3152 | 0 | CmdType commandType = relaction->mas_action->commandType; |
3153 | 0 | TM_Result result; |
3154 | 0 | UpdateContext updateCxt = {0}; |
3155 | | |
3156 | | /* |
3157 | | * Test condition, if any. |
3158 | | * |
3159 | | * In the absence of any condition, we perform the action |
3160 | | * unconditionally (no need to check separately since ExecQual() will |
3161 | | * return true if there are no conditions to evaluate). |
3162 | | */ |
3163 | 0 | if (!ExecQual(relaction->mas_whenqual, econtext)) |
3164 | 0 | continue; |
3165 | | |
3166 | | /* |
3167 | | * Check if the existing target tuple meets the USING checks of |
3168 | | * UPDATE/DELETE RLS policies. If those checks fail, we throw an |
3169 | | * error. |
3170 | | * |
3171 | | * The WITH CHECK quals for UPDATE RLS policies are applied in |
3172 | | * ExecUpdateAct() and hence we need not do anything special to handle |
3173 | | * them. |
3174 | | * |
3175 | | * NOTE: We must do this after WHEN quals are evaluated, so that we |
3176 | | * check policies only when they matter. |
3177 | | */ |
3178 | 0 | if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING) |
3179 | 0 | { |
3180 | 0 | ExecWithCheckOptions(commandType == CMD_UPDATE ? |
3181 | 0 | WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK, |
3182 | 0 | resultRelInfo, |
3183 | 0 | resultRelInfo->ri_oldTupleSlot, |
3184 | 0 | context->mtstate->ps.state); |
3185 | 0 | } |
3186 | | |
3187 | | /* Perform stated action */ |
3188 | 0 | switch (commandType) |
3189 | 0 | { |
3190 | 0 | case CMD_UPDATE: |
3191 | | |
3192 | | /* |
3193 | | * Project the output tuple, and use that to update the table. |
3194 | | * We don't need to filter out junk attributes, because the |
3195 | | * UPDATE action's targetlist doesn't have any. |
3196 | | */ |
3197 | 0 | newslot = ExecProject(relaction->mas_proj); |
3198 | |
3199 | 0 | mtstate->mt_merge_action = relaction; |
3200 | 0 | if (!ExecUpdatePrologue(context, resultRelInfo, |
3201 | 0 | tupleid, NULL, newslot, &result)) |
3202 | 0 | { |
3203 | 0 | if (result == TM_Ok) |
3204 | 0 | goto out; /* "do nothing" */ |
3205 | | |
3206 | 0 | break; /* concurrent update/delete */ |
3207 | 0 | } |
3208 | | |
3209 | | /* INSTEAD OF ROW UPDATE Triggers */ |
3210 | 0 | if (resultRelInfo->ri_TrigDesc && |
3211 | 0 | resultRelInfo->ri_TrigDesc->trig_update_instead_row) |
3212 | 0 | { |
3213 | 0 | if (!ExecIRUpdateTriggers(estate, resultRelInfo, |
3214 | 0 | oldtuple, newslot)) |
3215 | 0 | goto out; /* "do nothing" */ |
3216 | 0 | } |
3217 | 0 | else |
3218 | 0 | { |
3219 | | /* checked ri_needLockTagTuple above */ |
3220 | 0 | Assert(oldtuple == NULL); |
3221 | |
3222 | 0 | result = ExecUpdateAct(context, resultRelInfo, tupleid, |
3223 | 0 | NULL, newslot, canSetTag, |
3224 | 0 | &updateCxt); |
3225 | | |
3226 | | /* |
3227 | | * As in ExecUpdate(), if ExecUpdateAct() reports that a |
3228 | | * cross-partition update was done, then there's nothing |
3229 | | * else for us to do --- the UPDATE has been turned into a |
3230 | | * DELETE and an INSERT, and we must not perform any of |
3231 | | * the usual post-update tasks. Also, the RETURNING tuple |
3232 | | * (if any) has been projected, so we can just return |
3233 | | * that. |
3234 | | */ |
3235 | 0 | if (updateCxt.crossPartUpdate) |
3236 | 0 | { |
3237 | 0 | mtstate->mt_merge_updated += 1; |
3238 | 0 | rslot = context->cpUpdateReturningSlot; |
3239 | 0 | goto out; |
3240 | 0 | } |
3241 | 0 | } |
3242 | | |
3243 | 0 | if (result == TM_Ok) |
3244 | 0 | { |
3245 | 0 | ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, |
3246 | 0 | tupleid, NULL, newslot); |
3247 | 0 | mtstate->mt_merge_updated += 1; |
3248 | 0 | } |
3249 | 0 | break; |
3250 | | |
3251 | 0 | case CMD_DELETE: |
3252 | 0 | mtstate->mt_merge_action = relaction; |
3253 | 0 | if (!ExecDeletePrologue(context, resultRelInfo, tupleid, |
3254 | 0 | NULL, NULL, &result)) |
3255 | 0 | { |
3256 | 0 | if (result == TM_Ok) |
3257 | 0 | goto out; /* "do nothing" */ |
3258 | | |
3259 | 0 | break; /* concurrent update/delete */ |
3260 | 0 | } |
3261 | | |
3262 | | /* INSTEAD OF ROW DELETE Triggers */ |
3263 | 0 | if (resultRelInfo->ri_TrigDesc && |
3264 | 0 | resultRelInfo->ri_TrigDesc->trig_delete_instead_row) |
3265 | 0 | { |
3266 | 0 | if (!ExecIRDeleteTriggers(estate, resultRelInfo, |
3267 | 0 | oldtuple)) |
3268 | 0 | goto out; /* "do nothing" */ |
3269 | 0 | } |
3270 | 0 | else |
3271 | 0 | { |
3272 | | /* checked ri_needLockTagTuple above */ |
3273 | 0 | Assert(oldtuple == NULL); |
3274 | |
3275 | 0 | result = ExecDeleteAct(context, resultRelInfo, tupleid, |
3276 | 0 | false); |
3277 | 0 | } |
3278 | | |
3279 | 0 | if (result == TM_Ok) |
3280 | 0 | { |
3281 | 0 | ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL, |
3282 | 0 | false); |
3283 | 0 | mtstate->mt_merge_deleted += 1; |
3284 | 0 | } |
3285 | 0 | break; |
3286 | | |
3287 | 0 | case CMD_NOTHING: |
3288 | | /* Doing nothing is always OK */ |
3289 | 0 | result = TM_Ok; |
3290 | 0 | break; |
3291 | | |
3292 | 0 | default: |
3293 | 0 | elog(ERROR, "unknown action in MERGE WHEN clause"); |
3294 | 0 | } |
3295 | | |
3296 | 0 | switch (result) |
3297 | 0 | { |
3298 | 0 | case TM_Ok: |
3299 | | /* all good; perform final actions */ |
3300 | 0 | if (canSetTag && commandType != CMD_NOTHING) |
3301 | 0 | (estate->es_processed)++; |
3302 | |
3303 | 0 | break; |
3304 | | |
3305 | 0 | case TM_SelfModified: |
3306 | | |
3307 | | /* |
3308 | | * The target tuple was already updated or deleted by the |
3309 | | * current command, or by a later command in the current |
3310 | | * transaction. The former case is explicitly disallowed by |
3311 | | * the SQL standard for MERGE, which insists that the MERGE |
3312 | | * join condition should not join a target row to more than |
3313 | | * one source row. |
3314 | | * |
3315 | | * The latter case arises if the tuple is modified by a |
3316 | | * command in a BEFORE trigger, or perhaps by a command in a |
3317 | | * volatile function used in the query. In such situations we |
3318 | | * should not ignore the MERGE action, but it is equally |
3319 | | * unsafe to proceed. We don't want to discard the original |
3320 | | * MERGE action while keeping the triggered actions based on |
3321 | | * it; and it would be no better to allow the original MERGE |
3322 | | * action while discarding the updates that it triggered. So |
3323 | | * throwing an error is the only safe course. |
3324 | | */ |
3325 | 0 | if (context->tmfd.cmax != estate->es_output_cid) |
3326 | 0 | ereport(ERROR, |
3327 | 0 | (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), |
3328 | 0 | errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"), |
3329 | 0 | errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); |
3330 | | |
3331 | 0 | if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax)) |
3332 | 0 | ereport(ERROR, |
3333 | 0 | (errcode(ERRCODE_CARDINALITY_VIOLATION), |
3334 | | /* translator: %s is a SQL command name */ |
3335 | 0 | errmsg("%s command cannot affect row a second time", |
3336 | 0 | "MERGE"), |
3337 | 0 | errhint("Ensure that not more than one source row matches any one target row."))); |
3338 | | |
3339 | | /* This shouldn't happen */ |
3340 | 0 | elog(ERROR, "attempted to update or delete invisible tuple"); |
3341 | 0 | break; |
3342 | | |
3343 | 0 | case TM_Deleted: |
3344 | 0 | if (IsolationUsesXactSnapshot()) |
3345 | 0 | ereport(ERROR, |
3346 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
3347 | 0 | errmsg("could not serialize access due to concurrent delete"))); |
3348 | | |
3349 | | /* |
3350 | | * If the tuple was already deleted, set matched to false to |
3351 | | * let caller handle it under NOT MATCHED [BY TARGET] clauses. |
3352 | | */ |
3353 | 0 | *matched = false; |
3354 | 0 | goto out; |
3355 | | |
3356 | 0 | case TM_Updated: |
3357 | 0 | { |
3358 | 0 | bool was_matched; |
3359 | 0 | Relation resultRelationDesc; |
3360 | 0 | TupleTableSlot *epqslot, |
3361 | 0 | *inputslot; |
3362 | 0 | LockTupleMode lockmode; |
3363 | | |
3364 | | /* |
3365 | | * The target tuple was concurrently updated by some other |
3366 | | * transaction. If we are currently processing a MATCHED |
3367 | | * action, use EvalPlanQual() with the new version of the |
3368 | | * tuple and recheck the join qual, to detect a change |
3369 | | * from the MATCHED to the NOT MATCHED cases. If we are |
3370 | | * already processing a NOT MATCHED BY SOURCE action, we |
3371 | | * skip this (cannot switch from NOT MATCHED BY SOURCE to |
3372 | | * MATCHED). |
3373 | | */ |
3374 | 0 | was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED; |
3375 | 0 | resultRelationDesc = resultRelInfo->ri_RelationDesc; |
3376 | 0 | lockmode = ExecUpdateLockMode(estate, resultRelInfo); |
3377 | |
3378 | 0 | if (was_matched) |
3379 | 0 | inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc, |
3380 | 0 | resultRelInfo->ri_RangeTableIndex); |
3381 | 0 | else |
3382 | 0 | inputslot = resultRelInfo->ri_oldTupleSlot; |
3383 | |
3384 | 0 | result = table_tuple_lock(resultRelationDesc, tupleid, |
3385 | 0 | estate->es_snapshot, |
3386 | 0 | inputslot, estate->es_output_cid, |
3387 | 0 | lockmode, LockWaitBlock, |
3388 | 0 | TUPLE_LOCK_FLAG_FIND_LAST_VERSION, |
3389 | 0 | &context->tmfd); |
3390 | 0 | switch (result) |
3391 | 0 | { |
3392 | 0 | case TM_Ok: |
3393 | | |
3394 | | /* |
3395 | | * If the tuple was updated and migrated to |
3396 | | * another partition concurrently, the current |
3397 | | * MERGE implementation can't follow. There's |
3398 | | * probably a better way to handle this case, but |
3399 | | * it'd require recognizing the relation to which |
3400 | | * the tuple moved, and setting our current |
3401 | | * resultRelInfo to that. |
3402 | | */ |
3403 | 0 | if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid)) |
3404 | 0 | ereport(ERROR, |
3405 | 0 | (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), |
3406 | 0 | errmsg("tuple to be merged was already moved to another partition due to concurrent update"))); |
3407 | | |
3408 | | /* |
3409 | | * If this was a MATCHED case, use EvalPlanQual() |
3410 | | * to recheck the join condition. |
3411 | | */ |
3412 | 0 | if (was_matched) |
3413 | 0 | { |
3414 | 0 | epqslot = EvalPlanQual(epqstate, |
3415 | 0 | resultRelationDesc, |
3416 | 0 | resultRelInfo->ri_RangeTableIndex, |
3417 | 0 | inputslot); |
3418 | | |
3419 | | /* |
3420 | | * If the subplan didn't return a tuple, then |
3421 | | * we must be dealing with an inner join for |
3422 | | * which the join condition no longer matches. |
3423 | | * This can only happen if there are no NOT |
3424 | | * MATCHED actions, and so there is nothing |
3425 | | * more to do. |
3426 | | */ |
3427 | 0 | if (TupIsNull(epqslot)) |
3428 | 0 | goto out; |
3429 | | |
3430 | | /* |
3431 | | * If we got a NULL ctid from the subplan, the |
3432 | | * join quals no longer pass and we switch to |
3433 | | * the NOT MATCHED BY SOURCE case. |
3434 | | */ |
3435 | 0 | (void) ExecGetJunkAttribute(epqslot, |
3436 | 0 | resultRelInfo->ri_RowIdAttNo, |
3437 | 0 | &isNull); |
3438 | 0 | if (isNull) |
3439 | 0 | *matched = false; |
3440 | | |
3441 | | /* |
3442 | | * Otherwise, recheck the join quals to see if |
3443 | | * we need to switch to the NOT MATCHED BY |
3444 | | * SOURCE case. |
3445 | | */ |
3446 | 0 | if (resultRelInfo->ri_needLockTagTuple) |
3447 | 0 | { |
3448 | 0 | if (ItemPointerIsValid(&lockedtid)) |
3449 | 0 | UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid, |
3450 | 0 | InplaceUpdateTupleLock); |
3451 | 0 | LockTuple(resultRelInfo->ri_RelationDesc, &context->tmfd.ctid, |
3452 | 0 | InplaceUpdateTupleLock); |
3453 | 0 | lockedtid = context->tmfd.ctid; |
3454 | 0 | } |
3455 | 0 | if (!table_tuple_fetch_row_version(resultRelationDesc, |
3456 | 0 | &context->tmfd.ctid, |
3457 | 0 | SnapshotAny, |
3458 | 0 | resultRelInfo->ri_oldTupleSlot)) |
3459 | 0 | elog(ERROR, "failed to fetch the target tuple"); |
3460 | | |
3461 | 0 | if (*matched) |
3462 | 0 | *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition, |
3463 | 0 | econtext); |
3464 | | |
3465 | | /* Switch lists, if necessary */ |
3466 | 0 | if (!*matched) |
3467 | 0 | actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE]; |
3468 | 0 | } |
3469 | | |
3470 | | /* |
3471 | | * Loop back and process the MATCHED or NOT |
3472 | | * MATCHED BY SOURCE actions from the start. |
3473 | | */ |
3474 | 0 | goto lmerge_matched; |
3475 | | |
3476 | 0 | case TM_Deleted: |
3477 | | |
3478 | | /* |
3479 | | * tuple already deleted; tell caller to run NOT |
3480 | | * MATCHED [BY TARGET] actions |
3481 | | */ |
3482 | 0 | *matched = false; |
3483 | 0 | goto out; |
3484 | | |
3485 | 0 | case TM_SelfModified: |
3486 | | |
3487 | | /* |
3488 | | * This can be reached when following an update |
3489 | | * chain from a tuple updated by another session, |
3490 | | * reaching a tuple that was already updated or |
3491 | | * deleted by the current command, or by a later |
3492 | | * command in the current transaction. As above, |
3493 | | * this should always be treated as an error. |
3494 | | */ |
3495 | 0 | if (context->tmfd.cmax != estate->es_output_cid) |
3496 | 0 | ereport(ERROR, |
3497 | 0 | (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), |
3498 | 0 | errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"), |
3499 | 0 | errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); |
3500 | | |
3501 | 0 | if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax)) |
3502 | 0 | ereport(ERROR, |
3503 | 0 | (errcode(ERRCODE_CARDINALITY_VIOLATION), |
3504 | | /* translator: %s is a SQL command name */ |
3505 | 0 | errmsg("%s command cannot affect row a second time", |
3506 | 0 | "MERGE"), |
3507 | 0 | errhint("Ensure that not more than one source row matches any one target row."))); |
3508 | | |
3509 | | /* This shouldn't happen */ |
3510 | 0 | elog(ERROR, "attempted to update or delete invisible tuple"); |
3511 | 0 | goto out; |
3512 | | |
3513 | 0 | default: |
3514 | | /* see table_tuple_lock call in ExecDelete() */ |
3515 | 0 | elog(ERROR, "unexpected table_tuple_lock status: %u", |
3516 | 0 | result); |
3517 | 0 | goto out; |
3518 | 0 | } |
3519 | 0 | } |
3520 | | |
3521 | 0 | case TM_Invisible: |
3522 | 0 | case TM_WouldBlock: |
3523 | 0 | case TM_BeingModified: |
3524 | | /* these should not occur */ |
3525 | 0 | elog(ERROR, "unexpected tuple operation result: %d", result); |
3526 | 0 | break; |
3527 | 0 | } |
3528 | | |
3529 | | /* Process RETURNING if present */ |
3530 | 0 | if (resultRelInfo->ri_projectReturning) |
3531 | 0 | { |
3532 | 0 | switch (commandType) |
3533 | 0 | { |
3534 | 0 | case CMD_UPDATE: |
3535 | 0 | rslot = ExecProcessReturning(context, |
3536 | 0 | resultRelInfo, |
3537 | 0 | CMD_UPDATE, |
3538 | 0 | resultRelInfo->ri_oldTupleSlot, |
3539 | 0 | newslot, |
3540 | 0 | context->planSlot); |
3541 | 0 | break; |
3542 | | |
3543 | 0 | case CMD_DELETE: |
3544 | 0 | rslot = ExecProcessReturning(context, |
3545 | 0 | resultRelInfo, |
3546 | 0 | CMD_DELETE, |
3547 | 0 | resultRelInfo->ri_oldTupleSlot, |
3548 | 0 | NULL, |
3549 | 0 | context->planSlot); |
3550 | 0 | break; |
3551 | | |
3552 | 0 | case CMD_NOTHING: |
3553 | 0 | break; |
3554 | | |
3555 | 0 | default: |
3556 | 0 | elog(ERROR, "unrecognized commandType: %d", |
3557 | 0 | (int) commandType); |
3558 | 0 | } |
3559 | 0 | } |
3560 | | |
3561 | | /* |
3562 | | * We've activated one of the WHEN clauses, so we don't search |
3563 | | * further. This is required behaviour, not an optimization. |
3564 | | */ |
3565 | 0 | break; |
3566 | 0 | } |
3567 | | |
3568 | | /* |
3569 | | * Successfully executed an action or no qualifying action was found. |
3570 | | */ |
3571 | 0 | out: |
3572 | 0 | if (ItemPointerIsValid(&lockedtid)) |
3573 | 0 | UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid, |
3574 | 0 | InplaceUpdateTupleLock); |
3575 | 0 | return rslot; |
3576 | 0 | } |
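
Stripped of tuple locking and EvalPlanQual rechecks, the core of ExecMergeMatched() is the scan over the active action list: skip actions whose WHEN qual fails and run the first one that qualifies. A minimal sketch with hypothetical names:

    /*
     * Hypothetical sketch of choosing the first qualifying MERGE action from
     * a list, as the lmerge_matched loop does for MATCHED and NOT MATCHED BY
     * SOURCE actions.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef enum { ACTION_UPDATE, ACTION_DELETE, ACTION_NOTHING } ActionKind;

    typedef struct
    {
        ActionKind  kind;
        bool        (*when_qual) (void);    /* NULL: no extra qual, always true */
    } MergeAction;

    static const MergeAction *
    first_qualifying_action(const MergeAction *actions, size_t n)
    {
        for (size_t i = 0; i < n; i++)
        {
            if (actions[i].when_qual == NULL || actions[i].when_qual())
                return &actions[i];     /* stop at the first match; required */
        }
        return NULL;                    /* no action qualifies: leave row alone */
    }

    static bool qual_false(void) { return false; }

    int
    main(void)
    {
        MergeAction actions[] = {
            {ACTION_DELETE, qual_false},    /* WHEN MATCHED AND <qual> THEN DELETE */
            {ACTION_UPDATE, NULL},          /* WHEN MATCHED THEN UPDATE */
        };
        const MergeAction *chosen = first_qualifying_action(actions, 2);

        printf("chosen action kind: %d\n", chosen ? (int) chosen->kind : -1);
        return 0;
    }

Stopping at the first qualifying action is required behaviour, not an optimization, which is why the sketch returns immediately rather than collecting all matches.
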
3577 | | |
3578 | | /* |
3579 | | * Execute the first qualifying NOT MATCHED [BY TARGET] action. |
3580 | | */ |
3581 | | static TupleTableSlot * |
3582 | | ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, |
3583 | | bool canSetTag) |
3584 | 0 | { |
3585 | 0 | ModifyTableState *mtstate = context->mtstate; |
3586 | 0 | ExprContext *econtext = mtstate->ps.ps_ExprContext; |
3587 | 0 | List *actionStates; |
3588 | 0 | TupleTableSlot *rslot = NULL; |
3589 | 0 | ListCell *l; |
3590 | | |
3591 | | /* |
3592 | | * For INSERT actions, the root relation's merge action is OK since the |
3593 | | * INSERT's targetlist and the WHEN conditions can only refer to the |
3594 | | * source relation and hence it does not matter which result relation we |
3595 | | * work with. |
3596 | | * |
3597 | | * XXX does this mean that we can avoid creating copies of actionStates on |
3598 | | * partitioned tables, for not-matched actions? |
3599 | | */ |
3600 | 0 | actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET]; |
3601 | | |
3602 | | /* |
3603 | | * Make source tuple available to ExecQual and ExecProject. We don't need |
3604 | | * the target tuple, since the WHEN quals and targetlist can't refer to |
3605 | | * the target columns. |
3606 | | */ |
3607 | 0 | econtext->ecxt_scantuple = NULL; |
3608 | 0 | econtext->ecxt_innertuple = context->planSlot; |
3609 | 0 | econtext->ecxt_outertuple = NULL; |
3610 | |
3611 | 0 | foreach(l, actionStates) |
3612 | 0 | { |
3613 | 0 | MergeActionState *action = (MergeActionState *) lfirst(l); |
3614 | 0 | CmdType commandType = action->mas_action->commandType; |
3615 | 0 | TupleTableSlot *newslot; |
3616 | | |
3617 | | /* |
3618 | | * Test condition, if any. |
3619 | | * |
3620 | | * In the absence of any condition, we perform the action |
3621 | | * unconditionally (no need to check separately since ExecQual() will |
3622 | | * return true if there are no conditions to evaluate). |
3623 | | */ |
3624 | 0 | if (!ExecQual(action->mas_whenqual, econtext)) |
3625 | 0 | continue; |
3626 | | |
3627 | | /* Perform stated action */ |
3628 | 0 | switch (commandType) |
3629 | 0 | { |
3630 | 0 | case CMD_INSERT: |
3631 | | |
3632 | | /* |
3633 | | * Project the tuple. In case of a partitioned table, the |
3634 | | * projection was already built to use the root's descriptor, |
3635 | | * so we don't need to map the tuple here. |
3636 | | */ |
3637 | 0 | newslot = ExecProject(action->mas_proj); |
3638 | 0 | mtstate->mt_merge_action = action; |
3639 | |
3640 | 0 | rslot = ExecInsert(context, mtstate->rootResultRelInfo, |
3641 | 0 | newslot, canSetTag, NULL, NULL); |
3642 | 0 | mtstate->mt_merge_inserted += 1; |
3643 | 0 | break; |
3644 | 0 | case CMD_NOTHING: |
3645 | | /* Do nothing */ |
3646 | 0 | break; |
3647 | 0 | default: |
3648 | 0 | elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause"); |
3649 | 0 | } |
3650 | | |
3651 | | /* |
3652 | | * We've activated one of the WHEN clauses, so we don't search |
3653 | | * further. This is required behaviour, not an optimization. |
3654 | | */ |
3655 | 0 | break; |
3656 | 0 | } |
3657 | | |
3658 | 0 | return rslot; |
3659 | 0 | } |
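
Two points from this function are worth isolating: WHEN NOT MATCHED [BY TARGET] quals can only see the source row, and a qualifying INSERT is executed against the root result relation so that partition routing applies. A minimal sketch with hypothetical names:

    /*
     * Hypothetical sketch: WHEN NOT MATCHED [BY TARGET] quals see only the
     * source row, and a qualifying INSERT goes through the root target
     * relation so partition routing can pick the correct leaf.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct
    {
        int         key;
        int         val;
    } SourceRow;

    typedef struct
    {
        bool        (*when_qual) (const SourceRow *);  /* NULL: unconditional */
    } NotMatchedAction;

    static void
    insert_via_root(const SourceRow *src)
    {
        /* In the executor this is ExecInsert() on the root ResultRelInfo. */
        printf("INSERT (%d, %d) through the root relation\n", src->key, src->val);
    }

    static bool
    qual_positive_key(const SourceRow *src)
    {
        return src->key > 0;
    }

    int
    main(void)
    {
        SourceRow   src = {42, 7};
        NotMatchedAction actions[] = {{qual_positive_key}, {NULL}};

        for (int i = 0; i < 2; i++)
        {
            if (actions[i].when_qual && !actions[i].when_qual(&src))
                continue;
            insert_via_root(&src);
            break;              /* only the first qualifying action runs */
        }
        return 0;
    }
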
3660 | | |
3661 | | /* |
3662 | | * Initialize state for execution of MERGE. |
3663 | | */ |
3664 | | void |
3665 | | ExecInitMerge(ModifyTableState *mtstate, EState *estate) |
3666 | 0 | { |
3667 | 0 | List *mergeActionLists = mtstate->mt_mergeActionLists; |
3668 | 0 | List *mergeJoinConditions = mtstate->mt_mergeJoinConditions; |
3669 | 0 | ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo; |
3670 | 0 | ResultRelInfo *resultRelInfo; |
3671 | 0 | ExprContext *econtext; |
3672 | 0 | ListCell *lc; |
3673 | 0 | int i; |
3674 | |
3675 | 0 | if (mergeActionLists == NIL) |
3676 | 0 | return; |
3677 | | |
3678 | 0 | mtstate->mt_merge_subcommands = 0; |
3679 | |
3680 | 0 | if (mtstate->ps.ps_ExprContext == NULL) |
3681 | 0 | ExecAssignExprContext(estate, &mtstate->ps); |
3682 | 0 | econtext = mtstate->ps.ps_ExprContext; |
3683 | | |
3684 | | /* |
3685 | | * Create a MergeActionState for each action on the mergeActionList and |
3686 | | * add it to either a list of matched actions or not-matched actions. |
3687 | | * |
3688 | | * Similar logic appears in ExecInitPartitionInfo(), so if changing |
3689 | | * anything here, do so there too. |
3690 | | */ |
3691 | 0 | i = 0; |
3692 | 0 | foreach(lc, mergeActionLists) |
3693 | 0 | { |
3694 | 0 | List *mergeActionList = lfirst(lc); |
3695 | 0 | Node *joinCondition; |
3696 | 0 | TupleDesc relationDesc; |
3697 | 0 | ListCell *l; |
3698 | |
3699 | 0 | joinCondition = (Node *) list_nth(mergeJoinConditions, i); |
3700 | 0 | resultRelInfo = mtstate->resultRelInfo + i; |
3701 | 0 | i++; |
3702 | 0 | relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc); |
3703 | | |
3704 | | /* initialize slots for MERGE fetches from this rel */ |
3705 | 0 | if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) |
3706 | 0 | ExecInitMergeTupleSlots(mtstate, resultRelInfo); |
3707 | | |
3708 | | /* initialize state for join condition checking */ |
3709 | 0 | resultRelInfo->ri_MergeJoinCondition = |
3710 | 0 | ExecInitQual((List *) joinCondition, &mtstate->ps); |
3711 | |
3712 | 0 | foreach(l, mergeActionList) |
3713 | 0 | { |
3714 | 0 | MergeAction *action = (MergeAction *) lfirst(l); |
3715 | 0 | MergeActionState *action_state; |
3716 | 0 | TupleTableSlot *tgtslot; |
3717 | 0 | TupleDesc tgtdesc; |
3718 | | |
3719 | | /* |
3720 | | * Build action merge state for this rel. (For partitions, |
3721 | | * equivalent code exists in ExecInitPartitionInfo.) |
3722 | | */ |
3723 | 0 | action_state = makeNode(MergeActionState); |
3724 | 0 | action_state->mas_action = action; |
3725 | 0 | action_state->mas_whenqual = ExecInitQual((List *) action->qual, |
3726 | 0 | &mtstate->ps); |
3727 | | |
3728 | | /* |
3729 | | * We create three lists - one for each MergeMatchKind - and stick |
3730 | | * the MergeActionState into the appropriate list. |
3731 | | */ |
3732 | 0 | resultRelInfo->ri_MergeActions[action->matchKind] = |
3733 | 0 | lappend(resultRelInfo->ri_MergeActions[action->matchKind], |
3734 | 0 | action_state); |
3735 | |
3736 | 0 | switch (action->commandType) |
3737 | 0 | { |
3738 | 0 | case CMD_INSERT: |
3739 | | /* INSERT actions always use rootRelInfo */ |
3740 | 0 | ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc, |
3741 | 0 | action->targetList); |
3742 | | |
3743 | | /* |
3744 | | * If the MERGE targets a partitioned table, any INSERT |
3745 | | * actions must be routed through it, not the child |
3746 | | * relations. Initialize the routing struct and the root |
3747 | | * table's "new" tuple slot for that, if not already done. |
3748 | | * The projection we prepare, for all relations, uses the |
3749 | | * root relation descriptor, and targets the plan's root |
3750 | | * slot. (This is consistent with the fact that we |
3751 | | * checked the plan output to match the root relation, |
3752 | | * above.) |
3753 | | */ |
3754 | 0 | if (rootRelInfo->ri_RelationDesc->rd_rel->relkind == |
3755 | 0 | RELKIND_PARTITIONED_TABLE) |
3756 | 0 | { |
3757 | 0 | if (mtstate->mt_partition_tuple_routing == NULL) |
3758 | 0 | { |
3759 | | /* |
3760 | | * Initialize planstate for routing if not already |
3761 | | * done. |
3762 | | * |
3763 | | * Note that the slot is managed as a standalone |
3764 | | * slot belonging to ModifyTableState, so we pass |
3765 | | * NULL for the 2nd argument. |
3766 | | */ |
3767 | 0 | mtstate->mt_root_tuple_slot = |
3768 | 0 | table_slot_create(rootRelInfo->ri_RelationDesc, |
3769 | 0 | NULL); |
3770 | 0 | mtstate->mt_partition_tuple_routing = |
3771 | 0 | ExecSetupPartitionTupleRouting(estate, |
3772 | 0 | rootRelInfo->ri_RelationDesc); |
3773 | 0 | } |
3774 | 0 | tgtslot = mtstate->mt_root_tuple_slot; |
3775 | 0 | tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc); |
3776 | 0 | } |
3777 | 0 | else |
3778 | 0 | { |
3779 | | /* |
3780 | | * If the MERGE targets an inherited table, we insert |
3781 | | * into the root table, so we must initialize its |
3782 | | * "new" tuple slot, if not already done, and use its |
3783 | | * relation descriptor for the projection. |
3784 | | * |
3785 | | * For non-inherited tables, rootRelInfo and |
3786 | | * resultRelInfo are the same, and the "new" tuple |
3787 | | * slot will already have been initialized. |
3788 | | */ |
3789 | 0 | if (rootRelInfo->ri_newTupleSlot == NULL) |
3790 | 0 | rootRelInfo->ri_newTupleSlot = |
3791 | 0 | table_slot_create(rootRelInfo->ri_RelationDesc, |
3792 | 0 | &estate->es_tupleTable); |
3793 | |
3794 | 0 | tgtslot = rootRelInfo->ri_newTupleSlot; |
3795 | 0 | tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc); |
3796 | 0 | } |
3797 | |
3798 | 0 | action_state->mas_proj = |
3799 | 0 | ExecBuildProjectionInfo(action->targetList, econtext, |
3800 | 0 | tgtslot, |
3801 | 0 | &mtstate->ps, |
3802 | 0 | tgtdesc); |
3803 | |
3804 | 0 | mtstate->mt_merge_subcommands |= MERGE_INSERT; |
3805 | 0 | break; |
3806 | 0 | case CMD_UPDATE: |
3807 | 0 | action_state->mas_proj = |
3808 | 0 | ExecBuildUpdateProjection(action->targetList, |
3809 | 0 | true, |
3810 | 0 | action->updateColnos, |
3811 | 0 | relationDesc, |
3812 | 0 | econtext, |
3813 | 0 | resultRelInfo->ri_newTupleSlot, |
3814 | 0 | &mtstate->ps); |
3815 | 0 | mtstate->mt_merge_subcommands |= MERGE_UPDATE; |
3816 | 0 | break; |
3817 | 0 | case CMD_DELETE: |
3818 | 0 | mtstate->mt_merge_subcommands |= MERGE_DELETE; |
3819 | 0 | break; |
3820 | 0 | case CMD_NOTHING: |
3821 | 0 | break; |
3822 | 0 | default: |
3823 | 0 | elog(ERROR, "unknown action in MERGE WHEN clause"); |
3824 | 0 | break; |
3825 | 0 | } |
3826 | 0 | } |
3827 | 0 | } |
3828 | | |
3829 | | /* |
3830 | | * If the MERGE targets an inherited table, any INSERT actions will use |
3831 | | * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array. |
3832 | | * Therefore we must initialize its WITH CHECK OPTION constraints and |
3833 | | * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo |
3834 | | * entries. |
3835 | | * |
3836 | | * Note that the planner does not build a withCheckOptionList or |
3837 | | * returningList for the root relation, but as in ExecInitPartitionInfo, |
3838 | | * we can use the first resultRelInfo entry as a reference to calculate |
3839 | | * the attno's for the root table. |
3840 | | */ |
3841 | 0 | if (rootRelInfo != mtstate->resultRelInfo && |
3842 | 0 | rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE && |
3843 | 0 | (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0) |
3844 | 0 | { |
3845 | 0 | ModifyTable *node = (ModifyTable *) mtstate->ps.plan; |
3846 | 0 | Relation rootRelation = rootRelInfo->ri_RelationDesc; |
3847 | 0 | Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc; |
3848 | 0 | int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex; |
3849 | 0 | AttrMap *part_attmap = NULL; |
3850 | 0 | bool found_whole_row; |
3851 | |
3852 | 0 | if (node->withCheckOptionLists != NIL) |
3853 | 0 | { |
3854 | 0 | List *wcoList; |
3855 | 0 | List *wcoExprs = NIL; |
3856 | | |
3857 | | /* There should be as many WCO lists as result rels */ |
3858 | 0 | Assert(list_length(node->withCheckOptionLists) == |
3859 | 0 | list_length(node->resultRelations)); |
3860 | | |
3861 | | /* |
3862 | | * Use the first WCO list as a reference. In the most common case, |
3863 | | * this will be for the same relation as rootRelInfo, and so there |
3864 | | * will be no need to adjust its attno's. |
3865 | | */ |
3866 | 0 | wcoList = linitial(node->withCheckOptionLists); |
3867 | 0 | if (rootRelation != firstResultRel) |
3868 | 0 | { |
3869 | | /* Convert any Vars in it to contain the root's attno's */ |
3870 | 0 | part_attmap = |
3871 | 0 | build_attrmap_by_name(RelationGetDescr(rootRelation), |
3872 | 0 | RelationGetDescr(firstResultRel), |
3873 | 0 | false); |
3874 | |
3875 | 0 | wcoList = (List *) |
3876 | 0 | map_variable_attnos((Node *) wcoList, |
3877 | 0 | firstVarno, 0, |
3878 | 0 | part_attmap, |
3879 | 0 | RelationGetForm(rootRelation)->reltype, |
3880 | 0 | &found_whole_row); |
3881 | 0 | } |
3882 | |
3883 | 0 | foreach(lc, wcoList) |
3884 | 0 | { |
3885 | 0 | WithCheckOption *wco = lfirst_node(WithCheckOption, lc); |
3886 | 0 | ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual), |
3887 | 0 | &mtstate->ps); |
3888 | |
3889 | 0 | wcoExprs = lappend(wcoExprs, wcoExpr); |
3890 | 0 | } |
3891 | |
3892 | 0 | rootRelInfo->ri_WithCheckOptions = wcoList; |
3893 | 0 | rootRelInfo->ri_WithCheckOptionExprs = wcoExprs; |
3894 | 0 | } |
3895 | |
3896 | 0 | if (node->returningLists != NIL) |
3897 | 0 | { |
3898 | 0 | List *returningList; |
3899 | | |
3900 | | /* There should be as many returning lists as result rels */ |
3901 | 0 | Assert(list_length(node->returningLists) == |
3902 | 0 | list_length(node->resultRelations)); |
3903 | | |
3904 | | /* |
3905 | | * Use the first returning list as a reference. In the most common |
3906 | | * case, this will be for the same relation as rootRelInfo, and so |
3907 | | * there will be no need to adjust its attno's. |
3908 | | */ |
3909 | 0 | returningList = linitial(node->returningLists); |
3910 | 0 | if (rootRelation != firstResultRel) |
3911 | 0 | { |
3912 | | /* Convert any Vars in it to contain the root's attno's */ |
3913 | 0 | if (part_attmap == NULL) |
3914 | 0 | part_attmap = |
3915 | 0 | build_attrmap_by_name(RelationGetDescr(rootRelation), |
3916 | 0 | RelationGetDescr(firstResultRel), |
3917 | 0 | false); |
3918 | |
3919 | 0 | returningList = (List *) |
3920 | 0 | map_variable_attnos((Node *) returningList, |
3921 | 0 | firstVarno, 0, |
3922 | 0 | part_attmap, |
3923 | 0 | RelationGetForm(rootRelation)->reltype, |
3924 | 0 | &found_whole_row); |
3925 | 0 | } |
3926 | 0 | rootRelInfo->ri_returningList = returningList; |
3927 | | |
3928 | | /* Initialize the RETURNING projection */ |
3929 | 0 | rootRelInfo->ri_projectReturning = |
3930 | 0 | ExecBuildProjectionInfo(returningList, econtext, |
3931 | 0 | mtstate->ps.ps_ResultTupleSlot, |
3932 | 0 | &mtstate->ps, |
3933 | 0 | RelationGetDescr(rootRelation)); |
3934 | 0 | } |
3935 | 0 | } |
3936 | 0 | } |
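
The setup above boils down to bucketing each WHEN clause's run-time state into one of three per-match-kind lists hung off the result relation, which is what lets the execution-time routines walk only the relevant subset. A minimal sketch with hypothetical names (the real code appends with lappend(), preserving WHEN-clause order; this toy list prepends, so ordering here is illustrative only):

    /*
     * Hypothetical sketch of bucketing MERGE actions into the three
     * per-match-kind lists, as ExecInitMerge() does with ri_MergeActions[].
     */
    #include <stdio.h>
    #include <stdlib.h>

    typedef enum
    {
        WHEN_MATCHED,
        WHEN_NOT_MATCHED_BY_SOURCE,
        WHEN_NOT_MATCHED_BY_TARGET,
        NUM_MATCH_KINDS
    } MatchKind;

    typedef struct ActionState
    {
        int         action_no;
        struct ActionState *next;
    } ActionState;

    typedef struct
    {
        ActionState *actions[NUM_MATCH_KINDS];  /* like ri_MergeActions[] */
    } ResultRelState;

    static void
    add_action(ResultRelState *rel, MatchKind kind, int action_no)
    {
        ActionState *st = malloc(sizeof(ActionState));

        st->action_no = action_no;
        st->next = rel->actions[kind];
        rel->actions[kind] = st;
    }

    int
    main(void)
    {
        ResultRelState rel = {{NULL, NULL, NULL}};

        add_action(&rel, WHEN_MATCHED, 1);
        add_action(&rel, WHEN_NOT_MATCHED_BY_TARGET, 2);
        add_action(&rel, WHEN_MATCHED, 3);

        for (int k = 0; k < NUM_MATCH_KINDS; k++)
            for (ActionState *st = rel.actions[k]; st != NULL; st = st->next)
                printf("match kind %d: action %d\n", k, st->action_no);
        return 0;
    }
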
3937 | | |
3938 | | /* |
3939 | | * Initializes the tuple slots in a ResultRelInfo for any MERGE action. |
3940 | | * |
3941 | | * We mark 'projectNewInfoValid' even though the projections themselves |
3942 | | * are not initialized here. |
3943 | | */ |
3944 | | void |
3945 | | ExecInitMergeTupleSlots(ModifyTableState *mtstate, |
3946 | | ResultRelInfo *resultRelInfo) |
3947 | 0 | { |
3948 | 0 | EState *estate = mtstate->ps.state; |
3949 | |
3950 | 0 | Assert(!resultRelInfo->ri_projectNewInfoValid); |
3951 | |
|
3952 | 0 | resultRelInfo->ri_oldTupleSlot = |
3953 | 0 | table_slot_create(resultRelInfo->ri_RelationDesc, |
3954 | 0 | &estate->es_tupleTable); |
3955 | 0 | resultRelInfo->ri_newTupleSlot = |
3956 | 0 | table_slot_create(resultRelInfo->ri_RelationDesc, |
3957 | 0 | &estate->es_tupleTable); |
3958 | 0 | resultRelInfo->ri_projectNewInfoValid = true; |
3959 | 0 | } |
3960 | | |
3961 | | /* |
3962 | | * Process BEFORE EACH STATEMENT triggers |
3963 | | */ |
3964 | | static void |
3965 | | fireBSTriggers(ModifyTableState *node) |
3966 | 0 | { |
3967 | 0 | ModifyTable *plan = (ModifyTable *) node->ps.plan; |
3968 | 0 | ResultRelInfo *resultRelInfo = node->rootResultRelInfo; |
3969 | |
3970 | 0 | switch (node->operation) |
3971 | 0 | { |
3972 | 0 | case CMD_INSERT: |
3973 | 0 | ExecBSInsertTriggers(node->ps.state, resultRelInfo); |
3974 | 0 | if (plan->onConflictAction == ONCONFLICT_UPDATE) |
3975 | 0 | ExecBSUpdateTriggers(node->ps.state, |
3976 | 0 | resultRelInfo); |
3977 | 0 | break; |
3978 | 0 | case CMD_UPDATE: |
3979 | 0 | ExecBSUpdateTriggers(node->ps.state, resultRelInfo); |
3980 | 0 | break; |
3981 | 0 | case CMD_DELETE: |
3982 | 0 | ExecBSDeleteTriggers(node->ps.state, resultRelInfo); |
3983 | 0 | break; |
3984 | 0 | case CMD_MERGE: |
3985 | 0 | if (node->mt_merge_subcommands & MERGE_INSERT) |
3986 | 0 | ExecBSInsertTriggers(node->ps.state, resultRelInfo); |
3987 | 0 | if (node->mt_merge_subcommands & MERGE_UPDATE) |
3988 | 0 | ExecBSUpdateTriggers(node->ps.state, resultRelInfo); |
3989 | 0 | if (node->mt_merge_subcommands & MERGE_DELETE) |
3990 | 0 | ExecBSDeleteTriggers(node->ps.state, resultRelInfo); |
3991 | 0 | break; |
3992 | 0 | default: |
3993 | 0 | elog(ERROR, "unknown operation"); |
3994 | 0 | break; |
3995 | 0 | } |
3996 | 0 | } |
3997 | | |
3998 | | /* |
3999 | | * Process AFTER EACH STATEMENT triggers |
4000 | | */ |
4001 | | static void |
4002 | | fireASTriggers(ModifyTableState *node) |
4003 | 0 | { |
4004 | 0 | ModifyTable *plan = (ModifyTable *) node->ps.plan; |
4005 | 0 | ResultRelInfo *resultRelInfo = node->rootResultRelInfo; |
4006 | |
4007 | 0 | switch (node->operation) |
4008 | 0 | { |
4009 | 0 | case CMD_INSERT: |
4010 | 0 | if (plan->onConflictAction == ONCONFLICT_UPDATE) |
4011 | 0 | ExecASUpdateTriggers(node->ps.state, |
4012 | 0 | resultRelInfo, |
4013 | 0 | node->mt_oc_transition_capture); |
4014 | 0 | ExecASInsertTriggers(node->ps.state, resultRelInfo, |
4015 | 0 | node->mt_transition_capture); |
4016 | 0 | break; |
4017 | 0 | case CMD_UPDATE: |
4018 | 0 | ExecASUpdateTriggers(node->ps.state, resultRelInfo, |
4019 | 0 | node->mt_transition_capture); |
4020 | 0 | break; |
4021 | 0 | case CMD_DELETE: |
4022 | 0 | ExecASDeleteTriggers(node->ps.state, resultRelInfo, |
4023 | 0 | node->mt_transition_capture); |
4024 | 0 | break; |
4025 | 0 | case CMD_MERGE: |
4026 | 0 | if (node->mt_merge_subcommands & MERGE_DELETE) |
4027 | 0 | ExecASDeleteTriggers(node->ps.state, resultRelInfo, |
4028 | 0 | node->mt_transition_capture); |
4029 | 0 | if (node->mt_merge_subcommands & MERGE_UPDATE) |
4030 | 0 | ExecASUpdateTriggers(node->ps.state, resultRelInfo, |
4031 | 0 | node->mt_transition_capture); |
4032 | 0 | if (node->mt_merge_subcommands & MERGE_INSERT) |
4033 | 0 | ExecASInsertTriggers(node->ps.state, resultRelInfo, |
4034 | 0 | node->mt_transition_capture); |
4035 | 0 | break; |
4036 | 0 | default: |
4037 | 0 | elog(ERROR, "unknown operation"); |
4038 | 0 | break; |
4039 | 0 | } |
4040 | 0 | } |
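
For MERGE, both statement-trigger routines gate on the mt_merge_subcommands bitmask collected during ExecInitMerge(), so only the action kinds actually present in the statement fire statement-level triggers. A minimal sketch of that gating; the flag values below are made up for illustration only:

    /*
     * Hypothetical sketch of gating statement-level triggers on a MERGE
     * subcommand bitmask, in the spirit of fireBSTriggers() and
     * fireASTriggers().
     */
    #include <stdio.h>

    #define EX_MERGE_INSERT 0x01
    #define EX_MERGE_UPDATE 0x02
    #define EX_MERGE_DELETE 0x04

    static void
    fire_merge_statement_triggers(int subcommands)
    {
        if (subcommands & EX_MERGE_INSERT)
            puts("fire INSERT statement triggers");
        if (subcommands & EX_MERGE_UPDATE)
            puts("fire UPDATE statement triggers");
        if (subcommands & EX_MERGE_DELETE)
            puts("fire DELETE statement triggers");
    }

    int
    main(void)
    {
        /* e.g. MERGE ... WHEN MATCHED THEN UPDATE ... WHEN NOT MATCHED THEN INSERT */
        fire_merge_statement_triggers(EX_MERGE_INSERT | EX_MERGE_UPDATE);
        return 0;
    }
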
4041 | | |
4042 | | /* |
4043 | | * Set up the state needed for collecting transition tuples for AFTER |
4044 | | * triggers. |
4045 | | */ |
4046 | | static void |
4047 | | ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate) |
4048 | 0 | { |
4049 | 0 | ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; |
4050 | 0 | ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo; |
4051 | | |
4052 | | /* Check for transition tables on the directly targeted relation. */ |
4053 | 0 | mtstate->mt_transition_capture = |
4054 | 0 | MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc, |
4055 | 0 | RelationGetRelid(targetRelInfo->ri_RelationDesc), |
4056 | 0 | mtstate->operation); |
4057 | 0 | if (plan->operation == CMD_INSERT && |
4058 | 0 | plan->onConflictAction == ONCONFLICT_UPDATE) |
4059 | 0 | mtstate->mt_oc_transition_capture = |
4060 | 0 | MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc, |
4061 | 0 | RelationGetRelid(targetRelInfo->ri_RelationDesc), |
4062 | 0 | CMD_UPDATE); |
4063 | 0 | } |
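
One detail worth isolating: an INSERT ... ON CONFLICT DO UPDATE plan sets up a second capture state, because a single statement can produce both inserted and updated transition rows. A minimal sketch with hypothetical names:

    /*
     * Hypothetical sketch: INSERT ... ON CONFLICT DO UPDATE needs transition
     * capture for both the INSERT itself and the ON CONFLICT UPDATE path,
     * mirroring mt_transition_capture and mt_oc_transition_capture.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { OP_INSERT, OP_UPDATE, OP_DELETE, OP_MERGE } Operation;

    typedef struct
    {
        Operation   captured_op;            /* like mt_transition_capture */
        bool        extra_update_capture;   /* like mt_oc_transition_capture */
    } CaptureState;

    static CaptureState
    setup_capture(Operation op, bool on_conflict_do_update)
    {
        CaptureState cs;

        cs.captured_op = op;    /* always capture for the statement's own operation */
        cs.extra_update_capture = (op == OP_INSERT && on_conflict_do_update);
        return cs;
    }

    int
    main(void)
    {
        CaptureState cs = setup_capture(OP_INSERT, true);

        printf("capture for op %d, extra ON CONFLICT UPDATE capture: %d\n",
               (int) cs.captured_op, (int) cs.extra_update_capture);
        return 0;
    }
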
4064 | | |
4065 | | /* |
4066 | | * ExecPrepareTupleRouting --- prepare for routing one tuple |
4067 | | * |
4068 | | * Determine the partition in which the tuple in slot is to be inserted, |
4069 | | * and return its ResultRelInfo in *partRelInfo. The return value is |
4070 | | * a slot holding the tuple of the partition rowtype. |
4071 | | * |
4072 | | * This also sets the transition table information in mtstate based on the |
4073 | | * selected partition. |
4074 | | */ |
4075 | | static TupleTableSlot * |
4076 | | ExecPrepareTupleRouting(ModifyTableState *mtstate, |
4077 | | EState *estate, |
4078 | | PartitionTupleRouting *proute, |
4079 | | ResultRelInfo *targetRelInfo, |
4080 | | TupleTableSlot *slot, |
4081 | | ResultRelInfo **partRelInfo) |
4082 | 0 | { |
4083 | 0 | ResultRelInfo *partrel; |
4084 | 0 | TupleConversionMap *map; |
4085 | | |
4086 | | /* |
4087 | | * Lookup the target partition's ResultRelInfo. If ExecFindPartition does |
4088 | | * not find a valid partition for the tuple in 'slot' then an error is |
4089 | | * raised. An error may also be raised if the found partition is not a |
4090 | | * valid target for INSERTs. This is required since a partitioned table |
4091 | | * UPDATE to another partition becomes a DELETE+INSERT. |
4092 | | */ |
4093 | 0 | partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate); |
4094 | | |
4095 | | /* |
4096 | | * If we're capturing transition tuples, we might need to convert from the |
4097 | | * partition rowtype to the root partitioned table's rowtype. But if there |
4098 | | * are no BEFORE triggers on the partition that could change the tuple, we |
4099 | | * can just remember the original unconverted tuple to avoid a needless |
4100 | | * round trip conversion. |
4101 | | */ |
4102 | 0 | if (mtstate->mt_transition_capture != NULL) |
4103 | 0 | { |
4104 | 0 | bool has_before_insert_row_trig; |
4105 | |
4106 | 0 | has_before_insert_row_trig = (partrel->ri_TrigDesc && |
4107 | 0 | partrel->ri_TrigDesc->trig_insert_before_row); |
4108 | |
4109 | 0 | mtstate->mt_transition_capture->tcs_original_insert_tuple = |
4110 | 0 | !has_before_insert_row_trig ? slot : NULL; |
4111 | 0 | } |
4112 | | |
4113 | | /* |
4114 | | * Convert the tuple, if necessary. |
4115 | | */ |
4116 | 0 | map = ExecGetRootToChildMap(partrel, estate); |
4117 | 0 | if (map != NULL) |
4118 | 0 | { |
4119 | 0 | TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot; |
4120 | |
4121 | 0 | slot = execute_attr_map_slot(map->attrMap, slot, new_slot); |
4122 | 0 | } |
4123 | |
4124 | 0 | *partRelInfo = partrel; |
4125 | 0 | return slot; |
4126 | 0 | } |
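
The per-row work here is: find the partition, then convert the tuple only if the leaf's column layout differs from the root's (a NULL root-to-child map means no conversion is needed). A minimal sketch of that convert-if-needed step, with hypothetical names and a toy attribute map:

    /*
     * Hypothetical sketch of the "convert the tuple only if the partition
     * needs a different column layout" step in ExecPrepareTupleRouting().
     */
    #include <stdio.h>

    #define NCOLS 3

    typedef struct
    {
        /* attrmap[i] = root column feeding child column i; a NULL map means
         * the layouts already match and no conversion is needed. */
        const int  *attrmap;
    } RootToChildMap;

    static void
    route_tuple(const int *root_vals, const RootToChildMap *map, int *child_vals)
    {
        if (map == NULL || map->attrmap == NULL)
        {
            for (int i = 0; i < NCOLS; i++)
                child_vals[i] = root_vals[i];   /* use the tuple as-is */
            return;
        }
        for (int i = 0; i < NCOLS; i++)
            child_vals[i] = root_vals[map->attrmap[i]];
    }

    int
    main(void)
    {
        int         root_vals[NCOLS] = {10, 20, 30};
        int         child_vals[NCOLS];
        static const int perm[NCOLS] = {2, 0, 1};   /* child stores columns in another order */
        RootToChildMap map = {perm};

        route_tuple(root_vals, &map, child_vals);
        printf("%d %d %d\n", child_vals[0], child_vals[1], child_vals[2]);
        return 0;
    }
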
4127 | | |
4128 | | /* ---------------------------------------------------------------- |
4129 | | * ExecModifyTable |
4130 | | * |
4131 | | * Perform table modifications as required, and return RETURNING results |
4132 | | * if needed. |
4133 | | * ---------------------------------------------------------------- |
4134 | | */ |
4135 | | static TupleTableSlot * |
4136 | | ExecModifyTable(PlanState *pstate) |
4137 | 0 | { |
4138 | 0 | ModifyTableState *node = castNode(ModifyTableState, pstate); |
4139 | 0 | ModifyTableContext context; |
4140 | 0 | EState *estate = node->ps.state; |
4141 | 0 | CmdType operation = node->operation; |
4142 | 0 | ResultRelInfo *resultRelInfo; |
4143 | 0 | PlanState *subplanstate; |
4144 | 0 | TupleTableSlot *slot; |
4145 | 0 | TupleTableSlot *oldSlot; |
4146 | 0 | ItemPointerData tuple_ctid; |
4147 | 0 | HeapTupleData oldtupdata; |
4148 | 0 | HeapTuple oldtuple; |
4149 | 0 | ItemPointer tupleid; |
4150 | 0 | bool tuplock; |
4151 | |
4152 | 0 | CHECK_FOR_INTERRUPTS(); |
4153 | | |
4154 | | /* |
4155 | | * This should NOT get called during EvalPlanQual; we should have passed a |
4156 | | * subplan tree to EvalPlanQual, instead. Use a runtime test not just |
4157 | | * Assert because this condition is easy to miss in testing. (Note: |
4158 | | * although ModifyTable should not get executed within an EvalPlanQual |
4159 | | * operation, we do have to allow it to be initialized and shut down in |
4160 | | * case it is within a CTE subplan. Hence this test must be here, not in |
4161 | | * ExecInitModifyTable.) |
4162 | | */ |
4163 | 0 | if (estate->es_epq_active != NULL) |
4164 | 0 | elog(ERROR, "ModifyTable should not be called during EvalPlanQual"); |
4165 | | |
4166 | | /* |
4167 | | * If we've already completed processing, don't try to do more. We need |
4168 | | * this test because ExecPostprocessPlan might call us an extra time, and |
4169 | | * our subplan's nodes aren't necessarily robust against being called |
4170 | | * extra times. |
4171 | | */ |
4172 | 0 | if (node->mt_done) |
4173 | 0 | return NULL; |
4174 | | |
4175 | | /* |
4176 | | * On first call, fire BEFORE STATEMENT triggers before proceeding. |
4177 | | */ |
4178 | 0 | if (node->fireBSTriggers) |
4179 | 0 | { |
4180 | 0 | fireBSTriggers(node); |
4181 | 0 | node->fireBSTriggers = false; |
4182 | 0 | } |
4183 | | |
4184 | | /* Preload local variables */ |
4185 | 0 | resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex; |
4186 | 0 | subplanstate = outerPlanState(node); |
4187 | | |
4188 | | /* Set global context */ |
4189 | 0 | context.mtstate = node; |
4190 | 0 | context.epqstate = &node->mt_epqstate; |
4191 | 0 | context.estate = estate; |
4192 | | |
4193 | | /* |
4194 | | * Fetch rows from subplan, and execute the required table modification |
4195 | | * for each row. |
4196 | | */ |
4197 | 0 | for (;;) |
4198 | 0 | { |
4199 | | /* |
4200 | | * Reset the per-output-tuple exprcontext. This is needed because |
4201 | | * triggers expect to use that context as workspace. It's a bit ugly |
4202 | | * to do this below the top level of the plan, however. We might need |
4203 | | * to rethink this later. |
4204 | | */ |
4205 | 0 | ResetPerTupleExprContext(estate); |
4206 | | |
4207 | | /* |
4208 | | * Reset per-tuple memory context used for processing on conflict and |
4209 | | * returning clauses, to free any expression evaluation storage |
4210 | | * allocated in the previous cycle. |
4211 | | */ |
4212 | 0 | if (pstate->ps_ExprContext) |
4213 | 0 | ResetExprContext(pstate->ps_ExprContext); |
4214 | | |
4215 | | /* |
4216 | | * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action |
4217 | | * to execute, do so now --- see the comments in ExecMerge(). |
4218 | | */ |
4219 | 0 | if (node->mt_merge_pending_not_matched != NULL) |
4220 | 0 | { |
4221 | 0 | context.planSlot = node->mt_merge_pending_not_matched; |
4222 | 0 | context.cpDeletedSlot = NULL; |
4223 | |
4224 | 0 | slot = ExecMergeNotMatched(&context, node->resultRelInfo, |
4225 | 0 | node->canSetTag); |
4226 | | |
4227 | | /* Clear the pending action */ |
4228 | 0 | node->mt_merge_pending_not_matched = NULL; |
4229 | | |
4230 | | /* |
4231 | | * If we got a RETURNING result, return it to the caller. We'll |
4232 | | * continue the work on next call. |
4233 | | */ |
4234 | 0 | if (slot) |
4235 | 0 | return slot; |
4236 | | |
4237 | 0 | continue; /* continue with the next tuple */ |
4238 | 0 | } |
4239 | | |
4240 | | /* Fetch the next row from subplan */ |
4241 | 0 | context.planSlot = ExecProcNode(subplanstate); |
4242 | 0 | context.cpDeletedSlot = NULL; |
4243 | | |
4244 | | /* No more tuples to process? */ |
4245 | 0 | if (TupIsNull(context.planSlot)) |
4246 | 0 | break; |
4247 | | |
4248 | | /* |
4249 | | * When there are multiple result relations, each tuple contains a |
4250 | | * junk column that gives the OID of the rel from which it came. |
4251 | | * Extract it and select the correct result relation. |
4252 | | */ |
4253 | 0 | if (AttributeNumberIsValid(node->mt_resultOidAttno)) |
4254 | 0 | { |
4255 | 0 | Datum datum; |
4256 | 0 | bool isNull; |
4257 | 0 | Oid resultoid; |
4258 | |
4259 | 0 | datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, |
4260 | 0 | &isNull); |
4261 | 0 | if (isNull) |
4262 | 0 | { |
4263 | | /* |
4264 | | * For commands other than MERGE, any tuples having InvalidOid |
4265 | | * for tableoid are errors. For MERGE, we may need to handle |
4266 | | * them as WHEN NOT MATCHED clauses if any, so do that. |
4267 | | * |
4268 | | * Note that we use the node's toplevel resultRelInfo, not any |
4269 | | * specific partition's. |
4270 | | */ |
4271 | 0 | if (operation == CMD_MERGE) |
4272 | 0 | { |
4273 | 0 | EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot); |
4274 | |
4275 | 0 | slot = ExecMerge(&context, node->resultRelInfo, |
4276 | 0 | NULL, NULL, node->canSetTag); |
4277 | | |
4278 | | /* |
4279 | | * If we got a RETURNING result, return it to the caller. |
4280 | | * We'll continue the work on next call. |
4281 | | */ |
4282 | 0 | if (slot) |
4283 | 0 | return slot; |
4284 | | |
4285 | 0 | continue; /* continue with the next tuple */ |
4286 | 0 | } |
4287 | | |
4288 | 0 | elog(ERROR, "tableoid is NULL"); |
4289 | 0 | } |
4290 | 0 | resultoid = DatumGetObjectId(datum); |
4291 | | |
4292 | | /* If it's not the same as last time, we need to locate the rel */ |
4293 | 0 | if (resultoid != node->mt_lastResultOid) |
4294 | 0 | resultRelInfo = ExecLookupResultRelByOid(node, resultoid, |
4295 | 0 | false, true); |
4296 | 0 | } |
4297 | | |
4298 | | /* |
4299 | | * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do |
4300 | | * here is compute the RETURNING expressions. |
4301 | | */ |
4302 | 0 | if (resultRelInfo->ri_usesFdwDirectModify) |
4303 | 0 | { |
4304 | 0 | Assert(resultRelInfo->ri_projectReturning); |
4305 | | |
4306 | | /* |
4307 | | * A scan slot containing the data that was actually inserted, |
4308 | | * updated or deleted has already been made available to |
4309 | | * ExecProcessReturning by IterateDirectModify, so no need to |
4310 | | * provide it here. The individual old and new slots are not |
4311 | | * needed, since direct-modify is disabled if the RETURNING list |
4312 | | * refers to OLD/NEW values. |
4313 | | */ |
4314 | 0 | Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 && |
4315 | 0 | (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0); |
4316 | |
4317 | 0 | slot = ExecProcessReturning(&context, resultRelInfo, operation, |
4318 | 0 | NULL, NULL, context.planSlot); |
4319 | |
4320 | 0 | return slot; |
4321 | 0 | } |
4322 | | |
4323 | 0 | EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot); |
4324 | 0 | slot = context.planSlot; |
4325 | |
4326 | 0 | tupleid = NULL; |
4327 | 0 | oldtuple = NULL; |
4328 | | |
4329 | | /* |
4330 | | * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple |
4331 | | * to be updated/deleted/merged. For a heap relation, that's a TID; |
4332 | | * otherwise we may have a wholerow junk attr that carries the old |
4333 | | * tuple in toto. Keep this in step with the part of |
4334 | | * ExecInitModifyTable that sets up ri_RowIdAttNo. |
4335 | | */ |
4336 | 0 | if (operation == CMD_UPDATE || operation == CMD_DELETE || |
4337 | 0 | operation == CMD_MERGE) |
4338 | 0 | { |
4339 | 0 | char relkind; |
4340 | 0 | Datum datum; |
4341 | 0 | bool isNull; |
4342 | |
4343 | 0 | relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; |
4344 | 0 | if (relkind == RELKIND_RELATION || |
4345 | 0 | relkind == RELKIND_MATVIEW || |
4346 | 0 | relkind == RELKIND_PARTITIONED_TABLE) |
4347 | 0 | { |
4348 | | /* ri_RowIdAttNo refers to a ctid attribute */ |
4349 | 0 | Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); |
4350 | 0 | datum = ExecGetJunkAttribute(slot, |
4351 | 0 | resultRelInfo->ri_RowIdAttNo, |
4352 | 0 | &isNull); |
4353 | | |
4354 | | /* |
4355 | | * For commands other than MERGE, any tuples having a null row |
4356 | | * identifier are errors. For MERGE, we may need to handle |
4357 | | * them as WHEN NOT MATCHED clauses if any, so do that. |
4358 | | * |
4359 | | * Note that we use the node's toplevel resultRelInfo, not any |
4360 | | * specific partition's. |
4361 | | */ |
4362 | 0 | if (isNull) |
4363 | 0 | { |
4364 | 0 | if (operation == CMD_MERGE) |
4365 | 0 | { |
4366 | 0 | EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot); |
4367 | |
4368 | 0 | slot = ExecMerge(&context, node->resultRelInfo, |
4369 | 0 | NULL, NULL, node->canSetTag); |
4370 | | |
4371 | | /* |
4372 | | * If we got a RETURNING result, return it to the |
4373 | | * caller. We'll continue the work on next call. |
4374 | | */ |
4375 | 0 | if (slot) |
4376 | 0 | return slot; |
4377 | | |
4378 | 0 | continue; /* continue with the next tuple */ |
4379 | 0 | } |
4380 | | |
4381 | 0 | elog(ERROR, "ctid is NULL"); |
4382 | 0 | } |
4383 | | |
4384 | 0 | tupleid = (ItemPointer) DatumGetPointer(datum); |
4385 | 0 | tuple_ctid = *tupleid; /* be sure we don't free ctid!! */ |
4386 | 0 | tupleid = &tuple_ctid; |
4387 | 0 | } |
4388 | | |
4389 | | /* |
4390 | | * Use the wholerow attribute, when available, to reconstruct the |
4391 | | * old relation tuple. The old tuple serves one or both of two |
4392 | | * purposes: 1) it serves as the OLD tuple for row triggers, 2) it |
4393 | | * provides values for any unchanged columns for the NEW tuple of |
4394 | | * an UPDATE, because the subplan does not produce all the columns |
4395 | | * of the target table. |
4396 | | * |
4397 | | * Note that the wholerow attribute does not carry system columns, |
4398 | | * so foreign table triggers miss seeing those, except that we |
4399 | | * know enough here to set t_tableOid. Quite separately from |
4400 | | * this, the FDW may fetch its own junk attrs to identify the row. |
4401 | | * |
4402 | | * Other relevant relkinds, currently limited to views, always |
4403 | | * have a wholerow attribute. |
4404 | | */ |
4405 | 0 | else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) |
4406 | 0 | { |
4407 | 0 | datum = ExecGetJunkAttribute(slot, |
4408 | 0 | resultRelInfo->ri_RowIdAttNo, |
4409 | 0 | &isNull); |
4410 | | |
4411 | | /* |
4412 | | * For commands other than MERGE, any tuples having a null row |
4413 | | * identifier are errors. For MERGE, we may need to handle |
4414 | | * them as WHEN NOT MATCHED clauses if any, so do that. |
4415 | | * |
4416 | | * Note that we use the node's toplevel resultRelInfo, not any |
4417 | | * specific partition's. |
4418 | | */ |
4419 | 0 | if (isNull) |
4420 | 0 | { |
4421 | 0 | if (operation == CMD_MERGE) |
4422 | 0 | { |
4423 | 0 | EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot); |
4424 | |
4425 | 0 | slot = ExecMerge(&context, node->resultRelInfo, |
4426 | 0 | NULL, NULL, node->canSetTag); |
4427 | | |
4428 | | /* |
4429 | | * If we got a RETURNING result, return it to the |
4430 | | * caller. We'll continue the work on next call. |
4431 | | */ |
4432 | 0 | if (slot) |
4433 | 0 | return slot; |
4434 | | |
4435 | 0 | continue; /* continue with the next tuple */ |
4436 | 0 | } |
4437 | | |
4438 | 0 | elog(ERROR, "wholerow is NULL"); |
4439 | 0 | } |
4440 | | |
4441 | 0 | oldtupdata.t_data = DatumGetHeapTupleHeader(datum); |
4442 | 0 | oldtupdata.t_len = |
4443 | 0 | HeapTupleHeaderGetDatumLength(oldtupdata.t_data); |
4444 | 0 | ItemPointerSetInvalid(&(oldtupdata.t_self)); |
4445 | | /* Historically, view triggers see invalid t_tableOid. */ |
4446 | 0 | oldtupdata.t_tableOid = |
4447 | 0 | (relkind == RELKIND_VIEW) ? InvalidOid : |
4448 | 0 | RelationGetRelid(resultRelInfo->ri_RelationDesc); |
4449 | |
4450 | 0 | oldtuple = &oldtupdata; |
4451 | 0 | } |
4452 | 0 | else |
4453 | 0 | { |
4454 | | /* Only foreign tables are allowed to omit a row-ID attr */ |
4455 | 0 | Assert(relkind == RELKIND_FOREIGN_TABLE); |
4456 | 0 | } |
4457 | 0 | } |
4458 | | |
4459 | 0 | switch (operation) |
4460 | 0 | { |
4461 | 0 | case CMD_INSERT: |
4462 | | /* Initialize projection info if first time for this table */ |
4463 | 0 | if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) |
4464 | 0 | ExecInitInsertProjection(node, resultRelInfo); |
4465 | 0 | slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); |
4466 | 0 | slot = ExecInsert(&context, resultRelInfo, slot, |
4467 | 0 | node->canSetTag, NULL, NULL); |
4468 | 0 | break; |
4469 | | |
4470 | 0 | case CMD_UPDATE: |
4471 | 0 | tuplock = false; |
4472 | | |
4473 | | /* Initialize projection info if first time for this table */ |
4474 | 0 | if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) |
4475 | 0 | ExecInitUpdateProjection(node, resultRelInfo); |
4476 | | |
4477 | | /* |
4478 | | * Make the new tuple by combining plan's output tuple with |
4479 | | * the old tuple being updated. |
4480 | | */ |
4481 | 0 | oldSlot = resultRelInfo->ri_oldTupleSlot; |
4482 | 0 | if (oldtuple != NULL) |
4483 | 0 | { |
4484 | 0 | Assert(!resultRelInfo->ri_needLockTagTuple); |
4485 | | /* Use the wholerow junk attr as the old tuple. */ |
4486 | 0 | ExecForceStoreHeapTuple(oldtuple, oldSlot, false); |
4487 | 0 | } |
4488 | 0 | else |
4489 | 0 | { |
4490 | | /* Fetch the most recent version of old tuple. */ |
4491 | 0 | Relation relation = resultRelInfo->ri_RelationDesc; |
4492 | |
4493 | 0 | if (resultRelInfo->ri_needLockTagTuple) |
4494 | 0 | { |
4495 | 0 | LockTuple(relation, tupleid, InplaceUpdateTupleLock); |
4496 | 0 | tuplock = true; |
4497 | 0 | } |
4498 | 0 | if (!table_tuple_fetch_row_version(relation, tupleid, |
4499 | 0 | SnapshotAny, |
4500 | 0 | oldSlot)) |
4501 | 0 | elog(ERROR, "failed to fetch tuple being updated"); |
4502 | 0 | } |
4503 | 0 | slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, |
4504 | 0 | oldSlot); |
4505 | | |
4506 | | /* Now apply the update. */ |
4507 | 0 | slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, |
4508 | 0 | oldSlot, slot, node->canSetTag); |
4509 | 0 | if (tuplock) |
4510 | 0 | UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid, |
4511 | 0 | InplaceUpdateTupleLock); |
4512 | 0 | break; |
4513 | | |
4514 | 0 | case CMD_DELETE: |
4515 | 0 | slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, |
4516 | 0 | true, false, node->canSetTag, NULL, NULL, NULL); |
4517 | 0 | break; |
4518 | | |
4519 | 0 | case CMD_MERGE: |
4520 | 0 | slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple, |
4521 | 0 | node->canSetTag); |
4522 | 0 | break; |
4523 | | |
4524 | 0 | default: |
4525 | 0 | elog(ERROR, "unknown operation"); |
4526 | 0 | break; |
4527 | 0 | } |
4528 | | |
4529 | | /* |
4530 | | * If we got a RETURNING result, return it to caller. We'll continue |
4531 | | * the work on next call. |
4532 | | */ |
4533 | 0 | if (slot) |
4534 | 0 | return slot; |
4535 | 0 | } |
4536 | | |
4537 | | /* |
4538 | | * Insert remaining tuples for batch insert. |
4539 | | */ |
4540 | 0 | if (estate->es_insert_pending_result_relations != NIL) |
4541 | 0 | ExecPendingInserts(estate); |
4542 | | |
4543 | | /* |
4544 | | * We're done, but fire AFTER STATEMENT triggers before exiting. |
4545 | | */ |
4546 | 0 | fireASTriggers(node); |
4547 | |
4548 | 0 | node->mt_done = true; |
4549 | |
4550 | 0 | return NULL; |
4551 | 0 | } |
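/*
 * [Editor's illustrative sketch, not part of nodeModifyTable.c] How a
 * caller-side loop would drain this node: each non-NULL slot returned by
 * ExecProcNode is one RETURNING row, and a NULL return means the loop above
 * has performed all remaining modifications and fired the AFTER STATEMENT
 * triggers.  The helper name is hypothetical.
 */
static void
example_drain_modify_table(PlanState *mtplanstate)
{
	for (;;)
	{
		TupleTableSlot *slot = ExecProcNode(mtplanstate);

		if (TupIsNull(slot))
			break;				/* no more work; ModifyTable has set mt_done */

		/* ... hand the RETURNING tuple in 'slot' to the destination ... */
	}
}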
4552 | | |
4553 | | /* |
4554 | | * ExecLookupResultRelByOid |
4555 | | * If the table with given OID is among the result relations to be |
4556 | | * updated by the given ModifyTable node, return its ResultRelInfo. |
4557 | | * |
4558 | | * If not found, return NULL if missing_ok, else raise error. |
4559 | | * |
4560 | | * If update_cache is true, then upon successful lookup, update the node's |
4561 | | * one-element cache. ONLY ExecModifyTable may pass true for this. |
4562 | | */ |
4563 | | ResultRelInfo * |
4564 | | ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, |
4565 | | bool missing_ok, bool update_cache) |
4566 | 0 | { |
4567 | 0 | if (node->mt_resultOidHash) |
4568 | 0 | { |
4569 | | /* Use the pre-built hash table to locate the rel */ |
4570 | 0 | MTTargetRelLookup *mtlookup; |
4571 | |
4572 | 0 | mtlookup = (MTTargetRelLookup *) |
4573 | 0 | hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL); |
4574 | 0 | if (mtlookup) |
4575 | 0 | { |
4576 | 0 | if (update_cache) |
4577 | 0 | { |
4578 | 0 | node->mt_lastResultOid = resultoid; |
4579 | 0 | node->mt_lastResultIndex = mtlookup->relationIndex; |
4580 | 0 | } |
4581 | 0 | return node->resultRelInfo + mtlookup->relationIndex; |
4582 | 0 | } |
4583 | 0 | } |
4584 | 0 | else |
4585 | 0 | { |
4586 | | /* With few target rels, just search the ResultRelInfo array */ |
4587 | 0 | for (int ndx = 0; ndx < node->mt_nrels; ndx++) |
4588 | 0 | { |
4589 | 0 | ResultRelInfo *rInfo = node->resultRelInfo + ndx; |
4590 | |
4591 | 0 | if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid) |
4592 | 0 | { |
4593 | 0 | if (update_cache) |
4594 | 0 | { |
4595 | 0 | node->mt_lastResultOid = resultoid; |
4596 | 0 | node->mt_lastResultIndex = ndx; |
4597 | 0 | } |
4598 | 0 | return rInfo; |
4599 | 0 | } |
4600 | 0 | } |
4601 | 0 | } |
4602 | | |
4603 | 0 | if (!missing_ok) |
4604 | 0 | elog(ERROR, "incorrect result relation OID %u", resultoid); |
4605 | 0 | return NULL; |
4606 | 0 | } |
4607 | | |
4608 | | /* ---------------------------------------------------------------- |
4609 | | * ExecInitModifyTable |
4610 | | * ---------------------------------------------------------------- |
4611 | | */ |
4612 | | ModifyTableState * |
4613 | | ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) |
4614 | 0 | { |
4615 | 0 | ModifyTableState *mtstate; |
4616 | 0 | Plan *subplan = outerPlan(node); |
4617 | 0 | CmdType operation = node->operation; |
4618 | 0 | int total_nrels = list_length(node->resultRelations); |
4619 | 0 | int nrels; |
4620 | 0 | List *resultRelations = NIL; |
4621 | 0 | List *withCheckOptionLists = NIL; |
4622 | 0 | List *returningLists = NIL; |
4623 | 0 | List *updateColnosLists = NIL; |
4624 | 0 | List *mergeActionLists = NIL; |
4625 | 0 | List *mergeJoinConditions = NIL; |
4626 | 0 | ResultRelInfo *resultRelInfo; |
4627 | 0 | List *arowmarks; |
4628 | 0 | ListCell *l; |
4629 | 0 | int i; |
4630 | 0 | Relation rel; |
4631 | | |
4632 | | /* check for unsupported flags */ |
4633 | 0 | Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); |
4634 | | |
4635 | | /* |
4636 | | * Only consider unpruned relations for initializing their ResultRelInfo |
4637 | | * struct and other fields such as withCheckOptions, etc. |
4638 | | * |
4639 | | * Note: We must avoid pruning every result relation. This is important |
4640 | | * for MERGE, since even if every result relation is pruned from the |
4641 | | * subplan, there might still be NOT MATCHED rows, for which there may be |
4642 | | * INSERT actions to perform. To allow these actions to be found, at |
4643 | | * least one result relation must be kept. Also, when inserting into a |
4644 | | * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct |
4645 | | * as a reference for building the ResultRelInfo of the target partition. |
4646 | | * In either case, it doesn't matter which result relation is kept, so we |
4647 | | * just keep the first one, if all others have been pruned. See also, |
4648 | | * ExecDoInitialPruning(), which ensures that this first result relation |
4649 | | * has been locked. |
4650 | | */ |
4651 | 0 | i = 0; |
4652 | 0 | foreach(l, node->resultRelations) |
4653 | 0 | { |
4654 | 0 | Index rti = lfirst_int(l); |
4655 | 0 | bool keep_rel; |
4656 | |
4657 | 0 | keep_rel = bms_is_member(rti, estate->es_unpruned_relids); |
4658 | 0 | if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL) |
4659 | 0 | { |
4660 | | /* all result relations pruned; keep the first one */ |
4661 | 0 | keep_rel = true; |
4662 | 0 | rti = linitial_int(node->resultRelations); |
4663 | 0 | i = 0; |
4664 | 0 | } |
4665 | |
4666 | 0 | if (keep_rel) |
4667 | 0 | { |
4668 | 0 | resultRelations = lappend_int(resultRelations, rti); |
4669 | 0 | if (node->withCheckOptionLists) |
4670 | 0 | { |
4671 | 0 | List *withCheckOptions = list_nth_node(List, |
4672 | 0 | node->withCheckOptionLists, |
4673 | 0 | i); |
4674 | |
4675 | 0 | withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions); |
4676 | 0 | } |
4677 | 0 | if (node->returningLists) |
4678 | 0 | { |
4679 | 0 | List *returningList = list_nth_node(List, |
4680 | 0 | node->returningLists, |
4681 | 0 | i); |
4682 | |
4683 | 0 | returningLists = lappend(returningLists, returningList); |
4684 | 0 | } |
4685 | 0 | if (node->updateColnosLists) |
4686 | 0 | { |
4687 | 0 | List *updateColnosList = list_nth(node->updateColnosLists, i); |
4688 | |
4689 | 0 | updateColnosLists = lappend(updateColnosLists, updateColnosList); |
4690 | 0 | } |
4691 | 0 | if (node->mergeActionLists) |
4692 | 0 | { |
4693 | 0 | List *mergeActionList = list_nth(node->mergeActionLists, i); |
4694 | |
4695 | 0 | mergeActionLists = lappend(mergeActionLists, mergeActionList); |
4696 | 0 | } |
4697 | 0 | if (node->mergeJoinConditions) |
4698 | 0 | { |
4699 | 0 | List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i); |
4700 | |
4701 | 0 | mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition); |
4702 | 0 | } |
4703 | 0 | } |
4704 | 0 | i++; |
4705 | 0 | } |
4706 | 0 | nrels = list_length(resultRelations); |
4707 | 0 | Assert(nrels > 0); |
4708 | | |
4709 | | /* |
4710 | | * create state structure |
4711 | | */ |
4712 | 0 | mtstate = makeNode(ModifyTableState); |
4713 | 0 | mtstate->ps.plan = (Plan *) node; |
4714 | 0 | mtstate->ps.state = estate; |
4715 | 0 | mtstate->ps.ExecProcNode = ExecModifyTable; |
4716 | |
4717 | 0 | mtstate->operation = operation; |
4718 | 0 | mtstate->canSetTag = node->canSetTag; |
4719 | 0 | mtstate->mt_done = false; |
4720 | |
4721 | 0 | mtstate->mt_nrels = nrels; |
4722 | 0 | mtstate->resultRelInfo = (ResultRelInfo *) |
4723 | 0 | palloc(nrels * sizeof(ResultRelInfo)); |
4724 | |
4725 | 0 | mtstate->mt_merge_pending_not_matched = NULL; |
4726 | 0 | mtstate->mt_merge_inserted = 0; |
4727 | 0 | mtstate->mt_merge_updated = 0; |
4728 | 0 | mtstate->mt_merge_deleted = 0; |
4729 | 0 | mtstate->mt_updateColnosLists = updateColnosLists; |
4730 | 0 | mtstate->mt_mergeActionLists = mergeActionLists; |
4731 | 0 | mtstate->mt_mergeJoinConditions = mergeJoinConditions; |
4732 | | |
4733 | | /*---------- |
4734 | | * Resolve the target relation. This is the same as: |
4735 | | * |
4736 | | * - the relation for which we will fire FOR STATEMENT triggers, |
4737 | | * - the relation into whose tuple format all captured transition tuples |
4738 | | * must be converted, and |
4739 | | * - the root partitioned table used for tuple routing. |
4740 | | * |
4741 | | * If it's a partitioned or inherited table, the root partition or |
4742 | | * appendrel RTE doesn't appear elsewhere in the plan and its RT index is |
4743 | | * given explicitly in node->rootRelation. Otherwise, the target relation |
4744 | | * is the sole relation in the node->resultRelations list and, since it can |
4745 | | * never be pruned, also in the resultRelations list constructed above. |
4746 | | *---------- |
4747 | | */ |
4748 | 0 | if (node->rootRelation > 0) |
4749 | 0 | { |
4750 | 0 | Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids)); |
4751 | 0 | mtstate->rootResultRelInfo = makeNode(ResultRelInfo); |
4752 | 0 | ExecInitResultRelation(estate, mtstate->rootResultRelInfo, |
4753 | 0 | node->rootRelation); |
4754 | 0 | } |
4755 | 0 | else |
4756 | 0 | { |
4757 | 0 | Assert(list_length(node->resultRelations) == 1); |
4758 | 0 | Assert(list_length(resultRelations) == 1); |
4759 | 0 | mtstate->rootResultRelInfo = mtstate->resultRelInfo; |
4760 | 0 | ExecInitResultRelation(estate, mtstate->resultRelInfo, |
4761 | 0 | linitial_int(resultRelations)); |
4762 | 0 | } |
4763 | | |
4764 | | /* set up epqstate with dummy subplan data for the moment */ |
4765 | 0 | EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, |
4766 | 0 | node->epqParam, resultRelations); |
4767 | 0 | mtstate->fireBSTriggers = true; |
4768 | | |
4769 | | /* |
4770 | | * Build state for collecting transition tuples. This requires having a |
4771 | | * valid trigger query context, so skip it in explain-only mode. |
4772 | | */ |
4773 | 0 | if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY)) |
4774 | 0 | ExecSetupTransitionCaptureState(mtstate, estate); |
4775 | | |
4776 | | /* |
4777 | | * Open all the result relations and initialize the ResultRelInfo structs. |
4778 | | * (But root relation was initialized above, if it's part of the array.) |
4779 | | * We must do this before initializing the subplan, because direct-modify |
4780 | | * FDWs expect their ResultRelInfos to be available. |
4781 | | */ |
4782 | 0 | resultRelInfo = mtstate->resultRelInfo; |
4783 | 0 | i = 0; |
4784 | 0 | foreach(l, resultRelations) |
4785 | 0 | { |
4786 | 0 | Index resultRelation = lfirst_int(l); |
4787 | 0 | List *mergeActions = NIL; |
4788 | |
4789 | 0 | if (mergeActionLists) |
4790 | 0 | mergeActions = list_nth(mergeActionLists, i); |
4791 | |
4792 | 0 | if (resultRelInfo != mtstate->rootResultRelInfo) |
4793 | 0 | { |
4794 | 0 | ExecInitResultRelation(estate, resultRelInfo, resultRelation); |
4795 | | |
4796 | | /* |
4797 | | * For child result relations, store the root result relation |
4798 | | * pointer. We do so for the convenience of places that want to |
4799 | | * look at the query's original target relation but don't have the |
4800 | | * mtstate handy. |
4801 | | */ |
4802 | 0 | resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo; |
4803 | 0 | } |
4804 | | |
4805 | | /* Initialize the usesFdwDirectModify flag */ |
4806 | 0 | resultRelInfo->ri_usesFdwDirectModify = |
4807 | 0 | bms_is_member(i, node->fdwDirectModifyPlans); |
4808 | | |
4809 | | /* |
4810 | | * Verify result relation is a valid target for the current operation |
4811 | | */ |
4812 | 0 | CheckValidResultRel(resultRelInfo, operation, mergeActions); |
4813 | |
4814 | 0 | resultRelInfo++; |
4815 | 0 | i++; |
4816 | 0 | } |
4817 | | |
4818 | | /* |
4819 | | * Now we may initialize the subplan. |
4820 | | */ |
4821 | 0 | outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags); |
4822 | | |
4823 | | /* |
4824 | | * Do additional per-result-relation initialization. |
4825 | | */ |
4826 | 0 | for (i = 0; i < nrels; i++) |
4827 | 0 | { |
4828 | 0 | resultRelInfo = &mtstate->resultRelInfo[i]; |
4829 | | |
4830 | | /* Let FDWs init themselves for foreign-table result rels */ |
4831 | 0 | if (!resultRelInfo->ri_usesFdwDirectModify && |
4832 | 0 | resultRelInfo->ri_FdwRoutine != NULL && |
4833 | 0 | resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL) |
4834 | 0 | { |
4835 | 0 | List *fdw_private = (List *) list_nth(node->fdwPrivLists, i); |
4836 | |
4837 | 0 | resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate, |
4838 | 0 | resultRelInfo, |
4839 | 0 | fdw_private, |
4840 | 0 | i, |
4841 | 0 | eflags); |
4842 | 0 | } |
4843 | | |
4844 | | /* |
4845 | | * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either |
4846 | | * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign |
4847 | | * tables, the FDW might have created additional junk attr(s), but |
4848 | | * those are no concern of ours. |
4849 | | */ |
4850 | 0 | if (operation == CMD_UPDATE || operation == CMD_DELETE || |
4851 | 0 | operation == CMD_MERGE) |
4852 | 0 | { |
4853 | 0 | char relkind; |
4854 | |
4855 | 0 | relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; |
4856 | 0 | if (relkind == RELKIND_RELATION || |
4857 | 0 | relkind == RELKIND_MATVIEW || |
4858 | 0 | relkind == RELKIND_PARTITIONED_TABLE) |
4859 | 0 | { |
4860 | 0 | resultRelInfo->ri_RowIdAttNo = |
4861 | 0 | ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid"); |
4862 | 0 | if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) |
4863 | 0 | elog(ERROR, "could not find junk ctid column"); |
4864 | 0 | } |
4865 | 0 | else if (relkind == RELKIND_FOREIGN_TABLE) |
4866 | 0 | { |
4867 | | /* |
4868 | | * We don't support MERGE with foreign tables for now. (It's |
4869 | | * problematic because the implementation uses CTID.) |
4870 | | */ |
4871 | 0 | Assert(operation != CMD_MERGE); |
4872 | | |
4873 | | /* |
4874 | | * When there is a row-level trigger, there should be a |
4875 | | * wholerow attribute. We also require it to be present in |
4876 | | * UPDATE and MERGE, so we can get the values of unchanged |
4877 | | * columns. |
4878 | | */ |
4879 | 0 | resultRelInfo->ri_RowIdAttNo = |
4880 | 0 | ExecFindJunkAttributeInTlist(subplan->targetlist, |
4881 | 0 | "wholerow"); |
4882 | 0 | if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) && |
4883 | 0 | !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) |
4884 | 0 | elog(ERROR, "could not find junk wholerow column"); |
4885 | 0 | } |
4886 | 0 | else |
4887 | 0 | { |
4888 | | /* Other valid target relkinds must provide wholerow */ |
4889 | 0 | resultRelInfo->ri_RowIdAttNo = |
4890 | 0 | ExecFindJunkAttributeInTlist(subplan->targetlist, |
4891 | 0 | "wholerow"); |
4892 | 0 | if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) |
4893 | 0 | elog(ERROR, "could not find junk wholerow column"); |
4894 | 0 | } |
4895 | 0 | } |
4896 | 0 | } |
4897 | | |
4898 | | /* |
4899 | | * If this is an inherited update/delete/merge, there will be a junk |
4900 | | * attribute named "tableoid" present in the subplan's targetlist. It |
4901 | | * will be used to identify the result relation for a given tuple to be |
4902 | | * updated/deleted/merged. |
4903 | | */ |
4904 | 0 | mtstate->mt_resultOidAttno = |
4905 | 0 | ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid"); |
4906 | 0 | Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1); |
4907 | 0 | mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */ |
4908 | 0 | mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */ |
4909 | | |
4910 | | /* Get the root target relation */ |
4911 | 0 | rel = mtstate->rootResultRelInfo->ri_RelationDesc; |
4912 | | |
4913 | | /* |
4914 | | * Build state for tuple routing if it's a partitioned INSERT. An UPDATE |
4915 | | * or MERGE might need this too, but only if it actually moves tuples |
4916 | | * between partitions; in that case setup is done by |
4917 | | * ExecCrossPartitionUpdate. |
4918 | | */ |
4919 | 0 | if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && |
4920 | 0 | operation == CMD_INSERT) |
4921 | 0 | mtstate->mt_partition_tuple_routing = |
4922 | 0 | ExecSetupPartitionTupleRouting(estate, rel); |
4923 | | |
4924 | | /* |
4925 | | * Initialize any WITH CHECK OPTION constraints if needed. |
4926 | | */ |
4927 | 0 | resultRelInfo = mtstate->resultRelInfo; |
4928 | 0 | foreach(l, withCheckOptionLists) |
4929 | 0 | { |
4930 | 0 | List *wcoList = (List *) lfirst(l); |
4931 | 0 | List *wcoExprs = NIL; |
4932 | 0 | ListCell *ll; |
4933 | |
4934 | 0 | foreach(ll, wcoList) |
4935 | 0 | { |
4936 | 0 | WithCheckOption *wco = (WithCheckOption *) lfirst(ll); |
4937 | 0 | ExprState *wcoExpr = ExecInitQual((List *) wco->qual, |
4938 | 0 | &mtstate->ps); |
4939 | |
4940 | 0 | wcoExprs = lappend(wcoExprs, wcoExpr); |
4941 | 0 | } |
4942 | |
4943 | 0 | resultRelInfo->ri_WithCheckOptions = wcoList; |
4944 | 0 | resultRelInfo->ri_WithCheckOptionExprs = wcoExprs; |
4945 | 0 | resultRelInfo++; |
4946 | 0 | } |
4947 | | |
4948 | | /* |
4949 | | * Initialize RETURNING projections if needed. |
4950 | | */ |
4951 | 0 | if (returningLists) |
4952 | 0 | { |
4953 | 0 | TupleTableSlot *slot; |
4954 | 0 | ExprContext *econtext; |
4955 | | |
4956 | | /* |
4957 | | * Initialize result tuple slot and assign its rowtype using the plan |
4958 | | * node's declared targetlist, which the planner set up to be the same |
4959 | | * as the first (before runtime pruning) RETURNING list. We assume |
4960 | | * all the result rels will produce compatible output. |
4961 | | */ |
4962 | 0 | ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual); |
4963 | 0 | slot = mtstate->ps.ps_ResultTupleSlot; |
4964 | | |
4965 | | /* Need an econtext too */ |
4966 | 0 | if (mtstate->ps.ps_ExprContext == NULL) |
4967 | 0 | ExecAssignExprContext(estate, &mtstate->ps); |
4968 | 0 | econtext = mtstate->ps.ps_ExprContext; |
4969 | | |
4970 | | /* |
4971 | | * Build a projection for each result rel. |
4972 | | */ |
4973 | 0 | resultRelInfo = mtstate->resultRelInfo; |
4974 | 0 | foreach(l, returningLists) |
4975 | 0 | { |
4976 | 0 | List *rlist = (List *) lfirst(l); |
4977 | |
4978 | 0 | resultRelInfo->ri_returningList = rlist; |
4979 | 0 | resultRelInfo->ri_projectReturning = |
4980 | 0 | ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps, |
4981 | 0 | resultRelInfo->ri_RelationDesc->rd_att); |
4982 | 0 | resultRelInfo++; |
4983 | 0 | } |
4984 | 0 | } |
4985 | 0 | else |
4986 | 0 | { |
4987 | | /* |
4988 | | * We still must construct a dummy result tuple type, because InitPlan |
4989 | | * expects one (maybe should change that?). |
4990 | | */ |
4991 | 0 | ExecInitResultTypeTL(&mtstate->ps); |
4992 | |
4993 | 0 | mtstate->ps.ps_ExprContext = NULL; |
4994 | 0 | } |
4995 | | |
4996 | | /* Set the list of arbiter indexes if needed for ON CONFLICT */ |
4997 | 0 | resultRelInfo = mtstate->resultRelInfo; |
4998 | 0 | if (node->onConflictAction != ONCONFLICT_NONE) |
4999 | 0 | { |
5000 | | /* insert may only have one relation, inheritance is not expanded */ |
5001 | 0 | Assert(total_nrels == 1); |
5002 | 0 | resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes; |
5003 | 0 | } |
5004 | | |
5005 | | /* |
5006 | | * If needed, initialize target list, projection and qual for ON CONFLICT |
5007 | | * DO UPDATE. |
5008 | | */ |
5009 | 0 | if (node->onConflictAction == ONCONFLICT_UPDATE) |
5010 | 0 | { |
5011 | 0 | OnConflictSetState *onconfl = makeNode(OnConflictSetState); |
5012 | 0 | ExprContext *econtext; |
5013 | 0 | TupleDesc relationDesc; |
5014 | | |
5015 | | /* already exists if created by RETURNING processing above */ |
5016 | 0 | if (mtstate->ps.ps_ExprContext == NULL) |
5017 | 0 | ExecAssignExprContext(estate, &mtstate->ps); |
5018 | |
5019 | 0 | econtext = mtstate->ps.ps_ExprContext; |
5020 | 0 | relationDesc = resultRelInfo->ri_RelationDesc->rd_att; |
5021 | | |
5022 | | /* create state for DO UPDATE SET operation */ |
5023 | 0 | resultRelInfo->ri_onConflict = onconfl; |
5024 | | |
5025 | | /* initialize slot for the existing tuple */ |
5026 | 0 | onconfl->oc_Existing = |
5027 | 0 | table_slot_create(resultRelInfo->ri_RelationDesc, |
5028 | 0 | &mtstate->ps.state->es_tupleTable); |
5029 | | |
5030 | | /* |
5031 | | * Create the tuple slot for the UPDATE SET projection. We want a slot |
5032 | | * of the table's type here, because the slot will be used to insert |
5033 | | * into the table, and for RETURNING processing - which may access |
5034 | | * system attributes. |
5035 | | */ |
5036 | 0 | onconfl->oc_ProjSlot = |
5037 | 0 | table_slot_create(resultRelInfo->ri_RelationDesc, |
5038 | 0 | &mtstate->ps.state->es_tupleTable); |
5039 | | |
5040 | | /* build UPDATE SET projection state */ |
5041 | 0 | onconfl->oc_ProjInfo = |
5042 | 0 | ExecBuildUpdateProjection(node->onConflictSet, |
5043 | 0 | true, |
5044 | 0 | node->onConflictCols, |
5045 | 0 | relationDesc, |
5046 | 0 | econtext, |
5047 | 0 | onconfl->oc_ProjSlot, |
5048 | 0 | &mtstate->ps); |
5049 | | |
5050 | | /* initialize state to evaluate the WHERE clause, if any */ |
5051 | 0 | if (node->onConflictWhere) |
5052 | 0 | { |
5053 | 0 | ExprState *qualexpr; |
5054 | |
5055 | 0 | qualexpr = ExecInitQual((List *) node->onConflictWhere, |
5056 | 0 | &mtstate->ps); |
5057 | 0 | onconfl->oc_WhereClause = qualexpr; |
5058 | 0 | } |
5059 | 0 | } |
5060 | | |
5061 | | /* |
5062 | | * If we have any secondary relations in an UPDATE or DELETE, they need to |
5063 | | * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the |
5064 | | * EvalPlanQual mechanism needs to be told about them. This also goes for |
5065 | | * the source relations in a MERGE. Locate the relevant ExecRowMarks. |
5066 | | */ |
5067 | 0 | arowmarks = NIL; |
5068 | 0 | foreach(l, node->rowMarks) |
5069 | 0 | { |
5070 | 0 | PlanRowMark *rc = lfirst_node(PlanRowMark, l); |
5071 | 0 | ExecRowMark *erm; |
5072 | 0 | ExecAuxRowMark *aerm; |
5073 | | |
5074 | | /* |
5075 | | * Ignore "parent" rowmarks, because they are irrelevant at runtime. |
5076 | | * Also ignore the rowmarks belonging to child tables that have been |
5077 | | * pruned in ExecDoInitialPruning(). |
5078 | | */ |
5079 | 0 | if (rc->isParent || |
5080 | 0 | !bms_is_member(rc->rti, estate->es_unpruned_relids)) |
5081 | 0 | continue; |
5082 | | |
5083 | | /* Find ExecRowMark and build ExecAuxRowMark */ |
5084 | 0 | erm = ExecFindRowMark(estate, rc->rti, false); |
5085 | 0 | aerm = ExecBuildAuxRowMark(erm, subplan->targetlist); |
5086 | 0 | arowmarks = lappend(arowmarks, aerm); |
5087 | 0 | } |
5088 | | |
5089 | | /* For a MERGE command, initialize its state */ |
5090 | 0 | if (mtstate->operation == CMD_MERGE) |
5091 | 0 | ExecInitMerge(mtstate, estate); |
5092 | |
5093 | 0 | EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks); |
5094 | | |
5095 | | /* |
5096 | | * If there are a lot of result relations, use a hash table to speed the |
5097 | | * lookups. If there are not a lot, a simple linear search is faster. |
5098 | | * |
5099 | | * It's not clear where the threshold is, but try 64 for starters. In a |
5100 | | * debugging build, use a small threshold so that we get some test |
5101 | | * coverage of both code paths. |
5102 | | */ |
5103 | | #ifdef USE_ASSERT_CHECKING |
5104 | | #define MT_NRELS_HASH 4 |
5105 | | #else |
5106 | 0 | #define MT_NRELS_HASH 64 |
5107 | 0 | #endif |
5108 | 0 | if (nrels >= MT_NRELS_HASH) |
5109 | 0 | { |
5110 | 0 | HASHCTL hash_ctl; |
5111 | |
5112 | 0 | hash_ctl.keysize = sizeof(Oid); |
5113 | 0 | hash_ctl.entrysize = sizeof(MTTargetRelLookup); |
5114 | 0 | hash_ctl.hcxt = CurrentMemoryContext; |
5115 | 0 | mtstate->mt_resultOidHash = |
5116 | 0 | hash_create("ModifyTable target hash", |
5117 | 0 | nrels, &hash_ctl, |
5118 | 0 | HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); |
5119 | 0 | for (i = 0; i < nrels; i++) |
5120 | 0 | { |
5121 | 0 | Oid hashkey; |
5122 | 0 | MTTargetRelLookup *mtlookup; |
5123 | 0 | bool found; |
5124 | |
5125 | 0 | resultRelInfo = &mtstate->resultRelInfo[i]; |
5126 | 0 | hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc); |
5127 | 0 | mtlookup = (MTTargetRelLookup *) |
5128 | 0 | hash_search(mtstate->mt_resultOidHash, &hashkey, |
5129 | 0 | HASH_ENTER, &found); |
5130 | 0 | Assert(!found); |
5131 | 0 | mtlookup->relationIndex = i; |
5132 | 0 | } |
5133 | 0 | } |
5134 | 0 | else |
5135 | 0 | mtstate->mt_resultOidHash = NULL; |
5136 | | |
5137 | | /* |
5138 | | * Determine if the FDW supports batch insert and determine the batch size |
5139 | | * (a FDW may support batching, but it may be disabled for the |
5140 | | * server/table). |
5141 | | * |
5142 | | * We only do this for INSERT, so that for UPDATE/DELETE the batch size |
5143 | | * remains set to 0. |
5144 | | */ |
5145 | 0 | if (operation == CMD_INSERT) |
5146 | 0 | { |
5147 | | /* insert may only have one relation, inheritance is not expanded */ |
5148 | 0 | Assert(total_nrels == 1); |
5149 | 0 | resultRelInfo = mtstate->resultRelInfo; |
5150 | 0 | if (!resultRelInfo->ri_usesFdwDirectModify && |
5151 | 0 | resultRelInfo->ri_FdwRoutine != NULL && |
5152 | 0 | resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize && |
5153 | 0 | resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert) |
5154 | 0 | { |
5155 | 0 | resultRelInfo->ri_BatchSize = |
5156 | 0 | resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo); |
5157 | 0 | Assert(resultRelInfo->ri_BatchSize >= 1); |
5158 | 0 | } |
5159 | 0 | else |
5160 | 0 | resultRelInfo->ri_BatchSize = 1; |
5161 | 0 | } |
5162 | | |
5163 | | /* |
5164 | | * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it |
5165 | | * to estate->es_auxmodifytables so that it will be run to completion by |
5166 | | * ExecPostprocessPlan. (It'd actually work fine to add the primary |
5167 | | * ModifyTable node too, but there's no need.) Note the use of lcons not |
5168 | | * lappend: we need later-initialized ModifyTable nodes to be shut down |
5169 | | * before earlier ones. This ensures that we don't throw away RETURNING |
5170 | | * rows that need to be seen by a later CTE subplan. |
5171 | | */ |
5172 | 0 | if (!mtstate->canSetTag) |
5173 | 0 | estate->es_auxmodifytables = lcons(mtstate, |
5174 | 0 | estate->es_auxmodifytables); |
5175 | |
5176 | 0 | return mtstate; |
5177 | 0 | } |
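/*
 * [Editor's illustrative sketch, not part of nodeModifyTable.c] The FDW side
 * of the batch-insert negotiation performed above: GetForeignModifyBatchSize
 * reports how many rows the executor may accumulate before calling
 * ExecForeignBatchInsert (returning 1 disables batching), and
 * ExecForeignBatchInsert flushes the accumulated rows.  The function names
 * and the fixed batch size are hypothetical; a real wrapper would typically
 * read a per-server or per-table option instead.
 */
static int
example_GetForeignModifyBatchSize(ResultRelInfo *rinfo)
{
	return 100;					/* assumed: allow up to 100 rows per flush */
}

static TupleTableSlot **
example_ExecForeignBatchInsert(EState *estate, ResultRelInfo *rinfo,
							   TupleTableSlot **slots,
							   TupleTableSlot **planSlots, int *numSlots)
{
	/* ... ship all *numSlots tuples to the remote side in one round trip ... */
	return slots;				/* report that every row was inserted */
}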
5178 | | |
5179 | | /* ---------------------------------------------------------------- |
5180 | | * ExecEndModifyTable |
5181 | | * |
5182 | | * Shuts down the plan. |
5183 | | * |
5184 | | * Returns nothing of interest. |
5185 | | * ---------------------------------------------------------------- |
5186 | | */ |
5187 | | void |
5188 | | ExecEndModifyTable(ModifyTableState *node) |
5189 | 0 | { |
5190 | 0 | int i; |
5191 | | |
5192 | | /* |
5193 | | * Allow any FDWs to shut down |
5194 | | */ |
5195 | 0 | for (i = 0; i < node->mt_nrels; i++) |
5196 | 0 | { |
5197 | 0 | int j; |
5198 | 0 | ResultRelInfo *resultRelInfo = node->resultRelInfo + i; |
5199 | |
5200 | 0 | if (!resultRelInfo->ri_usesFdwDirectModify && |
5201 | 0 | resultRelInfo->ri_FdwRoutine != NULL && |
5202 | 0 | resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL) |
5203 | 0 | resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state, |
5204 | 0 | resultRelInfo); |
5205 | | |
5206 | | /* |
5207 | | * Cleanup the initialized batch slots. This only matters for FDWs |
5208 | | * with batching, but the other cases will have ri_NumSlotsInitialized |
5209 | | * == 0. |
5210 | | */ |
5211 | 0 | for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++) |
5212 | 0 | { |
5213 | 0 | ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]); |
5214 | 0 | ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]); |
5215 | 0 | } |
5216 | 0 | } |
5217 | | |
5218 | | /* |
5219 | | * Close all the partitioned tables, leaf partitions, and their indices |
5220 | | * and release the slot used for tuple routing, if set. |
5221 | | */ |
5222 | 0 | if (node->mt_partition_tuple_routing) |
5223 | 0 | { |
5224 | 0 | ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing); |
5225 | |
5226 | 0 | if (node->mt_root_tuple_slot) |
5227 | 0 | ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot); |
5228 | 0 | } |
5229 | | |
5230 | | /* |
5231 | | * Terminate EPQ execution if active |
5232 | | */ |
5233 | 0 | EvalPlanQualEnd(&node->mt_epqstate); |
5234 | | |
5235 | | /* |
5236 | | * shut down subplan |
5237 | | */ |
5238 | 0 | ExecEndNode(outerPlanState(node)); |
5239 | 0 | } |
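/*
 * [Editor's illustrative sketch, not part of nodeModifyTable.c] The FDW
 * counterpart of the EndForeignModify call issued during shutdown above:
 * this is where a wrapper releases any per-modification state it stashed in
 * ri_FdwState from BeginForeignModify.  The function name and the shape of
 * the private state are hypothetical.
 */
static void
example_EndForeignModify(EState *estate, ResultRelInfo *rinfo)
{
	void	   *fmstate = rinfo->ri_FdwState;	/* hypothetical private state */

	if (fmstate != NULL)
	{
		/* e.g. close the remote prepared statement, then free fmstate */
	}
}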
5240 | | |
5241 | | void |
5242 | | ExecReScanModifyTable(ModifyTableState *node) |
5243 | 0 | { |
5244 | | /* |
5245 | | * Currently, we don't need to support rescan on ModifyTable nodes. The |
5246 | | * semantics of that would be a bit debatable anyway. |
5247 | | */ |
5248 | 0 | elog(ERROR, "ExecReScanModifyTable is not implemented"); |
5249 | 0 | } |