Coverage Report

Created: 2025-07-03 06:49

/src/postgres/src/backend/executor/nodeLockRows.c
Note: every executable line in this listing has an execution count of 0; the file is entirely uncovered in this run.
/*-------------------------------------------------------------------------
 *
 * nodeLockRows.c
 *    Routines to handle FOR UPDATE/FOR SHARE row locking
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeLockRows.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *    ExecLockRows    - fetch locked rows
 *    ExecInitLockRows  - initialize node and subnodes
 *    ExecEndLockRows   - shut down node and subnodes
 */
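
/*
 * Illustrative example (the table name is hypothetical): a query such as
 *
 *    SELECT * FROM accounts WHERE id = 1 FOR UPDATE;
 *
 * yields a plan with a LockRows node at (or near) the top.  ExecLockRows
 * locks each tuple returned by the subplan before returning it to the
 * caller, and under READ COMMITTED it rechecks updated tuple versions
 * via the EvalPlanQual (EPQ) machinery.
 */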

#include "postgres.h"

#include "access/tableam.h"
#include "access/xact.h"
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "utils/rel.h"


/* ----------------------------------------------------------------
 *    ExecLockRows
 * ----------------------------------------------------------------
 */
static TupleTableSlot *     /* return: a tuple or NULL */
ExecLockRows(PlanState *pstate)
{
  LockRowsState *node = castNode(LockRowsState, pstate);
  TupleTableSlot *slot;
  EState     *estate;
  PlanState  *outerPlan;
  bool    epq_needed;
  ListCell   *lc;

  CHECK_FOR_INTERRUPTS();

  /*
   * get information from the node
   */
  estate = node->ps.state;
  outerPlan = outerPlanState(node);

  /*
   * Get next tuple from subplan, if any.
   */
lnext:
  slot = ExecProcNode(outerPlan);

  if (TupIsNull(slot))
  {
    /* Release any resources held by EPQ mechanism before exiting */
    EvalPlanQualEnd(&node->lr_epqstate);
    return NULL;
  }

  /* We don't need EvalPlanQual unless we get updated tuple version(s) */
  epq_needed = false;

  /*
   * Attempt to lock the source tuple(s).  (Note we only have locking
   * rowmarks in lr_arowMarks.)
   */
  foreach(lc, node->lr_arowMarks)
  {
    ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
    ExecRowMark *erm = aerm->rowmark;
    Datum   datum;
    bool    isNull;
    ItemPointerData tid;
    TM_FailureData tmfd;
    LockTupleMode lockmode;
    int     lockflags = 0;
    TM_Result test;
    TupleTableSlot *markSlot;

    /* clear any leftover test tuple for this rel */
    markSlot = EvalPlanQualSlot(&node->lr_epqstate, erm->relation, erm->rti);
    ExecClearTuple(markSlot);

    /* if child rel, must check whether it produced this row */
    if (erm->rti != erm->prti)
    {
      Oid     tableoid;

      datum = ExecGetJunkAttribute(slot,
                     aerm->toidAttNo,
                     &isNull);
      /* shouldn't ever get a null result... */
      if (isNull)
        elog(ERROR, "tableoid is NULL");
      tableoid = DatumGetObjectId(datum);

      Assert(OidIsValid(erm->relid));
      if (tableoid != erm->relid)
      {
        /* this child is inactive right now */
        erm->ermActive = false;
        ItemPointerSetInvalid(&(erm->curCtid));
        continue;
      }
    }
    erm->ermActive = true;

    /* fetch the tuple's ctid */
    datum = ExecGetJunkAttribute(slot,
                   aerm->ctidAttNo,
                   &isNull);
    /* shouldn't ever get a null result... */
    if (isNull)
      elog(ERROR, "ctid is NULL");

    /* requests for foreign tables must be passed to their FDW */
    if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
    {
      FdwRoutine *fdwroutine;
      bool    updated = false;

      fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
      /* this should have been checked already, but let's be safe */
      if (fdwroutine->RefetchForeignRow == NULL)
        ereport(ERROR,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("cannot lock rows in foreign table \"%s\"",
                RelationGetRelationName(erm->relation))));

      fdwroutine->RefetchForeignRow(estate,
                      erm,
                      datum,
                      markSlot,
                      &updated);
      if (TupIsNull(markSlot))
      {
        /* couldn't get the lock, so skip this row */
        goto lnext;
      }

      /*
       * if FDW says tuple was updated before getting locked, we need to
       * perform EPQ testing to see if quals are still satisfied
       */
      if (updated)
        epq_needed = true;

      continue;
    }

    /* okay, try to lock (and fetch) the tuple */
    tid = *((ItemPointer) DatumGetPointer(datum));
    switch (erm->markType)
    {
      case ROW_MARK_EXCLUSIVE:
        lockmode = LockTupleExclusive;
        break;
      case ROW_MARK_NOKEYEXCLUSIVE:
        lockmode = LockTupleNoKeyExclusive;
        break;
      case ROW_MARK_SHARE:
        lockmode = LockTupleShare;
        break;
      case ROW_MARK_KEYSHARE:
        lockmode = LockTupleKeyShare;
        break;
      default:
        elog(ERROR, "unsupported rowmark type");
        lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
        break;
    }
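
    /*
     * For reference: these mark types correspond to the SQL locking
     * clauses FOR UPDATE (ROW_MARK_EXCLUSIVE), FOR NO KEY UPDATE
     * (ROW_MARK_NOKEYEXCLUSIVE), FOR SHARE (ROW_MARK_SHARE), and
     * FOR KEY SHARE (ROW_MARK_KEYSHARE).
     */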

    lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;
    if (!IsolationUsesXactSnapshot())
      lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
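
    /*
     * (For reference: with TUPLE_LOCK_FLAG_FIND_LAST_VERSION set, as under
     * READ COMMITTED, table_tuple_lock follows the update chain and locks
     * the latest tuple version; without it, a concurrent update surfaces
     * as TM_Updated/TM_Deleted, which we report as a serialization failure
     * below.)
     */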

    test = table_tuple_lock(erm->relation, &tid, estate->es_snapshot,
                markSlot, estate->es_output_cid,
                lockmode, erm->waitPolicy,
                lockflags,
                &tmfd);

    switch (test)
    {
      case TM_WouldBlock:
        /* couldn't lock tuple in SKIP LOCKED mode */
        goto lnext;

      case TM_SelfModified:

        /*
         * The target tuple was already updated or deleted by the
         * current command, or by a later command in the current
         * transaction.  We *must* ignore the tuple in the former
         * case, so as to avoid the "Halloween problem" of repeated
         * update attempts.  In the latter case it might be sensible
         * to fetch the updated tuple instead, but doing so would
         * require changing heap_update and heap_delete to not
         * complain about updating "invisible" tuples, which seems
         * pretty scary (table_tuple_lock will not complain, but few
         * callers expect TM_Invisible, and we're not one of them). So
         * for now, treat the tuple as deleted and do not process.
         */
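        /*
         * (Hypothetical illustration: one way to reach this case is a
         * data-modifying CTE that updates a row which the outer
         * SELECT ... FOR UPDATE then tries to lock within the same
         * command.)
         */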
        goto lnext;

      case TM_Ok:

        /*
         * Got the lock successfully; the locked tuple is saved in
         * markSlot for EvalPlanQual testing below, if needed.
         */
        if (tmfd.traversed)
          epq_needed = true;
        break;

      case TM_Updated:
        if (IsolationUsesXactSnapshot())
          ereport(ERROR,
              (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
               errmsg("could not serialize access due to concurrent update")));
        elog(ERROR, "unexpected table_tuple_lock status: %u",
           test);
        break;

      case TM_Deleted:
        if (IsolationUsesXactSnapshot())
          ereport(ERROR,
              (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
               errmsg("could not serialize access due to concurrent update")));
        /* tuple was deleted so don't return it */
        goto lnext;

      case TM_Invisible:
        elog(ERROR, "attempted to lock invisible tuple");
        break;

      default:
        elog(ERROR, "unrecognized table_tuple_lock status: %u",
           test);
    }

    /* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
    erm->curCtid = tid;
  }

  /*
   * If we need to do EvalPlanQual testing, do so.
   */
  if (epq_needed)
  {
    /* Initialize EPQ machinery */
    EvalPlanQualBegin(&node->lr_epqstate);

    /*
     * To fetch non-locked source rows the EPQ logic needs to access junk
     * columns from the tuple being tested.
     */
    EvalPlanQualSetSlot(&node->lr_epqstate, slot);
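
    /*
     * (For reference: the recheck below re-executes the subplan using the
     * locked tuple versions captured in the EPQ slots above; a NULL
     * result means the updated row no longer satisfies the quals.)
     */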

    /*
     * And finally we can re-evaluate the tuple.
     */
    slot = EvalPlanQualNext(&node->lr_epqstate);
    if (TupIsNull(slot))
    {
      /* Updated tuple fails qual, so ignore it and go on */
      goto lnext;
    }
  }

  /* Got all locks, so return the current tuple */
  return slot;
}

/* ----------------------------------------------------------------
 *    ExecInitLockRows
 *
 *    This initializes the LockRows node state structures and
 *    the node's subplan.
 * ----------------------------------------------------------------
 */
LockRowsState *
ExecInitLockRows(LockRows *node, EState *estate, int eflags)
{
  LockRowsState *lrstate;
  Plan     *outerPlan = outerPlan(node);
  List     *epq_arowmarks;
  ListCell   *lc;

  /* check for unsupported flags */
  Assert(!(eflags & EXEC_FLAG_MARK));

  /*
   * create state structure
   */
  lrstate = makeNode(LockRowsState);
  lrstate->ps.plan = (Plan *) node;
  lrstate->ps.state = estate;
  lrstate->ps.ExecProcNode = ExecLockRows;

  /*
   * Miscellaneous initialization
   *
   * LockRows nodes never call ExecQual or ExecProject, therefore no
   * ExprContext is needed.
   */

  /*
   * Initialize result type.
   */
  ExecInitResultTypeTL(&lrstate->ps);

  /*
   * then initialize outer plan
   */
  outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);

  /* node returns unmodified slots from the outer plan */
  lrstate->ps.resultopsset = true;
  lrstate->ps.resultops = ExecGetResultSlotOps(outerPlanState(lrstate),
                         &lrstate->ps.resultopsfixed);

  /*
   * LockRows nodes do no projections, so initialize projection info for
   * this node appropriately
   */
  lrstate->ps.ps_ProjInfo = NULL;

  /*
   * Locate the ExecRowMark(s) that this node is responsible for, and
   * construct ExecAuxRowMarks for them.  (InitPlan should already have
   * built the global list of ExecRowMarks.)
   */
  lrstate->lr_arowMarks = NIL;
  epq_arowmarks = NIL;
  foreach(lc, node->rowMarks)
  {
    PlanRowMark *rc = lfirst_node(PlanRowMark, lc);
    ExecRowMark *erm;
    ExecAuxRowMark *aerm;

    /*
     * Ignore "parent" rowmarks, because they are irrelevant at runtime.
     * Also ignore the rowmarks belonging to child tables that have been
     * pruned in ExecDoInitialPruning().
     */
    if (rc->isParent ||
      !bms_is_member(rc->rti, estate->es_unpruned_relids))
      continue;

    /* find ExecRowMark and build ExecAuxRowMark */
    erm = ExecFindRowMark(estate, rc->rti, false);
    aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);

    /*
     * Only locking rowmarks go into our own list.  Non-locking marks are
     * passed off to the EvalPlanQual machinery.  This is because we don't
     * want to bother fetching non-locked rows unless we actually have to
     * do an EPQ recheck.
     */
    if (RowMarkRequiresRowShareLock(erm->markType))
      lrstate->lr_arowMarks = lappend(lrstate->lr_arowMarks, aerm);
    else
      epq_arowmarks = lappend(epq_arowmarks, aerm);
  }
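
  /*
   * (For reference: RowMarkRequiresRowShareLock() is true exactly for the
   * locking mark types handled in ExecLockRows, ROW_MARK_EXCLUSIVE through
   * ROW_MARK_KEYSHARE; non-locking marks such as ROW_MARK_REFERENCE and
   * ROW_MARK_COPY are fetched only if an EPQ recheck occurs.)
   */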

  /* Now we have the info needed to set up EPQ state */
  EvalPlanQualInit(&lrstate->lr_epqstate, estate,
           outerPlan, epq_arowmarks, node->epqParam, NIL);

  return lrstate;
}

/* ----------------------------------------------------------------
 *    ExecEndLockRows
 *
 *    This shuts down the subplan and frees resources allocated
 *    to this node.
 * ----------------------------------------------------------------
 */
void
ExecEndLockRows(LockRowsState *node)
{
  /* We may have shut down EPQ already, but no harm in another call */
  EvalPlanQualEnd(&node->lr_epqstate);
  ExecEndNode(outerPlanState(node));
}


void
ExecReScanLockRows(LockRowsState *node)
{
  PlanState  *outerPlan = outerPlanState(node);

  /*
   * If chgParam of the subnode is not null, the plan will be re-scanned
   * by the first ExecProcNode call.
   */
  if (outerPlan->chgParam == NULL)
    ExecReScan(outerPlan);
}