/src/postgres/src/backend/tcop/pquery.c
Line | Count | Source |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * pquery.c |
4 | | * POSTGRES process query command code |
5 | | * |
6 | | * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group |
7 | | * Portions Copyright (c) 1994, Regents of the University of California |
8 | | * |
9 | | * |
10 | | * IDENTIFICATION |
11 | | * src/backend/tcop/pquery.c |
12 | | * |
13 | | *------------------------------------------------------------------------- |
14 | | */ |
15 | | |
16 | | #include "postgres.h" |
17 | | |
18 | | #include <limits.h> |
19 | | |
20 | | #include "access/xact.h" |
21 | | #include "commands/prepare.h" |
22 | | #include "executor/executor.h" |
23 | | #include "executor/tstoreReceiver.h" |
24 | | #include "miscadmin.h" |
25 | | #include "pg_trace.h" |
26 | | #include "tcop/pquery.h" |
27 | | #include "tcop/utility.h" |
28 | | #include "utils/memutils.h" |
29 | | #include "utils/snapmgr.h" |
30 | | |
31 | | |
32 | | /* |
33 | | * ActivePortal is the currently executing Portal (the most closely nested, |
34 | | * if there are several). |
35 | | */ |
36 | | Portal ActivePortal = NULL; |
37 | | |
38 | | |
39 | | static void ProcessQuery(PlannedStmt *plan, |
40 | | const char *sourceText, |
41 | | ParamListInfo params, |
42 | | QueryEnvironment *queryEnv, |
43 | | DestReceiver *dest, |
44 | | QueryCompletion *qc); |
45 | | static void FillPortalStore(Portal portal, bool isTopLevel); |
46 | | static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count, |
47 | | DestReceiver *dest); |
48 | | static uint64 PortalRunSelect(Portal portal, bool forward, long count, |
49 | | DestReceiver *dest); |
50 | | static void PortalRunUtility(Portal portal, PlannedStmt *pstmt, |
51 | | bool isTopLevel, bool setHoldSnapshot, |
52 | | DestReceiver *dest, QueryCompletion *qc); |
53 | | static void PortalRunMulti(Portal portal, |
54 | | bool isTopLevel, bool setHoldSnapshot, |
55 | | DestReceiver *dest, DestReceiver *altdest, |
56 | | QueryCompletion *qc); |
57 | | static uint64 DoPortalRunFetch(Portal portal, |
58 | | FetchDirection fdirection, |
59 | | long count, |
60 | | DestReceiver *dest); |
61 | | static void DoPortalRewind(Portal portal); |
62 | | |
63 | | |
64 | | /* |
65 | | * CreateQueryDesc |
66 | | */ |
67 | | QueryDesc * |
68 | | CreateQueryDesc(PlannedStmt *plannedstmt, |
69 | | const char *sourceText, |
70 | | Snapshot snapshot, |
71 | | Snapshot crosscheck_snapshot, |
72 | | DestReceiver *dest, |
73 | | ParamListInfo params, |
74 | | QueryEnvironment *queryEnv, |
75 | | int instrument_options) |
76 | 0 | { |
77 | 0 | QueryDesc *qd = (QueryDesc *) palloc(sizeof(QueryDesc)); |
78 | |
79 | 0 | qd->operation = plannedstmt->commandType; /* operation */ |
80 | 0 | qd->plannedstmt = plannedstmt; /* plan */ |
81 | 0 | qd->sourceText = sourceText; /* query text */ |
82 | 0 | qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */ |
83 | | /* RI check snapshot */ |
84 | 0 | qd->crosscheck_snapshot = RegisterSnapshot(crosscheck_snapshot); |
85 | 0 | qd->dest = dest; /* output dest */ |
86 | 0 | qd->params = params; /* parameter values passed into query */ |
87 | 0 | qd->queryEnv = queryEnv; |
88 | 0 | qd->instrument_options = instrument_options; /* instrumentation wanted? */ |
89 | | |
90 | | /* null these fields until set by ExecutorStart */ |
91 | 0 | qd->tupDesc = NULL; |
92 | 0 | qd->estate = NULL; |
93 | 0 | qd->planstate = NULL; |
94 | 0 | qd->totaltime = NULL; |
95 | | |
96 | | /* not yet executed */ |
97 | 0 | qd->already_executed = false; |
98 | |
99 | 0 | return qd; |
100 | 0 | } |
101 | | |
102 | | /* |
103 | | * FreeQueryDesc |
104 | | */ |
105 | | void |
106 | | FreeQueryDesc(QueryDesc *qdesc) |
107 | 0 | { |
108 | | /* Can't be a live query */ |
109 | 0 | Assert(qdesc->estate == NULL); |
110 | | |
111 | | /* forget our snapshots */ |
112 | 0 | UnregisterSnapshot(qdesc->snapshot); |
113 | 0 | UnregisterSnapshot(qdesc->crosscheck_snapshot); |
114 | | |
115 | | /* Only the QueryDesc itself need be freed */ |
116 | 0 | pfree(qdesc); |
117 | 0 | } |
118 | | |
119 | | |
120 | | /* |
121 | | * ProcessQuery |
122 | | * Execute a single plannable query within a PORTAL_MULTI_QUERY, |
123 | | * PORTAL_ONE_RETURNING, or PORTAL_ONE_MOD_WITH portal |
124 | | * |
125 | | * plan: the plan tree for the query |
126 | | * sourceText: the source text of the query |
127 | | * params: any parameters needed |
128 | | * dest: where to send results |
129 | | * qc: where to store the command completion status data. |
130 | | * |
131 | | * qc may be NULL if caller doesn't want a status string. |
132 | | * |
133 | | * Must be called in a memory context that will be reset or deleted on |
134 | | * error; otherwise the executor's memory usage will be leaked. |
135 | | */ |
136 | | static void |
137 | | ProcessQuery(PlannedStmt *plan, |
138 | | const char *sourceText, |
139 | | ParamListInfo params, |
140 | | QueryEnvironment *queryEnv, |
141 | | DestReceiver *dest, |
142 | | QueryCompletion *qc) |
143 | 0 | { |
144 | 0 | QueryDesc *queryDesc; |
145 | | |
146 | | /* |
147 | | * Create the QueryDesc object |
148 | | */ |
149 | 0 | queryDesc = CreateQueryDesc(plan, sourceText, |
150 | 0 | GetActiveSnapshot(), InvalidSnapshot, |
151 | 0 | dest, params, queryEnv, 0); |
152 | | |
153 | | /* |
154 | | * Call ExecutorStart to prepare the plan for execution |
155 | | */ |
156 | 0 | ExecutorStart(queryDesc, 0); |
157 | | |
158 | | /* |
159 | | * Run the plan to completion. |
160 | | */ |
161 | 0 | ExecutorRun(queryDesc, ForwardScanDirection, 0); |
162 | | |
163 | | /* |
164 | | * Build command completion status data, if caller wants one. |
165 | | */ |
166 | 0 | if (qc) |
167 | 0 | { |
168 | 0 | switch (queryDesc->operation) |
169 | 0 | { |
170 | 0 | case CMD_SELECT: |
171 | 0 | SetQueryCompletion(qc, CMDTAG_SELECT, queryDesc->estate->es_processed); |
172 | 0 | break; |
173 | 0 | case CMD_INSERT: |
174 | 0 | SetQueryCompletion(qc, CMDTAG_INSERT, queryDesc->estate->es_processed); |
175 | 0 | break; |
176 | 0 | case CMD_UPDATE: |
177 | 0 | SetQueryCompletion(qc, CMDTAG_UPDATE, queryDesc->estate->es_processed); |
178 | 0 | break; |
179 | 0 | case CMD_DELETE: |
180 | 0 | SetQueryCompletion(qc, CMDTAG_DELETE, queryDesc->estate->es_processed); |
181 | 0 | break; |
182 | 0 | case CMD_MERGE: |
183 | 0 | SetQueryCompletion(qc, CMDTAG_MERGE, queryDesc->estate->es_processed); |
184 | 0 | break; |
185 | 0 | default: |
186 | 0 | SetQueryCompletion(qc, CMDTAG_UNKNOWN, queryDesc->estate->es_processed); |
187 | 0 | break; |
188 | 0 | } |
189 | 0 | } |
190 | | |
191 | | /* |
192 | | * Now, we close down all the scans and free allocated resources. |
193 | | */ |
194 | 0 | ExecutorFinish(queryDesc); |
195 | 0 | ExecutorEnd(queryDesc); |
196 | |
197 | 0 | FreeQueryDesc(queryDesc); |
198 | 0 | } |
199 | | |
200 | | /* |
201 | | * ChoosePortalStrategy |
202 | | * Select portal execution strategy given the intended statement list. |
203 | | * |
204 | | * The list elements can be Querys or PlannedStmts. |
205 | | * That's more general than portals need, but plancache.c uses this too. |
206 | | * |
207 | | * See the comments in portal.h. |
208 | | */ |
209 | | PortalStrategy |
210 | | ChoosePortalStrategy(List *stmts) |
211 | 0 | { |
212 | 0 | int nSetTag; |
213 | 0 | ListCell *lc; |
214 | | |
215 | | /* |
216 | | * PORTAL_ONE_SELECT and PORTAL_UTIL_SELECT need only consider the |
217 | | * single-statement case, since there are no rewrite rules that can add |
218 | | * auxiliary queries to a SELECT or a utility command. PORTAL_ONE_MOD_WITH |
219 | | * likewise allows only one top-level statement. |
220 | | */ |
221 | 0 | if (list_length(stmts) == 1) |
222 | 0 | { |
223 | 0 | Node *stmt = (Node *) linitial(stmts); |
224 | |
225 | 0 | if (IsA(stmt, Query)) |
226 | 0 | { |
227 | 0 | Query *query = (Query *) stmt; |
228 | |
229 | 0 | if (query->canSetTag) |
230 | 0 | { |
231 | 0 | if (query->commandType == CMD_SELECT) |
232 | 0 | { |
233 | 0 | if (query->hasModifyingCTE) |
234 | 0 | return PORTAL_ONE_MOD_WITH; |
235 | 0 | else |
236 | 0 | return PORTAL_ONE_SELECT; |
237 | 0 | } |
238 | 0 | if (query->commandType == CMD_UTILITY) |
239 | 0 | { |
240 | 0 | if (UtilityReturnsTuples(query->utilityStmt)) |
241 | 0 | return PORTAL_UTIL_SELECT; |
242 | | /* it can't be ONE_RETURNING, so give up */ |
243 | 0 | return PORTAL_MULTI_QUERY; |
244 | 0 | } |
245 | 0 | } |
246 | 0 | } |
247 | 0 | else if (IsA(stmt, PlannedStmt)) |
248 | 0 | { |
249 | 0 | PlannedStmt *pstmt = (PlannedStmt *) stmt; |
250 | |
251 | 0 | if (pstmt->canSetTag) |
252 | 0 | { |
253 | 0 | if (pstmt->commandType == CMD_SELECT) |
254 | 0 | { |
255 | 0 | if (pstmt->hasModifyingCTE) |
256 | 0 | return PORTAL_ONE_MOD_WITH; |
257 | 0 | else |
258 | 0 | return PORTAL_ONE_SELECT; |
259 | 0 | } |
260 | 0 | if (pstmt->commandType == CMD_UTILITY) |
261 | 0 | { |
262 | 0 | if (UtilityReturnsTuples(pstmt->utilityStmt)) |
263 | 0 | return PORTAL_UTIL_SELECT; |
264 | | /* it can't be ONE_RETURNING, so give up */ |
265 | 0 | return PORTAL_MULTI_QUERY; |
266 | 0 | } |
267 | 0 | } |
268 | 0 | } |
269 | 0 | else |
270 | 0 | elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt)); |
271 | 0 | } |
272 | | |
273 | | /* |
274 | | * PORTAL_ONE_RETURNING has to allow auxiliary queries added by rewrite. |
275 | | * Choose PORTAL_ONE_RETURNING if there is exactly one canSetTag query and |
276 | | * it has a RETURNING list. |
277 | | */ |
278 | 0 | nSetTag = 0; |
279 | 0 | foreach(lc, stmts) |
280 | 0 | { |
281 | 0 | Node *stmt = (Node *) lfirst(lc); |
282 | |
283 | 0 | if (IsA(stmt, Query)) |
284 | 0 | { |
285 | 0 | Query *query = (Query *) stmt; |
286 | |
287 | 0 | if (query->canSetTag) |
288 | 0 | { |
289 | 0 | if (++nSetTag > 1) |
290 | 0 | return PORTAL_MULTI_QUERY; /* no need to look further */ |
291 | 0 | if (query->commandType == CMD_UTILITY || |
292 | 0 | query->returningList == NIL) |
293 | 0 | return PORTAL_MULTI_QUERY; /* no need to look further */ |
294 | 0 | } |
295 | 0 | } |
296 | 0 | else if (IsA(stmt, PlannedStmt)) |
297 | 0 | { |
298 | 0 | PlannedStmt *pstmt = (PlannedStmt *) stmt; |
299 | |
300 | 0 | if (pstmt->canSetTag) |
301 | 0 | { |
302 | 0 | if (++nSetTag > 1) |
303 | 0 | return PORTAL_MULTI_QUERY; /* no need to look further */ |
304 | 0 | if (pstmt->commandType == CMD_UTILITY || |
305 | 0 | !pstmt->hasReturning) |
306 | 0 | return PORTAL_MULTI_QUERY; /* no need to look further */ |
307 | 0 | } |
308 | 0 | } |
309 | 0 | else |
310 | 0 | elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt)); |
311 | 0 | } |
312 | 0 | if (nSetTag == 1) |
313 | 0 | return PORTAL_ONE_RETURNING; |
314 | | |
315 | | /* Else, it's the general case... */ |
316 | 0 | return PORTAL_MULTI_QUERY; |
317 | 0 | } |
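/*
 * Summary of the mapping above (illustrative only, not part of pquery.c):
 *
 *   single SELECT, no modifying CTE                 -> PORTAL_ONE_SELECT
 *   single SELECT containing a modifying CTE        -> PORTAL_ONE_MOD_WITH
 *   single utility statement that returns tuples    -> PORTAL_UTIL_SELECT
 *   exactly one canSetTag statement with RETURNING  -> PORTAL_ONE_RETURNING
 *   anything else                                   -> PORTAL_MULTI_QUERY
 */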
318 | | |
319 | | /* |
320 | | * FetchPortalTargetList |
321 | | * Given a portal that returns tuples, extract the query targetlist. |
322 | | * Returns NIL if the portal doesn't have a determinable targetlist. |
323 | | * |
324 | | * Note: do not modify the result. |
325 | | */ |
326 | | List * |
327 | | FetchPortalTargetList(Portal portal) |
328 | 0 | { |
329 | | /* no point in looking if we determined it doesn't return tuples */ |
330 | 0 | if (portal->strategy == PORTAL_MULTI_QUERY) |
331 | 0 | return NIL; |
332 | | /* get the primary statement and find out what it returns */ |
333 | 0 | return FetchStatementTargetList((Node *) PortalGetPrimaryStmt(portal)); |
334 | 0 | } |
335 | | |
336 | | /* |
337 | | * FetchStatementTargetList |
338 | | * Given a statement that returns tuples, extract the query targetlist. |
339 | | * Returns NIL if the statement doesn't have a determinable targetlist. |
340 | | * |
341 | | * This can be applied to a Query or a PlannedStmt. |
342 | | * That's more general than portals need, but plancache.c uses this too. |
343 | | * |
344 | | * Note: do not modify the result. |
345 | | * |
346 | | * XXX be careful to keep this in sync with UtilityReturnsTuples. |
347 | | */ |
348 | | List * |
349 | | FetchStatementTargetList(Node *stmt) |
350 | 0 | { |
351 | 0 | if (stmt == NULL) |
352 | 0 | return NIL; |
353 | 0 | if (IsA(stmt, Query)) |
354 | 0 | { |
355 | 0 | Query *query = (Query *) stmt; |
356 | |
357 | 0 | if (query->commandType == CMD_UTILITY) |
358 | 0 | { |
359 | | /* transfer attention to utility statement */ |
360 | 0 | stmt = query->utilityStmt; |
361 | 0 | } |
362 | 0 | else |
363 | 0 | { |
364 | 0 | if (query->commandType == CMD_SELECT) |
365 | 0 | return query->targetList; |
366 | 0 | if (query->returningList) |
367 | 0 | return query->returningList; |
368 | 0 | return NIL; |
369 | 0 | } |
370 | 0 | } |
371 | 0 | if (IsA(stmt, PlannedStmt)) |
372 | 0 | { |
373 | 0 | PlannedStmt *pstmt = (PlannedStmt *) stmt; |
374 | |
375 | 0 | if (pstmt->commandType == CMD_UTILITY) |
376 | 0 | { |
377 | | /* transfer attention to utility statement */ |
378 | 0 | stmt = pstmt->utilityStmt; |
379 | 0 | } |
380 | 0 | else |
381 | 0 | { |
382 | 0 | if (pstmt->commandType == CMD_SELECT) |
383 | 0 | return pstmt->planTree->targetlist; |
384 | 0 | if (pstmt->hasReturning) |
385 | 0 | return pstmt->planTree->targetlist; |
386 | 0 | return NIL; |
387 | 0 | } |
388 | 0 | } |
389 | 0 | if (IsA(stmt, FetchStmt)) |
390 | 0 | { |
391 | 0 | FetchStmt *fstmt = (FetchStmt *) stmt; |
392 | 0 | Portal subportal; |
393 | |
394 | 0 | Assert(!fstmt->ismove); |
395 | 0 | subportal = GetPortalByName(fstmt->portalname); |
396 | 0 | Assert(PortalIsValid(subportal)); |
397 | 0 | return FetchPortalTargetList(subportal); |
398 | 0 | } |
399 | 0 | if (IsA(stmt, ExecuteStmt)) |
400 | 0 | { |
401 | 0 | ExecuteStmt *estmt = (ExecuteStmt *) stmt; |
402 | 0 | PreparedStatement *entry; |
403 | |
404 | 0 | entry = FetchPreparedStatement(estmt->name, true); |
405 | 0 | return FetchPreparedStatementTargetList(entry); |
406 | 0 | } |
407 | 0 | return NIL; |
408 | 0 | } |
409 | | |
410 | | /* |
411 | | * PortalStart |
412 | | * Prepare a portal for execution. |
413 | | * |
414 | | * Caller must already have created the portal, done PortalDefineQuery(), |
415 | | * and adjusted portal options if needed. |
416 | | * |
417 | | * If parameters are needed by the query, they must be passed in "params" |
418 | | * (caller is responsible for giving them appropriate lifetime). |
419 | | * |
420 | | * The caller can also provide an initial set of "eflags" to be passed to |
421 | | * ExecutorStart (but note these can be modified internally, and they are |
422 | | * currently only honored for PORTAL_ONE_SELECT portals). Most callers |
423 | | * should simply pass zero. |
424 | | * |
425 | | * The caller can optionally pass a snapshot to be used; pass InvalidSnapshot |
426 | | * for the normal behavior of setting a new snapshot. This parameter is |
427 | | * presently ignored for non-PORTAL_ONE_SELECT portals (it's only intended |
428 | | * to be used for cursors). |
429 | | * |
430 | | * On return, portal is ready to accept PortalRun() calls, and the result |
431 | | * tupdesc (if any) is known. |
432 | | */ |
433 | | void |
434 | | PortalStart(Portal portal, ParamListInfo params, |
435 | | int eflags, Snapshot snapshot) |
436 | 0 | { |
437 | 0 | Portal saveActivePortal; |
438 | 0 | ResourceOwner saveResourceOwner; |
439 | 0 | MemoryContext savePortalContext; |
440 | 0 | MemoryContext oldContext; |
441 | 0 | QueryDesc *queryDesc; |
442 | 0 | int myeflags; |
443 | |
444 | 0 | Assert(PortalIsValid(portal)); |
445 | 0 | Assert(portal->status == PORTAL_DEFINED); |
446 | | |
447 | | /* |
448 | | * Set up global portal context pointers. |
449 | | */ |
450 | 0 | saveActivePortal = ActivePortal; |
451 | 0 | saveResourceOwner = CurrentResourceOwner; |
452 | 0 | savePortalContext = PortalContext; |
453 | 0 | PG_TRY(); |
454 | 0 | { |
455 | 0 | ActivePortal = portal; |
456 | 0 | if (portal->resowner) |
457 | 0 | CurrentResourceOwner = portal->resowner; |
458 | 0 | PortalContext = portal->portalContext; |
459 | |
460 | 0 | oldContext = MemoryContextSwitchTo(PortalContext); |
461 | | |
462 | | /* Must remember portal param list, if any */ |
463 | 0 | portal->portalParams = params; |
464 | | |
465 | | /* |
466 | | * Determine the portal execution strategy |
467 | | */ |
468 | 0 | portal->strategy = ChoosePortalStrategy(portal->stmts); |
469 | | |
470 | | /* |
471 | | * Fire her up according to the strategy |
472 | | */ |
473 | 0 | switch (portal->strategy) |
474 | 0 | { |
475 | 0 | case PORTAL_ONE_SELECT: |
476 | | |
477 | | /* Must set snapshot before starting executor. */ |
478 | 0 | if (snapshot) |
479 | 0 | PushActiveSnapshot(snapshot); |
480 | 0 | else |
481 | 0 | PushActiveSnapshot(GetTransactionSnapshot()); |
482 | | |
483 | | /* |
484 | | * We could remember the snapshot in portal->portalSnapshot, |
485 | | * but presently there seems no need to, as this code path |
486 | | * cannot be used for non-atomic execution. Hence there can't |
487 | | * be any commit/abort that might destroy the snapshot. Since |
488 | | * we don't do that, there's also no need to force a |
489 | | * non-default nesting level for the snapshot. |
490 | | */ |
491 | | |
492 | | /* |
493 | | * Create QueryDesc in portal's context; for the moment, set |
494 | | * the destination to DestNone. |
495 | | */ |
496 | 0 | queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts), |
497 | 0 | portal->sourceText, |
498 | 0 | GetActiveSnapshot(), |
499 | 0 | InvalidSnapshot, |
500 | 0 | None_Receiver, |
501 | 0 | params, |
502 | 0 | portal->queryEnv, |
503 | 0 | 0); |
504 | | |
505 | | /* |
506 | | * If it's a scrollable cursor, executor needs to support |
507 | | * REWIND and backwards scan, as well as whatever the caller |
508 | | * might've asked for. |
509 | | */ |
510 | 0 | if (portal->cursorOptions & CURSOR_OPT_SCROLL) |
511 | 0 | myeflags = eflags | EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD; |
512 | 0 | else |
513 | 0 | myeflags = eflags; |
514 | | |
515 | | /* |
516 | | * Call ExecutorStart to prepare the plan for execution |
517 | | */ |
518 | 0 | ExecutorStart(queryDesc, myeflags); |
519 | | |
520 | | /* |
521 | | * This tells PortalCleanup to shut down the executor |
522 | | */ |
523 | 0 | portal->queryDesc = queryDesc; |
524 | | |
525 | | /* |
526 | | * Remember tuple descriptor (computed by ExecutorStart) |
527 | | */ |
528 | 0 | portal->tupDesc = queryDesc->tupDesc; |
529 | | |
530 | | /* |
531 | | * Reset cursor position data to "start of query" |
532 | | */ |
533 | 0 | portal->atStart = true; |
534 | 0 | portal->atEnd = false; /* allow fetches */ |
535 | 0 | portal->portalPos = 0; |
536 | |
537 | 0 | PopActiveSnapshot(); |
538 | 0 | break; |
539 | | |
540 | 0 | case PORTAL_ONE_RETURNING: |
541 | 0 | case PORTAL_ONE_MOD_WITH: |
542 | | |
543 | | /* |
544 | | * We don't start the executor until we are told to run the |
545 | | * portal. We do need to set up the result tupdesc. |
546 | | */ |
547 | 0 | { |
548 | 0 | PlannedStmt *pstmt; |
549 | |
550 | 0 | pstmt = PortalGetPrimaryStmt(portal); |
551 | 0 | portal->tupDesc = |
552 | 0 | ExecCleanTypeFromTL(pstmt->planTree->targetlist); |
553 | 0 | } |
554 | | |
555 | | /* |
556 | | * Reset cursor position data to "start of query" |
557 | | */ |
558 | 0 | portal->atStart = true; |
559 | 0 | portal->atEnd = false; /* allow fetches */ |
560 | 0 | portal->portalPos = 0; |
561 | 0 | break; |
562 | | |
563 | 0 | case PORTAL_UTIL_SELECT: |
564 | | |
565 | | /* |
566 | | * We don't set snapshot here, because PortalRunUtility will |
567 | | * take care of it if needed. |
568 | | */ |
569 | 0 | { |
570 | 0 | PlannedStmt *pstmt = PortalGetPrimaryStmt(portal); |
571 | |
572 | 0 | Assert(pstmt->commandType == CMD_UTILITY); |
573 | 0 | portal->tupDesc = UtilityTupleDescriptor(pstmt->utilityStmt); |
574 | 0 | } |
575 | | |
576 | | /* |
577 | | * Reset cursor position data to "start of query" |
578 | | */ |
579 | 0 | portal->atStart = true; |
580 | 0 | portal->atEnd = false; /* allow fetches */ |
581 | 0 | portal->portalPos = 0; |
582 | 0 | break; |
583 | | |
584 | 0 | case PORTAL_MULTI_QUERY: |
585 | | /* Need do nothing now */ |
586 | 0 | portal->tupDesc = NULL; |
587 | 0 | break; |
588 | 0 | } |
589 | 0 | } |
590 | 0 | PG_CATCH(); |
591 | 0 | { |
592 | | /* Uncaught error while executing portal: mark it dead */ |
593 | 0 | MarkPortalFailed(portal); |
594 | | |
595 | | /* Restore global vars and propagate error */ |
596 | 0 | ActivePortal = saveActivePortal; |
597 | 0 | CurrentResourceOwner = saveResourceOwner; |
598 | 0 | PortalContext = savePortalContext; |
599 | |
600 | 0 | PG_RE_THROW(); |
601 | 0 | } |
602 | 0 | PG_END_TRY(); |
603 | | |
604 | 0 | MemoryContextSwitchTo(oldContext); |
605 | |
606 | 0 | ActivePortal = saveActivePortal; |
607 | 0 | CurrentResourceOwner = saveResourceOwner; |
608 | 0 | PortalContext = savePortalContext; |
609 | |
610 | 0 | portal->status = PORTAL_READY; |
611 | 0 | } |
612 | | |
613 | | /* |
614 | | * PortalSetResultFormat |
615 | | * Select the format codes for a portal's output. |
616 | | * |
617 | | * This must be run after PortalStart for a portal that will be read by |
618 | | * a DestRemote or DestRemoteExecute destination. It is not presently needed |
619 | | * for other destination types. |
620 | | * |
621 | | * formats[] is the client format request, as per Bind message conventions. |
622 | | */ |
623 | | void |
624 | | PortalSetResultFormat(Portal portal, int nFormats, int16 *formats) |
625 | 0 | { |
626 | 0 | int natts; |
627 | 0 | int i; |
628 | | |
629 | | /* Do nothing if portal won't return tuples */ |
630 | 0 | if (portal->tupDesc == NULL) |
631 | 0 | return; |
632 | 0 | natts = portal->tupDesc->natts; |
633 | 0 | portal->formats = (int16 *) |
634 | 0 | MemoryContextAlloc(portal->portalContext, |
635 | 0 | natts * sizeof(int16)); |
636 | 0 | if (nFormats > 1) |
637 | 0 | { |
638 | | /* format specified for each column */ |
639 | 0 | if (nFormats != natts) |
640 | 0 | ereport(ERROR, |
641 | 0 | (errcode(ERRCODE_PROTOCOL_VIOLATION), |
642 | 0 | errmsg("bind message has %d result formats but query has %d columns", |
643 | 0 | nFormats, natts))); |
644 | 0 | memcpy(portal->formats, formats, natts * sizeof(int16)); |
645 | 0 | } |
646 | 0 | else if (nFormats > 0) |
647 | 0 | { |
648 | | /* single format specified, use for all columns */ |
649 | 0 | int16 format1 = formats[0]; |
650 | |
651 | 0 | for (i = 0; i < natts; i++) |
652 | 0 | portal->formats[i] = format1; |
653 | 0 | } |
654 | 0 | else |
655 | 0 | { |
656 | | /* use default format for all columns */ |
657 | 0 | for (i = 0; i < natts; i++) |
658 | 0 | portal->formats[i] = 0; |
659 | 0 | } |
660 | 0 | } |
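/*
 * Worked example of the Bind-message convention handled above
 * (illustrative only, not part of pquery.c): for a three-column result,
 * nFormats = 0 yields formats {0,0,0} (all text); nFormats = 1 with
 * formats[0] = 1 yields {1,1,1} (all binary); nFormats = 3 copies the
 * client's array verbatim, e.g. {0,1,0}.  Any nFormats > 1 that does
 * not equal the column count draws the protocol-violation error.
 */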
661 | | |
662 | | /* |
663 | | * PortalRun |
664 | | * Run a portal's query or queries. |
665 | | * |
666 | | * count <= 0 is interpreted as a no-op: the destination gets started up |
667 | | * and shut down, but nothing else happens. Also, count == FETCH_ALL is |
668 | | * interpreted as "all rows". Note that count is ignored in multi-query |
669 | | * situations, where we always run the portal to completion. |
670 | | * |
671 | | * isTopLevel: true if query is being executed at backend "top level" |
672 | | * (that is, directly from a client command message) |
673 | | * |
674 | | * dest: where to send output of primary (canSetTag) query |
675 | | * |
676 | | * altdest: where to send output of non-primary queries |
677 | | * |
678 | | * qc: where to store command completion status data. |
679 | | * May be NULL if caller doesn't want status data. |
680 | | * |
681 | | * Returns true if the portal's execution is complete, false if it was |
682 | | * suspended due to exhaustion of the count parameter. |
683 | | */ |
684 | | bool |
685 | | PortalRun(Portal portal, long count, bool isTopLevel, |
686 | | DestReceiver *dest, DestReceiver *altdest, |
687 | | QueryCompletion *qc) |
688 | 0 | { |
689 | 0 | bool result; |
690 | 0 | uint64 nprocessed; |
691 | 0 | ResourceOwner saveTopTransactionResourceOwner; |
692 | 0 | MemoryContext saveTopTransactionContext; |
693 | 0 | Portal saveActivePortal; |
694 | 0 | ResourceOwner saveResourceOwner; |
695 | 0 | MemoryContext savePortalContext; |
696 | 0 | MemoryContext saveMemoryContext; |
697 | |
698 | 0 | Assert(PortalIsValid(portal)); |
699 | |
700 | 0 | TRACE_POSTGRESQL_QUERY_EXECUTE_START(); |
701 | | |
702 | | /* Initialize empty completion data */ |
703 | 0 | if (qc) |
704 | 0 | InitializeQueryCompletion(qc); |
705 | |
706 | 0 | if (log_executor_stats && portal->strategy != PORTAL_MULTI_QUERY) |
707 | 0 | { |
708 | 0 | elog(DEBUG3, "PortalRun"); |
709 | | /* PORTAL_MULTI_QUERY logs its own stats per query */ |
710 | 0 | ResetUsage(); |
711 | 0 | } |
712 | | |
713 | | /* |
714 | | * Check for improper portal use, and mark portal active. |
715 | | */ |
716 | 0 | MarkPortalActive(portal); |
717 | | |
718 | | /* |
719 | | * Set up global portal context pointers. |
720 | | * |
721 | | * We have to play a special game here to support utility commands like |
722 | | * VACUUM and CLUSTER, which internally start and commit transactions. |
723 | | * When we are called to execute such a command, CurrentResourceOwner will |
724 | | * be pointing to the TopTransactionResourceOwner --- which will be |
725 | | * destroyed and replaced in the course of the internal commit and |
726 | | * restart. So we need to be prepared to restore it as pointing to the |
727 | | * exit-time TopTransactionResourceOwner. (Ain't that ugly? This idea of |
728 | | * internally starting whole new transactions is not good.) |
729 | | * CurrentMemoryContext has a similar problem, but the other pointers we |
730 | | * save here will be NULL or pointing to longer-lived objects. |
731 | | */ |
732 | 0 | saveTopTransactionResourceOwner = TopTransactionResourceOwner; |
733 | 0 | saveTopTransactionContext = TopTransactionContext; |
734 | 0 | saveActivePortal = ActivePortal; |
735 | 0 | saveResourceOwner = CurrentResourceOwner; |
736 | 0 | savePortalContext = PortalContext; |
737 | 0 | saveMemoryContext = CurrentMemoryContext; |
738 | 0 | PG_TRY(); |
739 | 0 | { |
740 | 0 | ActivePortal = portal; |
741 | 0 | if (portal->resowner) |
742 | 0 | CurrentResourceOwner = portal->resowner; |
743 | 0 | PortalContext = portal->portalContext; |
744 | |
745 | 0 | MemoryContextSwitchTo(PortalContext); |
746 | |
747 | 0 | switch (portal->strategy) |
748 | 0 | { |
749 | 0 | case PORTAL_ONE_SELECT: |
750 | 0 | case PORTAL_ONE_RETURNING: |
751 | 0 | case PORTAL_ONE_MOD_WITH: |
752 | 0 | case PORTAL_UTIL_SELECT: |
753 | | |
754 | | /* |
755 | | * If we have not yet run the command, do so, storing its |
756 | | * results in the portal's tuplestore. But we don't do that |
757 | | * for the PORTAL_ONE_SELECT case. |
758 | | */ |
759 | 0 | if (portal->strategy != PORTAL_ONE_SELECT && !portal->holdStore) |
760 | 0 | FillPortalStore(portal, isTopLevel); |
761 | | |
762 | | /* |
763 | | * Now fetch desired portion of results. |
764 | | */ |
765 | 0 | nprocessed = PortalRunSelect(portal, true, count, dest); |
766 | | |
767 | | /* |
768 | | * If the portal result contains a command tag and the caller |
769 | | * gave us a pointer to store it, copy it and update the |
770 | | * rowcount. |
771 | | */ |
772 | 0 | if (qc && portal->qc.commandTag != CMDTAG_UNKNOWN) |
773 | 0 | { |
774 | 0 | CopyQueryCompletion(qc, &portal->qc); |
775 | 0 | qc->nprocessed = nprocessed; |
776 | 0 | } |
777 | | |
778 | | /* Mark portal not active */ |
779 | 0 | portal->status = PORTAL_READY; |
780 | | |
781 | | /* |
782 | | * Since it's a forward fetch, say DONE iff atEnd is now true. |
783 | | */ |
784 | 0 | result = portal->atEnd; |
785 | 0 | break; |
786 | | |
787 | 0 | case PORTAL_MULTI_QUERY: |
788 | 0 | PortalRunMulti(portal, isTopLevel, false, |
789 | 0 | dest, altdest, qc); |
790 | | |
791 | | /* Prevent portal's commands from being re-executed */ |
792 | 0 | MarkPortalDone(portal); |
793 | | |
794 | | /* Always complete at end of RunMulti */ |
795 | 0 | result = true; |
796 | 0 | break; |
797 | | |
798 | 0 | default: |
799 | 0 | elog(ERROR, "unrecognized portal strategy: %d", |
800 | 0 | (int) portal->strategy); |
801 | 0 | result = false; /* keep compiler quiet */ |
802 | 0 | break; |
803 | 0 | } |
804 | 0 | } |
805 | 0 | PG_CATCH(); |
806 | 0 | { |
807 | | /* Uncaught error while executing portal: mark it dead */ |
808 | 0 | MarkPortalFailed(portal); |
809 | | |
810 | | /* Restore global vars and propagate error */ |
811 | 0 | if (saveMemoryContext == saveTopTransactionContext) |
812 | 0 | MemoryContextSwitchTo(TopTransactionContext); |
813 | 0 | else |
814 | 0 | MemoryContextSwitchTo(saveMemoryContext); |
815 | 0 | ActivePortal = saveActivePortal; |
816 | 0 | if (saveResourceOwner == saveTopTransactionResourceOwner) |
817 | 0 | CurrentResourceOwner = TopTransactionResourceOwner; |
818 | 0 | else |
819 | 0 | CurrentResourceOwner = saveResourceOwner; |
820 | 0 | PortalContext = savePortalContext; |
821 | |
822 | 0 | PG_RE_THROW(); |
823 | 0 | } |
824 | 0 | PG_END_TRY(); |
825 | | |
826 | 0 | if (saveMemoryContext == saveTopTransactionContext) |
827 | 0 | MemoryContextSwitchTo(TopTransactionContext); |
828 | 0 | else |
829 | 0 | MemoryContextSwitchTo(saveMemoryContext); |
830 | 0 | ActivePortal = saveActivePortal; |
831 | 0 | if (saveResourceOwner == saveTopTransactionResourceOwner) |
832 | 0 | CurrentResourceOwner = TopTransactionResourceOwner; |
833 | 0 | else |
834 | 0 | CurrentResourceOwner = saveResourceOwner; |
835 | 0 | PortalContext = savePortalContext; |
836 | |
837 | 0 | if (log_executor_stats && portal->strategy != PORTAL_MULTI_QUERY) |
838 | 0 | ShowUsage("EXECUTOR STATISTICS"); |
839 | |
840 | 0 | TRACE_POSTGRESQL_QUERY_EXECUTE_DONE(); |
841 | |
842 | 0 | return result; |
843 | 0 | } |
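/*
 * Illustrative call sequence (hypothetical row counts, not part of
 * pquery.c): running a PORTAL_ONE_SELECT portal over a 25-row result
 * with count = 10 returns false after each of the first two calls
 * (10 rows each, portal suspended) and true on the third, which
 * delivers the remaining 5 rows and leaves portal->atEnd set.
 */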
844 | | |
845 | | /* |
846 | | * PortalRunSelect |
847 | | * Execute a portal's query in PORTAL_ONE_SELECT mode, and also |
848 | | * when fetching from a completed holdStore in PORTAL_ONE_RETURNING, |
849 | | * PORTAL_ONE_MOD_WITH, and PORTAL_UTIL_SELECT cases. |
850 | | * |
851 | | * This handles simple N-rows-forward-or-backward cases. For more complex |
852 | | * nonsequential access to a portal, see PortalRunFetch. |
853 | | * |
854 | | * count <= 0 is interpreted as a no-op: the destination gets started up |
855 | | * and shut down, but nothing else happens. Also, count == FETCH_ALL is |
856 | | * interpreted as "all rows". (cf FetchStmt.howMany) |
857 | | * |
858 | | * Caller must already have validated the Portal and done appropriate |
859 | | * setup (cf. PortalRun). |
860 | | * |
861 | | * Returns number of rows processed (suitable for use in result tag) |
862 | | */ |
863 | | static uint64 |
864 | | PortalRunSelect(Portal portal, |
865 | | bool forward, |
866 | | long count, |
867 | | DestReceiver *dest) |
868 | 0 | { |
869 | 0 | QueryDesc *queryDesc; |
870 | 0 | ScanDirection direction; |
871 | 0 | uint64 nprocessed; |
872 | | |
873 | | /* |
874 | | * NB: queryDesc will be NULL if we are fetching from a held cursor or a |
875 | | * completed utility query; can't use it in that path. |
876 | | */ |
877 | 0 | queryDesc = portal->queryDesc; |
878 | | |
879 | | /* Caller messed up if we have neither a ready query nor held data. */ |
880 | 0 | Assert(queryDesc || portal->holdStore); |
881 | | |
882 | | /* |
883 | | * Force the queryDesc destination to the right thing. This supports |
884 | | * MOVE, for example, which will pass in dest = DestNone. This is okay to |
885 | | * change as long as we do it on every fetch. (The Executor must not |
886 | | * assume that dest never changes.) |
887 | | */ |
888 | 0 | if (queryDesc) |
889 | 0 | queryDesc->dest = dest; |
890 | | |
891 | | /* |
892 | | * Determine which direction to go in, and check to see if we're already |
893 | | * at the end of the available tuples in that direction. If so, set the |
894 | | * direction to NoMovement to avoid trying to fetch any tuples. (This |
895 | | * check exists because not all plan node types are robust about being |
896 | | * called again if they've already returned NULL once.) Then call the |
897 | | * executor (we must not skip this, because the destination needs to see a |
898 | | * setup and shutdown even if no tuples are available). Finally, update |
899 | | * the portal position state depending on the number of tuples that were |
900 | | * retrieved. |
901 | | */ |
902 | 0 | if (forward) |
903 | 0 | { |
904 | 0 | if (portal->atEnd || count <= 0) |
905 | 0 | { |
906 | 0 | direction = NoMovementScanDirection; |
907 | 0 | count = 0; /* don't pass negative count to executor */ |
908 | 0 | } |
909 | 0 | else |
910 | 0 | direction = ForwardScanDirection; |
911 | | |
912 | | /* In the executor, zero count processes all rows */ |
913 | 0 | if (count == FETCH_ALL) |
914 | 0 | count = 0; |
915 | |
916 | 0 | if (portal->holdStore) |
917 | 0 | nprocessed = RunFromStore(portal, direction, (uint64) count, dest); |
918 | 0 | else |
919 | 0 | { |
920 | 0 | PushActiveSnapshot(queryDesc->snapshot); |
921 | 0 | ExecutorRun(queryDesc, direction, (uint64) count); |
922 | 0 | nprocessed = queryDesc->estate->es_processed; |
923 | 0 | PopActiveSnapshot(); |
924 | 0 | } |
925 | |
926 | 0 | if (!ScanDirectionIsNoMovement(direction)) |
927 | 0 | { |
928 | 0 | if (nprocessed > 0) |
929 | 0 | portal->atStart = false; /* OK to go backward now */ |
930 | 0 | if (count == 0 || nprocessed < (uint64) count) |
931 | 0 | portal->atEnd = true; /* we retrieved 'em all */ |
932 | 0 | portal->portalPos += nprocessed; |
933 | 0 | } |
934 | 0 | } |
935 | 0 | else |
936 | 0 | { |
937 | 0 | if (portal->cursorOptions & CURSOR_OPT_NO_SCROLL) |
938 | 0 | ereport(ERROR, |
939 | 0 | (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), |
940 | 0 | errmsg("cursor can only scan forward"), |
941 | 0 | errhint("Declare it with SCROLL option to enable backward scan."))); |
942 | | |
943 | 0 | if (portal->atStart || count <= 0) |
944 | 0 | { |
945 | 0 | direction = NoMovementScanDirection; |
946 | 0 | count = 0; /* don't pass negative count to executor */ |
947 | 0 | } |
948 | 0 | else |
949 | 0 | direction = BackwardScanDirection; |
950 | | |
951 | | /* In the executor, zero count processes all rows */ |
952 | 0 | if (count == FETCH_ALL) |
953 | 0 | count = 0; |
954 | |
955 | 0 | if (portal->holdStore) |
956 | 0 | nprocessed = RunFromStore(portal, direction, (uint64) count, dest); |
957 | 0 | else |
958 | 0 | { |
959 | 0 | PushActiveSnapshot(queryDesc->snapshot); |
960 | 0 | ExecutorRun(queryDesc, direction, (uint64) count); |
961 | 0 | nprocessed = queryDesc->estate->es_processed; |
962 | 0 | PopActiveSnapshot(); |
963 | 0 | } |
964 | |
965 | 0 | if (!ScanDirectionIsNoMovement(direction)) |
966 | 0 | { |
967 | 0 | if (nprocessed > 0 && portal->atEnd) |
968 | 0 | { |
969 | 0 | portal->atEnd = false; /* OK to go forward now */ |
970 | 0 | portal->portalPos++; /* adjust for endpoint case */ |
971 | 0 | } |
972 | 0 | if (count == 0 || nprocessed < (uint64) count) |
973 | 0 | { |
974 | 0 | portal->atStart = true; /* we retrieved 'em all */ |
975 | 0 | portal->portalPos = 0; |
976 | 0 | } |
977 | 0 | else |
978 | 0 | { |
979 | 0 | portal->portalPos -= nprocessed; |
980 | 0 | } |
981 | 0 | } |
982 | 0 | } |
983 | | |
984 | 0 | return nprocessed; |
985 | 0 | } |
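/*
 * Position bookkeeping example (hypothetical numbers, not part of
 * pquery.c): with 3 rows left, a forward fetch with count = 5 reports
 * nprocessed = 3; since 3 < 5, atEnd becomes true and portalPos
 * advances by 3.  FETCH_ALL reaches the executor as count = 0, which
 * it treats as "no limit".
 */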
986 | | |
987 | | /* |
988 | | * FillPortalStore |
989 | | * Run the query and load result tuples into the portal's tuple store. |
990 | | * |
991 | | * This is used for PORTAL_ONE_RETURNING, PORTAL_ONE_MOD_WITH, and |
992 | | * PORTAL_UTIL_SELECT cases only. |
993 | | */ |
994 | | static void |
995 | | FillPortalStore(Portal portal, bool isTopLevel) |
996 | 0 | { |
997 | 0 | DestReceiver *treceiver; |
998 | 0 | QueryCompletion qc; |
999 | |
1000 | 0 | InitializeQueryCompletion(&qc); |
1001 | 0 | PortalCreateHoldStore(portal); |
1002 | 0 | treceiver = CreateDestReceiver(DestTuplestore); |
1003 | 0 | SetTuplestoreDestReceiverParams(treceiver, |
1004 | 0 | portal->holdStore, |
1005 | 0 | portal->holdContext, |
1006 | 0 | false, |
1007 | 0 | NULL, |
1008 | 0 | NULL); |
1009 | |
1010 | 0 | switch (portal->strategy) |
1011 | 0 | { |
1012 | 0 | case PORTAL_ONE_RETURNING: |
1013 | 0 | case PORTAL_ONE_MOD_WITH: |
1014 | | |
1015 | | /* |
1016 | | * Run the portal to completion just as for the default |
1017 | | * PORTAL_MULTI_QUERY case, but send the primary query's output to |
1018 | | * the tuplestore. Auxiliary query outputs are discarded. Set the |
1019 | | * portal's holdSnapshot to the snapshot used (or a copy of it). |
1020 | | */ |
1021 | 0 | PortalRunMulti(portal, isTopLevel, true, |
1022 | 0 | treceiver, None_Receiver, &qc); |
1023 | 0 | break; |
1024 | | |
1025 | 0 | case PORTAL_UTIL_SELECT: |
1026 | 0 | PortalRunUtility(portal, linitial_node(PlannedStmt, portal->stmts), |
1027 | 0 | isTopLevel, true, treceiver, &qc); |
1028 | 0 | break; |
1029 | | |
1030 | 0 | default: |
1031 | 0 | elog(ERROR, "unsupported portal strategy: %d", |
1032 | 0 | (int) portal->strategy); |
1033 | 0 | break; |
1034 | 0 | } |
1035 | | |
1036 | | /* Override portal completion data with actual command results */ |
1037 | 0 | if (qc.commandTag != CMDTAG_UNKNOWN) |
1038 | 0 | CopyQueryCompletion(&portal->qc, &qc); |
1039 | |
1040 | 0 | treceiver->rDestroy(treceiver); |
1041 | 0 | } |
1042 | | |
1043 | | /* |
1044 | | * RunFromStore |
1045 | | * Fetch tuples from the portal's tuple store. |
1046 | | * |
1047 | | * Calling conventions are similar to ExecutorRun, except that we |
1048 | | * do not depend on having a queryDesc or estate. Therefore we return the |
1049 | | * number of tuples processed as the result, not in estate->es_processed. |
1050 | | * |
1051 | | * One difference from ExecutorRun is that the destination receiver functions |
1052 | | * are run in the caller's memory context (since we have no estate). Watch |
1053 | | * out for memory leaks. |
1054 | | */ |
1055 | | static uint64 |
1056 | | RunFromStore(Portal portal, ScanDirection direction, uint64 count, |
1057 | | DestReceiver *dest) |
1058 | 0 | { |
1059 | 0 | uint64 current_tuple_count = 0; |
1060 | 0 | TupleTableSlot *slot; |
1061 | |
1062 | 0 | slot = MakeSingleTupleTableSlot(portal->tupDesc, &TTSOpsMinimalTuple); |
1063 | |
1064 | 0 | dest->rStartup(dest, CMD_SELECT, portal->tupDesc); |
1065 | |
1066 | 0 | if (ScanDirectionIsNoMovement(direction)) |
1067 | 0 | { |
1068 | | /* do nothing except start/stop the destination */ |
1069 | 0 | } |
1070 | 0 | else |
1071 | 0 | { |
1072 | 0 | bool forward = ScanDirectionIsForward(direction); |
1073 | |
1074 | 0 | for (;;) |
1075 | 0 | { |
1076 | 0 | MemoryContext oldcontext; |
1077 | 0 | bool ok; |
1078 | |
1079 | 0 | oldcontext = MemoryContextSwitchTo(portal->holdContext); |
1080 | |
1081 | 0 | ok = tuplestore_gettupleslot(portal->holdStore, forward, false, |
1082 | 0 | slot); |
1083 | |
1084 | 0 | MemoryContextSwitchTo(oldcontext); |
1085 | |
1086 | 0 | if (!ok) |
1087 | 0 | break; |
1088 | | |
1089 | | /* |
1090 | | * If we are not able to send the tuple, we assume the destination |
1091 | | * has closed and no more tuples can be sent. If that's the case, |
1092 | | * end the loop. |
1093 | | */ |
1094 | 0 | if (!dest->receiveSlot(slot, dest)) |
1095 | 0 | break; |
1096 | | |
1097 | 0 | ExecClearTuple(slot); |
1098 | | |
1099 | | /* |
1100 | | * check our tuple count.. if we've processed the proper number |
1101 | | * then quit, else loop again and process more tuples. Zero count |
1102 | | * means no limit. |
1103 | | */ |
1104 | 0 | current_tuple_count++; |
1105 | 0 | if (count && count == current_tuple_count) |
1106 | 0 | break; |
1107 | 0 | } |
1108 | 0 | } |
1109 | |
1110 | 0 | dest->rShutdown(dest); |
1111 | |
1112 | 0 | ExecDropSingleTupleTableSlot(slot); |
1113 | |
1114 | 0 | return current_tuple_count; |
1115 | 0 | } |
1116 | | |
1117 | | /* |
1118 | | * PortalRunUtility |
1119 | | * Execute a utility statement inside a portal. |
1120 | | */ |
1121 | | static void |
1122 | | PortalRunUtility(Portal portal, PlannedStmt *pstmt, |
1123 | | bool isTopLevel, bool setHoldSnapshot, |
1124 | | DestReceiver *dest, QueryCompletion *qc) |
1125 | 0 | { |
1126 | | /* |
1127 | | * Set snapshot if utility stmt needs one. |
1128 | | */ |
1129 | 0 | if (PlannedStmtRequiresSnapshot(pstmt)) |
1130 | 0 | { |
1131 | 0 | Snapshot snapshot = GetTransactionSnapshot(); |
1132 | | |
1133 | | /* If told to, register the snapshot we're using and save in portal */ |
1134 | 0 | if (setHoldSnapshot) |
1135 | 0 | { |
1136 | 0 | snapshot = RegisterSnapshot(snapshot); |
1137 | 0 | portal->holdSnapshot = snapshot; |
1138 | 0 | } |
1139 | | |
1140 | | /* |
1141 | | * In any case, make the snapshot active and remember it in portal. |
1142 | | * Because the portal now references the snapshot, we must tell |
1143 | | * snapmgr.c that the snapshot belongs to the portal's transaction |
1144 | | * level, else we risk portalSnapshot becoming a dangling pointer. |
1145 | | */ |
1146 | 0 | PushActiveSnapshotWithLevel(snapshot, portal->createLevel); |
1147 | | /* PushActiveSnapshotWithLevel might have copied the snapshot */ |
1148 | 0 | portal->portalSnapshot = GetActiveSnapshot(); |
1149 | 0 | } |
1150 | 0 | else |
1151 | 0 | portal->portalSnapshot = NULL; |
1152 | |
1153 | 0 | ProcessUtility(pstmt, |
1154 | 0 | portal->sourceText, |
1155 | 0 | (portal->cplan != NULL), /* protect tree if in plancache */ |
1156 | 0 | isTopLevel ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY, |
1157 | 0 | portal->portalParams, |
1158 | 0 | portal->queryEnv, |
1159 | 0 | dest, |
1160 | 0 | qc); |
1161 | | |
1162 | | /* Some utility statements may change context on us */ |
1163 | 0 | MemoryContextSwitchTo(portal->portalContext); |
1164 | | |
1165 | | /* |
1166 | | * Some utility commands (e.g., VACUUM) pop the ActiveSnapshot stack from |
1167 | | * under us, so don't complain if it's now empty. Otherwise, our snapshot |
1168 | | * should be the top one; pop it. Note that this could be a different |
1169 | | * snapshot from the one we made above; see EnsurePortalSnapshotExists. |
1170 | | */ |
1171 | 0 | if (portal->portalSnapshot != NULL && ActiveSnapshotSet()) |
1172 | 0 | { |
1173 | 0 | Assert(portal->portalSnapshot == GetActiveSnapshot()); |
1174 | 0 | PopActiveSnapshot(); |
1175 | 0 | } |
1176 | 0 | portal->portalSnapshot = NULL; |
1177 | 0 | } |
1178 | | |
1179 | | /* |
1180 | | * PortalRunMulti |
1181 | | * Execute a portal's queries in the general case (multi queries |
1182 | | * or non-SELECT-like queries) |
1183 | | */ |
1184 | | static void |
1185 | | PortalRunMulti(Portal portal, |
1186 | | bool isTopLevel, bool setHoldSnapshot, |
1187 | | DestReceiver *dest, DestReceiver *altdest, |
1188 | | QueryCompletion *qc) |
1189 | 0 | { |
1190 | 0 | bool active_snapshot_set = false; |
1191 | 0 | ListCell *stmtlist_item; |
1192 | | |
1193 | | /* |
1194 | | * If the destination is DestRemoteExecute, change to DestNone. The |
1195 | | * reason is that the client won't be expecting any tuples, and indeed has |
1196 | | * no way to know what they are, since there is no provision for Describe |
1197 | | * to send a RowDescription message when this portal execution strategy is |
1198 | | * in effect. This presently will only affect SELECT commands added to |
1199 | | * non-SELECT queries by rewrite rules: such commands will be executed, |
1200 | | * but the results will be discarded unless you use "simple Query" |
1201 | | * protocol. |
1202 | | */ |
1203 | 0 | if (dest->mydest == DestRemoteExecute) |
1204 | 0 | dest = None_Receiver; |
1205 | 0 | if (altdest->mydest == DestRemoteExecute) |
1206 | 0 | altdest = None_Receiver; |
1207 | | |
1208 | | /* |
1209 | | * Loop to handle the individual queries generated from a single parsetree |
1210 | | * by analysis and rewrite. |
1211 | | */ |
1212 | 0 | foreach(stmtlist_item, portal->stmts) |
1213 | 0 | { |
1214 | 0 | PlannedStmt *pstmt = lfirst_node(PlannedStmt, stmtlist_item); |
1215 | | |
1216 | | /* |
1217 | | * If we got a cancel signal in prior command, quit |
1218 | | */ |
1219 | 0 | CHECK_FOR_INTERRUPTS(); |
1220 | |
1221 | 0 | if (pstmt->utilityStmt == NULL) |
1222 | 0 | { |
1223 | | /* |
1224 | | * process a plannable query. |
1225 | | */ |
1226 | 0 | TRACE_POSTGRESQL_QUERY_EXECUTE_START(); |
1227 | |
1228 | 0 | if (log_executor_stats) |
1229 | 0 | ResetUsage(); |
1230 | | |
1231 | | /* |
1232 | | * Must always have a snapshot for plannable queries. First time |
1233 | | * through, take a new snapshot; for subsequent queries in the |
1234 | | * same portal, just update the snapshot's copy of the command |
1235 | | * counter. |
1236 | | */ |
1237 | 0 | if (!active_snapshot_set) |
1238 | 0 | { |
1239 | 0 | Snapshot snapshot = GetTransactionSnapshot(); |
1240 | | |
1241 | | /* If told to, register the snapshot and save in portal */ |
1242 | 0 | if (setHoldSnapshot) |
1243 | 0 | { |
1244 | 0 | snapshot = RegisterSnapshot(snapshot); |
1245 | 0 | portal->holdSnapshot = snapshot; |
1246 | 0 | } |
1247 | | |
1248 | | /* |
1249 | | * We can't have the holdSnapshot also be the active one, |
1250 | | * because UpdateActiveSnapshotCommandId would complain. So |
1251 | | * force an extra snapshot copy. Plain PushActiveSnapshot |
1252 | | * would have copied the transaction snapshot anyway, so this |
1253 | | * only adds a copy step when setHoldSnapshot is true. (It's |
1254 | | * okay for the command ID of the active snapshot to diverge |
1255 | | * from what holdSnapshot has.) |
1256 | | */ |
1257 | 0 | PushCopiedSnapshot(snapshot); |
1258 | | |
1259 | | /* |
1260 | | * As for PORTAL_ONE_SELECT portals, it does not seem |
1261 | | * necessary to maintain portal->portalSnapshot here. |
1262 | | */ |
1263 | |
1264 | 0 | active_snapshot_set = true; |
1265 | 0 | } |
1266 | 0 | else |
1267 | 0 | UpdateActiveSnapshotCommandId(); |
1268 | |
1269 | 0 | if (pstmt->canSetTag) |
1270 | 0 | { |
1271 | | /* statement can set tag string */ |
1272 | 0 | ProcessQuery(pstmt, |
1273 | 0 | portal->sourceText, |
1274 | 0 | portal->portalParams, |
1275 | 0 | portal->queryEnv, |
1276 | 0 | dest, qc); |
1277 | 0 | } |
1278 | 0 | else |
1279 | 0 | { |
1280 | | /* stmt added by rewrite cannot set tag */ |
1281 | 0 | ProcessQuery(pstmt, |
1282 | 0 | portal->sourceText, |
1283 | 0 | portal->portalParams, |
1284 | 0 | portal->queryEnv, |
1285 | 0 | altdest, NULL); |
1286 | 0 | } |
1287 | |
1288 | 0 | if (log_executor_stats) |
1289 | 0 | ShowUsage("EXECUTOR STATISTICS"); |
1290 | |
1291 | 0 | TRACE_POSTGRESQL_QUERY_EXECUTE_DONE(); |
1292 | 0 | } |
1293 | 0 | else |
1294 | 0 | { |
1295 | | /* |
1296 | | * process utility functions (create, destroy, etc..) |
1297 | | * |
1298 | | * We must not set a snapshot here for utility commands (if one is |
1299 | | * needed, PortalRunUtility will do it). If a utility command is |
1300 | | * alone in a portal then everything's fine. The only case where |
1301 | | * a utility command can be part of a longer list is that rules |
1302 | | * are allowed to include NotifyStmt. NotifyStmt doesn't care |
1303 | | * whether it has a snapshot or not, so we just leave the current |
1304 | | * snapshot alone if we have one. |
1305 | | */ |
1306 | 0 | if (pstmt->canSetTag) |
1307 | 0 | { |
1308 | 0 | Assert(!active_snapshot_set); |
1309 | | /* statement can set tag string */ |
1310 | 0 | PortalRunUtility(portal, pstmt, isTopLevel, false, |
1311 | 0 | dest, qc); |
1312 | 0 | } |
1313 | 0 | else |
1314 | 0 | { |
1315 | 0 | Assert(IsA(pstmt->utilityStmt, NotifyStmt)); |
1316 | | /* stmt added by rewrite cannot set tag */ |
1317 | 0 | PortalRunUtility(portal, pstmt, isTopLevel, false, |
1318 | 0 | altdest, NULL); |
1319 | 0 | } |
1320 | 0 | } |
1321 | | |
1322 | | /* |
1323 | | * Clear subsidiary contexts to recover temporary memory. |
1324 | | */ |
1325 | 0 | Assert(portal->portalContext == CurrentMemoryContext); |
1326 | |
1327 | 0 | MemoryContextDeleteChildren(portal->portalContext); |
1328 | | |
1329 | | /* |
1330 | | * Avoid crashing if portal->stmts has been reset. This can only |
1331 | | * occur if a CALL or DO utility statement executed an internal |
1332 | | * COMMIT/ROLLBACK (cf PortalReleaseCachedPlan). The CALL or DO must |
1333 | | * have been the only statement in the portal, so there's nothing left |
1334 | | * for us to do; but we don't want to dereference a now-dangling list |
1335 | | * pointer. |
1336 | | */ |
1337 | 0 | if (portal->stmts == NIL) |
1338 | 0 | break; |
1339 | | |
1340 | | /* |
1341 | | * Increment command counter between queries, but not after the last |
1342 | | * one. |
1343 | | */ |
1344 | 0 | if (lnext(portal->stmts, stmtlist_item) != NULL) |
1345 | 0 | CommandCounterIncrement(); |
1346 | 0 | } |
1347 | | |
1348 | | /* Pop the snapshot if we pushed one. */ |
1349 | 0 | if (active_snapshot_set) |
1350 | 0 | PopActiveSnapshot(); |
1351 | | |
1352 | | /* |
1353 | | * If a command tag was requested and we did not fill in a run-time- |
1354 | | * determined tag above, copy the parse-time tag from the Portal. (There |
1355 | | * might not be any tag there either, in edge cases such as empty prepared |
1356 | | * statements. That's OK.) |
1357 | | */ |
1358 | 0 | if (qc && |
1359 | 0 | qc->commandTag == CMDTAG_UNKNOWN && |
1360 | 0 | portal->qc.commandTag != CMDTAG_UNKNOWN) |
1361 | 0 | CopyQueryCompletion(qc, &portal->qc); |
1362 | 0 | } |
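/*
 * Example of the loop above (hypothetical rule setup, not part of
 * pquery.c): an INSERT on a table whose rule appends a NOTIFY yields a
 * two-element stmts list; the INSERT (canSetTag) reports through dest
 * and qc, the NotifyStmt runs against altdest with a NULL qc, and
 * CommandCounterIncrement fires between the two statements but not
 * after the last one.
 */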
1363 | | |
1364 | | /* |
1365 | | * PortalRunFetch |
1366 | | * Variant form of PortalRun that supports SQL FETCH directions. |
1367 | | * |
1368 | | * Note: we presently assume that no callers of this want isTopLevel = true. |
1369 | | * |
1370 | | * count <= 0 is interpreted as a no-op: the destination gets started up |
1371 | | * and shut down, but nothing else happens. Also, count == FETCH_ALL is |
1372 | | * interpreted as "all rows". (cf FetchStmt.howMany) |
1373 | | * |
1374 | | * Returns number of rows processed (suitable for use in result tag) |
1375 | | */ |
1376 | | uint64 |
1377 | | PortalRunFetch(Portal portal, |
1378 | | FetchDirection fdirection, |
1379 | | long count, |
1380 | | DestReceiver *dest) |
1381 | 0 | { |
1382 | 0 | uint64 result; |
1383 | 0 | Portal saveActivePortal; |
1384 | 0 | ResourceOwner saveResourceOwner; |
1385 | 0 | MemoryContext savePortalContext; |
1386 | 0 | MemoryContext oldContext; |
1387 | |
1388 | 0 | Assert(PortalIsValid(portal)); |
1389 | | |
1390 | | /* |
1391 | | * Check for improper portal use, and mark portal active. |
1392 | | */ |
1393 | 0 | MarkPortalActive(portal); |
1394 | | |
1395 | | /* |
1396 | | * Set up global portal context pointers. |
1397 | | */ |
1398 | 0 | saveActivePortal = ActivePortal; |
1399 | 0 | saveResourceOwner = CurrentResourceOwner; |
1400 | 0 | savePortalContext = PortalContext; |
1401 | 0 | PG_TRY(); |
1402 | 0 | { |
1403 | 0 | ActivePortal = portal; |
1404 | 0 | if (portal->resowner) |
1405 | 0 | CurrentResourceOwner = portal->resowner; |
1406 | 0 | PortalContext = portal->portalContext; |
1407 | |
1408 | 0 | oldContext = MemoryContextSwitchTo(PortalContext); |
1409 | |
1410 | 0 | switch (portal->strategy) |
1411 | 0 | { |
1412 | 0 | case PORTAL_ONE_SELECT: |
1413 | 0 | result = DoPortalRunFetch(portal, fdirection, count, dest); |
1414 | 0 | break; |
1415 | | |
1416 | 0 | case PORTAL_ONE_RETURNING: |
1417 | 0 | case PORTAL_ONE_MOD_WITH: |
1418 | 0 | case PORTAL_UTIL_SELECT: |
1419 | | |
1420 | | /* |
1421 | | * If we have not yet run the command, do so, storing its |
1422 | | * results in the portal's tuplestore. |
1423 | | */ |
1424 | 0 | if (!portal->holdStore) |
1425 | 0 | FillPortalStore(portal, false /* isTopLevel */ ); |
1426 | | |
1427 | | /* |
1428 | | * Now fetch desired portion of results. |
1429 | | */ |
1430 | 0 | result = DoPortalRunFetch(portal, fdirection, count, dest); |
1431 | 0 | break; |
1432 | | |
1433 | 0 | default: |
1434 | 0 | elog(ERROR, "unsupported portal strategy"); |
1435 | 0 | result = 0; /* keep compiler quiet */ |
1436 | 0 | break; |
1437 | 0 | } |
1438 | 0 | } |
1439 | 0 | PG_CATCH(); |
1440 | 0 | { |
1441 | | /* Uncaught error while executing portal: mark it dead */ |
1442 | 0 | MarkPortalFailed(portal); |
1443 | | |
1444 | | /* Restore global vars and propagate error */ |
1445 | 0 | ActivePortal = saveActivePortal; |
1446 | 0 | CurrentResourceOwner = saveResourceOwner; |
1447 | 0 | PortalContext = savePortalContext; |
1448 | |
1449 | 0 | PG_RE_THROW(); |
1450 | 0 | } |
1451 | 0 | PG_END_TRY(); |
1452 | | |
1453 | 0 | MemoryContextSwitchTo(oldContext); |
1454 | | |
1455 | | /* Mark portal not active */ |
1456 | 0 | portal->status = PORTAL_READY; |
1457 | |
1458 | 0 | ActivePortal = saveActivePortal; |
1459 | 0 | CurrentResourceOwner = saveResourceOwner; |
1460 | 0 | PortalContext = savePortalContext; |
1461 | |
1462 | 0 | return result; |
1463 | 0 | } |
1464 | | |
1465 | | /* |
1466 | | * DoPortalRunFetch |
1467 | | * Guts of PortalRunFetch --- the portal context is already set up |
1468 | | * |
1469 | | * Here, count < 0 typically reverses the direction. Also, count == FETCH_ALL |
1470 | | * is interpreted as "all rows". (cf FetchStmt.howMany) |
1471 | | * |
1472 | | * Returns number of rows processed (suitable for use in result tag) |
1473 | | */ |
1474 | | static uint64 |
1475 | | DoPortalRunFetch(Portal portal, |
1476 | | FetchDirection fdirection, |
1477 | | long count, |
1478 | | DestReceiver *dest) |
1479 | 0 | { |
1480 | 0 | bool forward; |
1481 | |
1482 | 0 | Assert(portal->strategy == PORTAL_ONE_SELECT || |
1483 | 0 | portal->strategy == PORTAL_ONE_RETURNING || |
1484 | 0 | portal->strategy == PORTAL_ONE_MOD_WITH || |
1485 | 0 | portal->strategy == PORTAL_UTIL_SELECT); |
1486 | | |
1487 | | /* |
1488 | | * Note: we disallow backwards fetch (including re-fetch of current row) |
1489 | | * for NO SCROLL cursors, but we interpret that very loosely: you can use |
1490 | | * any of the FetchDirection options, so long as the end result is to move |
1491 | | * forwards by at least one row. Currently it's sufficient to check for |
1492 | | * NO SCROLL in DoPortalRewind() and in the forward == false path in |
1493 | | * PortalRunSelect(); but someday we might prefer to account for that |
1494 | | * restriction explicitly here. |
1495 | | */ |
1496 | 0 | switch (fdirection) |
1497 | 0 | { |
1498 | 0 | case FETCH_FORWARD: |
1499 | 0 | if (count < 0) |
1500 | 0 | { |
1501 | 0 | fdirection = FETCH_BACKWARD; |
1502 | 0 | count = -count; |
1503 | 0 | } |
1504 | | /* fall out of switch to share code with FETCH_BACKWARD */ |
1505 | 0 | break; |
1506 | 0 | case FETCH_BACKWARD: |
1507 | 0 | if (count < 0) |
1508 | 0 | { |
1509 | 0 | fdirection = FETCH_FORWARD; |
1510 | 0 | count = -count; |
1511 | 0 | } |
1512 | | /* fall out of switch to share code with FETCH_FORWARD */ |
1513 | 0 | break; |
1514 | 0 | case FETCH_ABSOLUTE: |
1515 | 0 | if (count > 0) |
1516 | 0 | { |
1517 | | /* |
1518 | | * Definition: Rewind to start, advance count-1 rows, return |
1519 | | * next row (if any). |
1520 | | * |
1521 | | * In practice, if the goal is less than halfway back to the |
1522 | | * start, it's better to scan from where we are. |
1523 | | * |
1524 | | * Also, if current portalPos is outside the range of "long", |
1525 | | * do it the hard way to avoid possible overflow of the count |
1526 | | * argument to PortalRunSelect. We must exclude exactly |
1527 | | * LONG_MAX, as well, lest the count look like FETCH_ALL. |
1528 | | * |
1529 | | * In any case, we arrange to fetch the target row going |
1530 | | * forwards. |
1531 | | */ |
1532 | 0 | if ((uint64) (count - 1) <= portal->portalPos / 2 || |
1533 | 0 | portal->portalPos >= (uint64) LONG_MAX) |
1534 | 0 | { |
1535 | 0 | DoPortalRewind(portal); |
1536 | 0 | if (count > 1) |
1537 | 0 | PortalRunSelect(portal, true, count - 1, |
1538 | 0 | None_Receiver); |
1539 | 0 | } |
1540 | 0 | else |
1541 | 0 | { |
1542 | 0 | long pos = (long) portal->portalPos; |
1543 | |
1544 | 0 | if (portal->atEnd) |
1545 | 0 | pos++; /* need one extra fetch if off end */ |
1546 | 0 | if (count <= pos) |
1547 | 0 | PortalRunSelect(portal, false, pos - count + 1, |
1548 | 0 | None_Receiver); |
1549 | 0 | else if (count > pos + 1) |
1550 | 0 | PortalRunSelect(portal, true, count - pos - 1, |
1551 | 0 | None_Receiver); |
1552 | 0 | } |
1553 | 0 | return PortalRunSelect(portal, true, 1L, dest); |
1554 | 0 | } |
1555 | 0 | else if (count < 0) |
1556 | 0 | { |
1557 | | /* |
1558 | | * Definition: Advance to end, back up abs(count)-1 rows, |
1559 | | * return prior row (if any). We could optimize this if we |
1560 | | * knew in advance where the end was, but typically we won't. |
1561 | | * (Is it worth considering case where count > half of size of |
1562 | | * query? We could rewind once we know the size ...) |
1563 | | */ |
1564 | 0 | PortalRunSelect(portal, true, FETCH_ALL, None_Receiver); |
1565 | 0 | if (count < -1) |
1566 | 0 | PortalRunSelect(portal, false, -count - 1, None_Receiver); |
1567 | 0 | return PortalRunSelect(portal, false, 1L, dest); |
1568 | 0 | } |
1569 | 0 | else |
1570 | 0 | { |
1571 | | /* count == 0 */ |
1572 | | /* Rewind to start, return zero rows */ |
1573 | 0 | DoPortalRewind(portal); |
1574 | 0 | return PortalRunSelect(portal, true, 0L, dest); |
1575 | 0 | } |
1576 | 0 | break; |
1577 | 0 | case FETCH_RELATIVE: |
1578 | 0 | if (count > 0) |
1579 | 0 | { |
1580 | | /* |
1581 | | * Definition: advance count-1 rows, return next row (if any). |
1582 | | */ |
1583 | 0 | if (count > 1) |
1584 | 0 | PortalRunSelect(portal, true, count - 1, None_Receiver); |
1585 | 0 | return PortalRunSelect(portal, true, 1L, dest); |
1586 | 0 | } |
1587 | 0 | else if (count < 0) |
1588 | 0 | { |
1589 | | /* |
1590 | | * Definition: back up abs(count)-1 rows, return prior row (if |
1591 | | * any). |
1592 | | */ |
1593 | 0 | if (count < -1) |
1594 | 0 | PortalRunSelect(portal, false, -count - 1, None_Receiver); |
1595 | 0 | return PortalRunSelect(portal, false, 1L, dest); |
1596 | 0 | } |
1597 | 0 | else |
1598 | 0 | { |
1599 | | /* count == 0 */ |
1600 | | /* Same as FETCH FORWARD 0, so fall out of switch */ |
1601 | 0 | fdirection = FETCH_FORWARD; |
1602 | 0 | } |
1603 | 0 | break; |
1604 | 0 | default: |
1605 | 0 | elog(ERROR, "bogus direction"); |
1606 | 0 | break; |
1607 | 0 | } |
1608 | | |
1609 | | /* |
1610 | | * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD, and count |
1611 | | * >= 0. |
1612 | | */ |
1613 | 0 | forward = (fdirection == FETCH_FORWARD); |
1614 | | |
1615 | | /* |
1616 | | * Zero count means to re-fetch the current row, if any (per SQL) |
1617 | | */ |
1618 | 0 | if (count == 0) |
1619 | 0 | { |
1620 | 0 | bool on_row; |
1621 | | |
1622 | | /* Are we sitting on a row? */ |
1623 | 0 | on_row = (!portal->atStart && !portal->atEnd); |
1624 | |
1625 | 0 | if (dest->mydest == DestNone) |
1626 | 0 | { |
1627 | | /* MOVE 0 returns 0/1 based on whether FETCH 0 would return a row */
1628 | 0 | return on_row ? 1 : 0; |
1629 | 0 | } |
1630 | 0 | else |
1631 | 0 | { |
1632 | | /* |
1633 | | * If we are sitting on a row, back up one so we can re-fetch it. |
1634 | | * If we are not sitting on a row, we still have to start up and |
1635 | | * shut down the executor so that the destination is initialized |
1636 | | * and shut down correctly; so keep going. To PortalRunSelect, |
1637 | | * count == 0 means we will retrieve no row. |
1638 | | */ |
1639 | 0 | if (on_row) |
1640 | 0 | { |
1641 | 0 | PortalRunSelect(portal, false, 1L, None_Receiver); |
1642 | | /* Set up to fetch one row forward */ |
1643 | 0 | count = 1; |
1644 | 0 | forward = true; |
1645 | 0 | } |
1646 | 0 | } |
1647 | 0 | } |
1648 | | |
1649 | | /* |
1650 | | * Optimize MOVE BACKWARD ALL into a Rewind. |
1651 | | */ |
1652 | 0 | if (!forward && count == FETCH_ALL && dest->mydest == DestNone) |
1653 | 0 | { |
1654 | 0 | uint64 result = portal->portalPos; |
1655 | |
1656 | 0 | if (result > 0 && !portal->atEnd) |
1657 | 0 | result--; |
1658 | 0 | DoPortalRewind(portal); |
1659 | 0 | return result; |
1660 | 0 | } |
1661 | | |
1662 | 0 | return PortalRunSelect(portal, forward, count, dest); |
1663 | 0 | } |
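
The FETCH_ABSOLUTE arithmetic above is easy to misread, so here is a minimal
toy model of just the strategy choice for a positive target (plain standalone
C, not PostgreSQL code; choose_strategy and its parameters are invented for
illustration). It reproduces the rewind-versus-scan decision and the
backward/forward step counts, but none of the executor machinery:

/* Toy model of the FETCH_ABSOLUTE strategy choice; not PostgreSQL code. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>

static void
choose_strategy(uint64_t portalPos, bool atEnd, long count)
{
    /* assumes count > 0, as in the FETCH_ABSOLUTE positive branch */
    if ((uint64_t) (count - 1) <= portalPos / 2 ||
        portalPos >= (uint64_t) LONG_MAX)
    {
        /* target is at most halfway back, or pos won't fit in a long */
        printf("target %ld: rewind, then %ld forward step(s)\n",
               count, count > 1 ? count - 1 : 0);
    }
    else
    {
        long    pos = (long) portalPos;

        if (atEnd)
            pos++;              /* one extra step if we're off the end */
        if (count <= pos)
            printf("target %ld: %ld backward step(s)\n",
                   count, pos - count + 1);
        else if (count > pos + 1)
            printf("target %ld: %ld forward step(s)\n",
                   count, count - pos - 1);
        else
            printf("target %ld: no repositioning needed\n", count);
    }
    /* the real code then fetches one row forward to return the target */
}

int
main(void)
{
    choose_strategy(100, false, 10);    /* near the start: rewind wins */
    choose_strategy(100, false, 95);    /* near here: short backward scan */
    choose_strategy(100, false, 150);   /* ahead: short forward scan */
    return 0;
}

The same trade-off does not arise for the zero-count and MOVE BACKWARD ALL
paths above: those reduce to a single re-fetch or a plain rewind.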
1664 | | |
1665 | | /* |
1666 | | * DoPortalRewind - rewind a Portal to starting point |
1667 | | */ |
1668 | | static void |
1669 | | DoPortalRewind(Portal portal) |
1670 | 0 | { |
1671 | 0 | QueryDesc *queryDesc; |
1672 | | |
1673 | | /* |
1674 | | * No work is needed if we have neither advanced nor attempted to advance
1675 | | * the cursor (and we don't want to throw a NO SCROLL error in this case).
1676 | | */ |
1677 | 0 | if (portal->atStart && !portal->atEnd) |
1678 | 0 | return; |
1679 | | |
1680 | | /* Otherwise, cursor must allow scrolling */ |
1681 | 0 | if (portal->cursorOptions & CURSOR_OPT_NO_SCROLL) |
1682 | 0 | ereport(ERROR, |
1683 | 0 | (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), |
1684 | 0 | errmsg("cursor can only scan forward"), |
1685 | 0 | errhint("Declare it with SCROLL option to enable backward scan."))); |
1686 | | |
1687 | | /* Rewind holdStore, if we have one */ |
1688 | 0 | if (portal->holdStore) |
1689 | 0 | { |
1690 | 0 | MemoryContext oldcontext; |
1691 | |
1692 | 0 | oldcontext = MemoryContextSwitchTo(portal->holdContext); |
1693 | 0 | tuplestore_rescan(portal->holdStore); |
1694 | 0 | MemoryContextSwitchTo(oldcontext); |
1695 | 0 | } |
1696 | | |
1697 | | /* Rewind executor, if active */ |
1698 | 0 | queryDesc = portal->queryDesc; |
1699 | 0 | if (queryDesc) |
1700 | 0 | { |
1701 | 0 | PushActiveSnapshot(queryDesc->snapshot); |
1702 | 0 | ExecutorRewind(queryDesc); |
1703 | 0 | PopActiveSnapshot(); |
1704 | 0 | } |
1705 | |
1706 | 0 | portal->atStart = true; |
1707 | 0 | portal->atEnd = false; |
1708 | 0 | portal->portalPos = 0; |
1709 | 0 | } |
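
To make the guard ordering concrete, here is a tiny standalone model (not
PostgreSQL code; toy_portal and the CURSOR_OPT_NO_SCROLL bit value are
placeholders) of the three outcomes: a silent no-op for an unmoved cursor,
rejection for a moved NO SCROLL cursor, and a state reset otherwise:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define CURSOR_OPT_NO_SCROLL 0x0002     /* placeholder bit; the real flag
                                         * lives in nodes/parsenodes.h */

struct toy_portal
{
    int         cursorOptions;
    bool        atStart;
    bool        atEnd;
    uint64_t    portalPos;
};

/* returns false where the real code would ereport(ERROR) */
static bool
toy_rewind(struct toy_portal *p)
{
    if (p->atStart && !p->atEnd)
        return true;            /* never moved: no-op, and no error */
    if (p->cursorOptions & CURSOR_OPT_NO_SCROLL)
        return false;           /* "cursor can only scan forward" */
    p->atStart = true;          /* reset position state, as above */
    p->atEnd = false;
    p->portalPos = 0;
    return true;
}

int
main(void)
{
    struct toy_portal unmoved = {CURSOR_OPT_NO_SCROLL, true, false, 0};
    struct toy_portal moved = {CURSOR_OPT_NO_SCROLL, false, false, 42};

    printf("unmoved NO SCROLL: %s\n", toy_rewind(&unmoved) ? "ok" : "error");
    printf("moved NO SCROLL:   %s\n", toy_rewind(&moved) ? "ok" : "error");
    return 0;
}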
1710 | | |
1711 | | /* |
1712 | | * PlannedStmtRequiresSnapshot - what it says on the tin |
1713 | | */ |
1714 | | bool |
1715 | | PlannedStmtRequiresSnapshot(PlannedStmt *pstmt) |
1716 | 0 | { |
1717 | 0 | Node *utilityStmt = pstmt->utilityStmt; |
1718 | | |
1719 | | /* If it's not a utility statement, it definitely needs a snapshot */ |
1720 | 0 | if (utilityStmt == NULL) |
1721 | 0 | return true; |
1722 | | |
1723 | | /* |
1724 | | * Most utility statements need a snapshot, and the default presumption |
1725 | | * about new ones should be that they do too. Hence, enumerate those that |
1726 | | * do not need one. |
1727 | | * |
1728 | | * Transaction control, LOCK, and SET must *not* set a snapshot, since |
1729 | | * they need to be executable at the start of a transaction-snapshot-mode |
1730 | | * transaction without freezing a snapshot. By extension we allow SHOW |
1731 | | * not to set a snapshot. The other stmts listed are just efficiency |
1732 | | * hacks. Beware of listing anything that can modify the database --- if, |
1733 | | * say, it has to update an index with expressions that invoke |
1734 | | * user-defined functions, then it had better have a snapshot. |
1735 | | */ |
1736 | 0 | if (IsA(utilityStmt, TransactionStmt) || |
1737 | 0 | IsA(utilityStmt, LockStmt) || |
1738 | 0 | IsA(utilityStmt, VariableSetStmt) || |
1739 | 0 | IsA(utilityStmt, VariableShowStmt) || |
1740 | 0 | IsA(utilityStmt, ConstraintsSetStmt) || |
1741 | | /* efficiency hacks from here down */ |
1742 | 0 | IsA(utilityStmt, FetchStmt) || |
1743 | 0 | IsA(utilityStmt, ListenStmt) || |
1744 | 0 | IsA(utilityStmt, NotifyStmt) || |
1745 | 0 | IsA(utilityStmt, UnlistenStmt) || |
1746 | 0 | IsA(utilityStmt, CheckPointStmt)) |
1747 | 0 | return false; |
1748 | | |
1749 | 0 | return true; |
1750 | 0 | } |
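
The comment's design point, that new statement types should default to
needing a snapshot, is the whole reason this is written as an allowlist. A
compact standalone sketch of the same fail-safe pattern (plain C with an
invented enum, not the real node-tag machinery):

#include <stdio.h>
#include <stdbool.h>

enum toy_stmt
{
    TOY_SELECT,                 /* stands in for "not a utility statement" */
    TOY_TRANSACTION,
    TOY_LOCK,
    TOY_SET,
    TOY_SHOW,
    TOY_CREATE_INDEX,           /* stands in for any other utility stmt */
};

static bool
toy_requires_snapshot(enum toy_stmt tag)
{
    switch (tag)
    {
        case TOY_TRANSACTION:   /* must run without freezing a snapshot */
        case TOY_LOCK:
        case TOY_SET:
        case TOY_SHOW:
            return false;
        default:
            return true;        /* fail-safe default for anything new */
    }
}

int
main(void)
{
    printf("SELECT needs snapshot: %d\n", toy_requires_snapshot(TOY_SELECT));
    printf("SET needs snapshot:    %d\n", toy_requires_snapshot(TOY_SET));
    return 0;
}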
1751 | | |
1752 | | /* |
1753 | | * EnsurePortalSnapshotExists - recreate Portal-level snapshot, if needed |
1754 | | * |
1755 | | * Generally, we will have an active snapshot whenever we are executing |
1756 | | * inside a Portal, unless the Portal's query is one of the utility |
1757 | | * statements exempted from that rule (see PlannedStmtRequiresSnapshot). |
1758 | | * However, procedures and DO blocks can commit or abort the transaction, |
1759 | | * and thereby destroy all snapshots. This function can be called to |
1760 | | * re-establish the Portal-level snapshot when none exists. |
1761 | | */ |
1762 | | void |
1763 | | EnsurePortalSnapshotExists(void) |
1764 | 0 | { |
1765 | 0 | Portal portal; |
1766 | | |
1767 | | /* |
1768 | | * Nothing to do if a snapshot is set. (We take it on faith that the |
1769 | | * outermost active snapshot belongs to some Portal; or if there is no |
1770 | | * Portal, it's somebody else's responsibility to manage things.) |
1771 | | */ |
1772 | 0 | if (ActiveSnapshotSet()) |
1773 | 0 | return; |
1774 | | |
1775 | | /* Otherwise, we'd better have an active Portal */ |
1776 | 0 | portal = ActivePortal; |
1777 | 0 | if (unlikely(portal == NULL)) |
1778 | 0 | elog(ERROR, "cannot execute SQL without an outer snapshot or portal"); |
1779 | 0 | Assert(portal->portalSnapshot == NULL); |
1780 | | |
1781 | | /* |
1782 | | * Create a new snapshot, make it active, and remember it in portal. |
1783 | | * Because the portal now references the snapshot, we must tell snapmgr.c |
1784 | | * that the snapshot belongs to the portal's transaction level, else we |
1785 | | * risk portalSnapshot becoming a dangling pointer. |
1786 | | */ |
1787 | 0 | PushActiveSnapshotWithLevel(GetTransactionSnapshot(), portal->createLevel); |
1788 | | /* PushActiveSnapshotWithLevel might have copied the snapshot */ |
1789 | 0 | portal->portalSnapshot = GetActiveSnapshot(); |
1790 | 0 | } |