/src/postgres/src/backend/optimizer/util/pathnode.c
/*-------------------------------------------------------------------------
 *
 * pathnode.c
 *    Routines to manipulate pathlists and create path nodes
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/optimizer/util/pathnode.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/extensible.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/planmain.h"
#include "optimizer/tlist.h"
#include "parser/parsetree.h"
#include "utils/memutils.h"
#include "utils/selfuncs.h"

typedef enum
{
    COSTS_EQUAL,                /* path costs are fuzzily equal */
    COSTS_BETTER1,              /* first path is cheaper than second */
    COSTS_BETTER2,              /* second path is cheaper than first */
    COSTS_DIFFERENT,            /* neither path dominates the other on cost */
} PathCostComparison;

/*
 * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
 * XXX is it worth making this user-controllable?  It provides a tradeoff
 * between planner runtime and the accuracy of path cost comparisons.
 */
#define STD_FUZZ_FACTOR 1.01

static List *translate_sub_tlist(List *tlist, int relid);
static int  append_total_cost_compare(const ListCell *a, const ListCell *b);
static int  append_startup_cost_compare(const ListCell *a, const ListCell *b);
static List *reparameterize_pathlist_by_child(PlannerInfo *root,
                                              List *pathlist,
                                              RelOptInfo *child_rel);
static bool pathlist_is_reparameterizable_by_child(List *pathlist,
                                                   RelOptInfo *child_rel);


/*****************************************************************************
 *      MISC. PATH UTILITIES
 *****************************************************************************/

/*
 * compare_path_costs
 *    Return -1, 0, or +1 according as path1 is cheaper, the same cost,
 *    or more expensive than path2 for the specified criterion.
 */
int
compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
{
    /* Number of disabled nodes, if different, trumps all else. */
    if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
    {
        if (path1->disabled_nodes < path2->disabled_nodes)
            return -1;
        else
            return +1;
    }

    if (criterion == STARTUP_COST)
    {
        if (path1->startup_cost < path2->startup_cost)
            return -1;
        if (path1->startup_cost > path2->startup_cost)
            return +1;

        /*
         * If paths have the same startup cost (not at all unlikely), order
         * them by total cost.
         */
        if (path1->total_cost < path2->total_cost)
            return -1;
        if (path1->total_cost > path2->total_cost)
            return +1;
    }
    else
    {
        if (path1->total_cost < path2->total_cost)
            return -1;
        if (path1->total_cost > path2->total_cost)
            return +1;

        /*
         * If paths have the same total cost, order them by startup cost.
         */
        if (path1->startup_cost < path2->startup_cost)
            return -1;
        if (path1->startup_cost > path2->startup_cost)
            return +1;
    }
    return 0;
}

/*
 * compare_fractional_path_costs
 *    Return -1, 0, or +1 according as path1 is cheaper, the same cost,
 *    or more expensive than path2 for fetching the specified fraction
 *    of the total tuples.
 *
 * If fraction is <= 0 or > 1, we interpret it as 1, ie, we select the
 * path with the cheaper total_cost.
 */
int
compare_fractional_path_costs(Path *path1, Path *path2,
                              double fraction)
{
    Cost        cost1,
                cost2;

    /* Number of disabled nodes, if different, trumps all else. */
    if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
    {
        if (path1->disabled_nodes < path2->disabled_nodes)
            return -1;
        else
            return +1;
    }

    if (fraction <= 0.0 || fraction >= 1.0)
        return compare_path_costs(path1, path2, TOTAL_COST);
    cost1 = path1->startup_cost +
        fraction * (path1->total_cost - path1->startup_cost);
    cost2 = path2->startup_cost +
        fraction * (path2->total_cost - path2->startup_cost);
    if (cost1 < cost2)
        return -1;
    if (cost1 > cost2)
        return +1;
    return 0;
}
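
/*
 * Worked example for the interpolation above (editor's illustrative sketch,
 * not part of the original file): with startup_cost = 10, total_cost = 110,
 * and fraction = 0.25, the interpolated cost is 10 + 0.25 * (110 - 10) = 35.
 * The hypothetical helper below just restates the per-path formula used by
 * compare_fractional_path_costs.
 */
#ifdef NOT_USED
static Cost
fractional_cost_example(const Path *path, double fraction)
{
    /* same linear interpolation between startup and total cost as above */
    return path->startup_cost +
        fraction * (path->total_cost - path->startup_cost);
}
#endif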

/*
 * compare_path_costs_fuzzily
 *    Compare the costs of two paths to see if either can be said to
 *    dominate the other.
 *
 * We use fuzzy comparisons so that add_path() can avoid keeping both of
 * a pair of paths that really have insignificantly different cost.
 *
 * The fuzz_factor argument must be 1.0 plus delta, where delta is the
 * fraction of the smaller cost that is considered to be a significant
 * difference.  For example, fuzz_factor = 1.01 makes the fuzziness limit
 * be 1% of the smaller cost.
 *
 * The two paths are said to have "equal" costs if both startup and total
 * costs are fuzzily the same.  Path1 is said to be better than path2 if
 * it has fuzzily better startup cost and fuzzily no worse total cost,
 * or if it has fuzzily better total cost and fuzzily no worse startup cost.
 * Path2 is better than path1 if the reverse holds.  Finally, if one path
 * is fuzzily better than the other on startup cost and fuzzily worse on
 * total cost, we just say that their costs are "different", since neither
 * dominates the other across the whole performance spectrum.
 *
 * This function also enforces a policy rule that paths for which the relevant
 * one of parent->consider_startup and parent->consider_param_startup is false
 * cannot survive comparisons solely on the grounds of good startup cost, so
 * we never return COSTS_DIFFERENT when that is true for the total-cost loser.
 * (But if total costs are fuzzily equal, we compare startup costs anyway,
 * in hopes of eliminating one path or the other.)
 */
static PathCostComparison
compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
{
#define CONSIDER_PATH_STARTUP_COST(p) \
    ((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)

    /* Number of disabled nodes, if different, trumps all else. */
    if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
    {
        if (path1->disabled_nodes < path2->disabled_nodes)
            return COSTS_BETTER1;
        else
            return COSTS_BETTER2;
    }

    /*
     * Check total cost first since it's more likely to be different; many
     * paths have zero startup cost.
     */
    if (path1->total_cost > path2->total_cost * fuzz_factor)
    {
        /* path1 fuzzily worse on total cost */
        if (CONSIDER_PATH_STARTUP_COST(path1) &&
            path2->startup_cost > path1->startup_cost * fuzz_factor)
        {
            /* ... but path2 fuzzily worse on startup, so DIFFERENT */
            return COSTS_DIFFERENT;
        }
        /* else path2 dominates */
        return COSTS_BETTER2;
    }
    if (path2->total_cost > path1->total_cost * fuzz_factor)
    {
        /* path2 fuzzily worse on total cost */
        if (CONSIDER_PATH_STARTUP_COST(path2) &&
            path1->startup_cost > path2->startup_cost * fuzz_factor)
        {
            /* ... but path1 fuzzily worse on startup, so DIFFERENT */
            return COSTS_DIFFERENT;
        }
        /* else path1 dominates */
        return COSTS_BETTER1;
    }
    /* fuzzily the same on total cost ... */
    if (path1->startup_cost > path2->startup_cost * fuzz_factor)
    {
        /* ... but path1 fuzzily worse on startup, so path2 wins */
        return COSTS_BETTER2;
    }
    if (path2->startup_cost > path1->startup_cost * fuzz_factor)
    {
        /* ... but path2 fuzzily worse on startup, so path1 wins */
        return COSTS_BETTER1;
    }
    /* fuzzily the same on both costs */
    return COSTS_EQUAL;

#undef CONSIDER_PATH_STARTUP_COST
}
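
/*
 * Illustrative sketch (hypothetical helper, not in the original file): under
 * STD_FUZZ_FACTOR, two total costs are "fuzzily equal" when neither exceeds
 * the other by more than 1%.  For example, 100.0 and 100.5 are fuzzily equal
 * (100.5 <= 100.0 * 1.01), while 100.0 and 102.0 are not.
 */
#ifdef NOT_USED
static bool
total_costs_fuzzily_equal(Path *path1, Path *path2)
{
    /* neither total cost is more than 1% above the other */
    return path1->total_cost <= path2->total_cost * STD_FUZZ_FACTOR &&
        path2->total_cost <= path1->total_cost * STD_FUZZ_FACTOR;
}
#endif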

/*
 * set_cheapest
 *    Find the minimum-cost paths from among a relation's paths,
 *    and save them in the rel's cheapest-path fields.
 *
 * cheapest_total_path is normally the cheapest-total-cost unparameterized
 * path; but if there are no unparameterized paths, we assign it to be the
 * best (cheapest least-parameterized) parameterized path.  However, only
 * unparameterized paths are considered candidates for cheapest_startup_path,
 * so that will be NULL if there are no unparameterized paths.
 *
 * The cheapest_parameterized_paths list collects all parameterized paths
 * that have survived the add_path() tournament for this relation.  (Since
 * add_path ignores pathkeys for a parameterized path, these will be paths
 * that have best cost or best row count for their parameterization.  We
 * may also have both a parallel-safe and a non-parallel-safe path for the
 * same parameterization in some cases, but this should be relatively rare
 * since, most typically, all paths for the same relation will be
 * parallel-safe or none of them will.)
 *
 * cheapest_parameterized_paths always includes the cheapest-total
 * unparameterized path, too, if there is one; the users of that list find
 * it more convenient if that's included.
 *
 * This is normally called only after we've finished constructing the path
 * list for the rel node.
 */
void
set_cheapest(RelOptInfo *parent_rel)
{
    Path       *cheapest_startup_path;
    Path       *cheapest_total_path;
    Path       *best_param_path;
    List       *parameterized_paths;
    ListCell   *p;

    Assert(IsA(parent_rel, RelOptInfo));

    if (parent_rel->pathlist == NIL)
        elog(ERROR, "could not devise a query plan for the given query");

    cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
    parameterized_paths = NIL;

    foreach(p, parent_rel->pathlist)
    {
        Path       *path = (Path *) lfirst(p);
        int         cmp;

        if (path->param_info)
        {
            /* Parameterized path, so add it to parameterized_paths */
            parameterized_paths = lappend(parameterized_paths, path);

            /*
             * If we have an unparameterized cheapest-total, we no longer care
             * about finding the best parameterized path, so move on.
             */
            if (cheapest_total_path)
                continue;

            /*
             * Otherwise, track the best parameterized path, which is the one
             * with least total cost among those of the minimum
             * parameterization.
             */
            if (best_param_path == NULL)
                best_param_path = path;
            else
            {
                switch (bms_subset_compare(PATH_REQ_OUTER(path),
                                           PATH_REQ_OUTER(best_param_path)))
                {
                    case BMS_EQUAL:
                        /* keep the cheaper one */
                        if (compare_path_costs(path, best_param_path,
                                               TOTAL_COST) < 0)
                            best_param_path = path;
                        break;
                    case BMS_SUBSET1:
                        /* new path is less-parameterized */
                        best_param_path = path;
                        break;
                    case BMS_SUBSET2:
                        /* old path is less-parameterized, keep it */
                        break;
                    case BMS_DIFFERENT:

                        /*
                         * This means that neither path has the least possible
                         * parameterization for the rel.  We'll sit on the old
                         * path until something better comes along.
                         */
                        break;
                }
            }
        }
        else
        {
            /* Unparameterized path, so consider it for cheapest slots */
            if (cheapest_total_path == NULL)
            {
                cheapest_startup_path = cheapest_total_path = path;
                continue;
            }

            /*
             * If we find two paths of identical costs, try to keep the
             * better-sorted one.  The paths might have unrelated sort
             * orderings, in which case we can only guess which might be
             * better to keep, but if one is superior then we definitely
             * should keep that one.
             */
            cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
            if (cmp > 0 ||
                (cmp == 0 &&
                 compare_pathkeys(cheapest_startup_path->pathkeys,
                                  path->pathkeys) == PATHKEYS_BETTER2))
                cheapest_startup_path = path;

            cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
            if (cmp > 0 ||
                (cmp == 0 &&
                 compare_pathkeys(cheapest_total_path->pathkeys,
                                  path->pathkeys) == PATHKEYS_BETTER2))
                cheapest_total_path = path;
        }
    }

    /* Add cheapest unparameterized path, if any, to parameterized_paths */
    if (cheapest_total_path)
        parameterized_paths = lcons(cheapest_total_path, parameterized_paths);

    /*
     * If there is no unparameterized path, use the best parameterized path as
     * cheapest_total_path (but not as cheapest_startup_path).
     */
    if (cheapest_total_path == NULL)
        cheapest_total_path = best_param_path;
    Assert(cheapest_total_path != NULL);

    parent_rel->cheapest_startup_path = cheapest_startup_path;
    parent_rel->cheapest_total_path = cheapest_total_path;
    parent_rel->cheapest_unique_path = NULL;    /* computed only if needed */
    parent_rel->cheapest_parameterized_paths = parameterized_paths;
}
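
/*
 * Typical call pattern (hedged sketch of a hypothetical caller, not code
 * from this file): a path generator adds candidate paths with add_path()
 * and calls set_cheapest() only once the rel's pathlist is complete, much
 * as set_rel_pathlist() in allpaths.c does.
 */
#ifdef NOT_USED
static void
build_rel_paths_example(PlannerInfo *root, RelOptInfo *rel)
{
    /* one candidate: an unparameterized, non-parallel sequential scan */
    add_path(rel, create_seqscan_path(root, rel, NULL, 0));

    /* ... index, TID, etc. candidates would normally be added here ... */

    /* with all candidates in, fill the cheapest-path fields */
    set_cheapest(rel);
}
#endif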

/*
 * add_path
 *    Consider a potential implementation path for the specified parent rel,
 *    and add it to the rel's pathlist if it is worthy of consideration.
 *
 * A path is worthy if it has a better sort order (better pathkeys) or
 * cheaper cost (as defined below), or generates fewer rows, than any
 * existing path that has the same or superset parameterization rels.  We
 * also consider parallel-safe paths more worthy than others.
 *
 * Cheaper cost can mean either a cheaper total cost or a cheaper startup
 * cost; if one path is cheaper in one of these aspects and another is
 * cheaper in the other, we keep both.  However, when some path type is
 * disabled (e.g. due to enable_seqscan=false), the number of times that
 * a disabled path type is used is considered to be a higher-order
 * component of the cost.  Hence, if path A uses no disabled path type,
 * and path B uses 1 or more disabled path types, A is cheaper, no matter
 * what we estimate for the startup and total costs.  The startup and total
 * cost essentially act as a tiebreak when comparing paths that use equal
 * numbers of disabled path nodes; but in practice this tiebreak is almost
 * always used, since normally no path types are disabled.
 *
 * In addition to possibly adding new_path, we also remove from the rel's
 * pathlist any old paths that are dominated by new_path --- that is,
 * new_path is cheaper, at least as well ordered, generates no more rows,
 * requires no outer rels not required by the old path, and is no less
 * parallel-safe.
 *
 * In most cases, a path with a superset parameterization will generate
 * fewer rows (since it has more join clauses to apply), so that those two
 * figures of merit move in opposite directions; this means that a path of
 * one parameterization can seldom dominate a path of another.  But such
 * cases do arise, so we make the full set of checks anyway.
 *
 * There are two policy decisions embedded in this function, along with
 * its sibling add_path_precheck.  First, we treat all parameterized paths
 * as having NIL pathkeys, so that they cannot win comparisons on the
 * basis of sort order.  This is to reduce the number of parameterized
 * paths that are kept; see discussion in src/backend/optimizer/README.
 *
 * Second, we only consider cheap startup cost to be interesting if
 * parent_rel->consider_startup is true for an unparameterized path, or
 * parent_rel->consider_param_startup is true for a parameterized one.
 * Again, this allows discarding useless paths sooner.
 *
 * The pathlist is kept sorted by disabled_nodes and then by total_cost,
 * with cheaper paths at the front.  Within this routine, that's simply a
 * speed hack: doing it that way makes it more likely that we will reject
 * an inferior path after a few comparisons, rather than many comparisons.
 * However, add_path_precheck relies on this ordering to exit early
 * when possible.
 *
 * NOTE: discarded Path objects are immediately pfree'd to reduce planner
 * memory consumption.  We dare not try to free the substructure of a Path,
 * since much of it may be shared with other Paths or the query tree itself;
 * but just recycling discarded Path nodes is a very useful savings in
 * a large join tree.  We can recycle the List nodes of pathlist, too.
 *
 * As noted in optimizer/README, deleting a previously-accepted Path is
 * safe because we know that Paths of this rel cannot yet be referenced
 * from any other rel, such as a higher-level join.  However, in some cases
 * it is possible that a Path is referenced by another Path for its own
 * rel; we must not delete such a Path, even if it is dominated by the new
 * Path.  Currently this occurs only for IndexPath objects, which may be
 * referenced as children of BitmapHeapPaths as well as being paths in
 * their own right.  Hence, we don't pfree IndexPaths when rejecting them.
 *
 * 'parent_rel' is the relation entry to which the path corresponds.
 * 'new_path' is a potential path for parent_rel.
 *
 * Returns nothing, but modifies parent_rel->pathlist.
 */
void
add_path(RelOptInfo *parent_rel, Path *new_path)
{
    bool        accept_new = true;  /* unless we find a superior old path */
    int         insert_at = 0;      /* where to insert new item */
    List       *new_path_pathkeys;
    ListCell   *p1;

    /*
     * This is a convenient place to check for query cancel --- no part of the
     * planner goes very long without calling add_path().
     */
    CHECK_FOR_INTERRUPTS();

    /* Pretend parameterized paths have no pathkeys, per comment above */
    new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;

    /*
     * Loop to check proposed new path against old paths.  Note it is possible
     * for more than one old path to be tossed out because new_path dominates
     * it.
     */
    foreach(p1, parent_rel->pathlist)
    {
        Path       *old_path = (Path *) lfirst(p1);
        bool        remove_old = false; /* unless new proves superior */
        PathCostComparison costcmp;
        PathKeysComparison keyscmp;
        BMS_Comparison outercmp;

        /*
         * Do a fuzzy cost comparison with standard fuzziness limit.
         */
        costcmp = compare_path_costs_fuzzily(new_path, old_path,
                                             STD_FUZZ_FACTOR);

        /*
         * If the two paths compare differently for startup and total cost,
         * then we want to keep both, and we can skip comparing pathkeys and
         * required_outer rels.  If they compare the same, proceed with the
         * other comparisons.  Row count is checked last.  (We make the tests
         * in this order because the cost comparison is most likely to turn
         * out "different", and the pathkeys comparison next most likely.  As
         * explained above, row count very seldom makes a difference, so even
         * though it's cheap to compare there's not much point in checking it
         * earlier.)
         */
        if (costcmp != COSTS_DIFFERENT)
        {
            /* Similarly check to see if either dominates on pathkeys */
            List       *old_path_pathkeys;

            old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
            keyscmp = compare_pathkeys(new_path_pathkeys,
                                       old_path_pathkeys);
            if (keyscmp != PATHKEYS_DIFFERENT)
            {
                switch (costcmp)
                {
                    case COSTS_EQUAL:
                        outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
                                                      PATH_REQ_OUTER(old_path));
                        if (keyscmp == PATHKEYS_BETTER1)
                        {
                            if ((outercmp == BMS_EQUAL ||
                                 outercmp == BMS_SUBSET1) &&
                                new_path->rows <= old_path->rows &&
                                new_path->parallel_safe >= old_path->parallel_safe)
                                remove_old = true;  /* new dominates old */
                        }
                        else if (keyscmp == PATHKEYS_BETTER2)
                        {
                            if ((outercmp == BMS_EQUAL ||
                                 outercmp == BMS_SUBSET2) &&
                                new_path->rows >= old_path->rows &&
                                new_path->parallel_safe <= old_path->parallel_safe)
                                accept_new = false; /* old dominates new */
                        }
                        else    /* keyscmp == PATHKEYS_EQUAL */
                        {
                            if (outercmp == BMS_EQUAL)
                            {
                                /*
                                 * Same pathkeys and outer rels, and fuzzily
                                 * the same cost, so keep just one; to decide
                                 * which, first check parallel-safety, then
                                 * rows, then do a fuzzy cost comparison with
                                 * very small fuzz limit.  (We used to do an
                                 * exact cost comparison, but that results in
                                 * annoying platform-specific plan variations
                                 * due to roundoff in the cost estimates.)  If
                                 * things are still tied, arbitrarily keep
                                 * only the old path.  Notice that we will
                                 * keep only the old path even if the
                                 * less-fuzzy comparison decides the startup
                                 * and total costs compare differently.
                                 */
                                if (new_path->parallel_safe >
                                    old_path->parallel_safe)
                                    remove_old = true;  /* new dominates old */
                                else if (new_path->parallel_safe <
                                         old_path->parallel_safe)
                                    accept_new = false; /* old dominates new */
                                else if (new_path->rows < old_path->rows)
                                    remove_old = true;  /* new dominates old */
                                else if (new_path->rows > old_path->rows)
                                    accept_new = false; /* old dominates new */
                                else if (compare_path_costs_fuzzily(new_path,
                                                                    old_path,
                                                                    1.0000000001) == COSTS_BETTER1)
                                    remove_old = true;  /* new dominates old */
                                else
                                    accept_new = false; /* old equals or
                                                         * dominates new */
                            }
                            else if (outercmp == BMS_SUBSET1 &&
                                     new_path->rows <= old_path->rows &&
                                     new_path->parallel_safe >= old_path->parallel_safe)
                                remove_old = true;  /* new dominates old */
                            else if (outercmp == BMS_SUBSET2 &&
                                     new_path->rows >= old_path->rows &&
                                     new_path->parallel_safe <= old_path->parallel_safe)
                                accept_new = false; /* old dominates new */
                            /* else different parameterizations, keep both */
                        }
                        break;
                    case COSTS_BETTER1:
                        if (keyscmp != PATHKEYS_BETTER2)
                        {
                            outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
                                                          PATH_REQ_OUTER(old_path));
                            if ((outercmp == BMS_EQUAL ||
                                 outercmp == BMS_SUBSET1) &&
                                new_path->rows <= old_path->rows &&
                                new_path->parallel_safe >= old_path->parallel_safe)
                                remove_old = true;  /* new dominates old */
                        }
                        break;
                    case COSTS_BETTER2:
                        if (keyscmp != PATHKEYS_BETTER1)
                        {
                            outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
                                                          PATH_REQ_OUTER(old_path));
                            if ((outercmp == BMS_EQUAL ||
                                 outercmp == BMS_SUBSET2) &&
                                new_path->rows >= old_path->rows &&
                                new_path->parallel_safe <= old_path->parallel_safe)
                                accept_new = false; /* old dominates new */
                        }
                        break;
                    case COSTS_DIFFERENT:

                        /*
                         * can't get here, but keep this case to keep compiler
                         * quiet
                         */
                        break;
                }
            }
        }

        /*
         * Remove current element from pathlist if dominated by new.
         */
        if (remove_old)
        {
            parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
                                                          p1);

            /*
             * Delete the data pointed-to by the deleted cell, if possible
             */
            if (!IsA(old_path, IndexPath))
                pfree(old_path);
        }
        else
        {
            /*
             * new belongs after this old path if it has more disabled nodes
             * or if it has the same number of nodes but a greater total cost
             */
            if (new_path->disabled_nodes > old_path->disabled_nodes ||
                (new_path->disabled_nodes == old_path->disabled_nodes &&
                 new_path->total_cost >= old_path->total_cost))
                insert_at = foreach_current_index(p1) + 1;
        }

        /*
         * If we found an old path that dominates new_path, we can quit
         * scanning the pathlist; we will not add new_path, and we assume
         * new_path cannot dominate any other elements of the pathlist.
         */
        if (!accept_new)
            break;
    }

    if (accept_new)
    {
        /* Accept the new path: insert it at proper place in pathlist */
        parent_rel->pathlist =
            list_insert_nth(parent_rel->pathlist, insert_at, new_path);
    }
    else
    {
        /* Reject and recycle the new path */
        if (!IsA(new_path, IndexPath))
            pfree(new_path);
    }
}
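
/*
 * Worked example of the keep-both rule (editor's illustration): if path A
 * has (startup 0, total 100) and path B has (startup 50, total 60), neither
 * dominates the other -- A wins under a small LIMIT, B when the whole result
 * is fetched -- so add_path() keeps both.  The hypothetical check below
 * names that outcome using the static comparator defined above.
 */
#ifdef NOT_USED
static bool
paths_are_cost_incomparable_example(Path *a, Path *b)
{
    /* COSTS_DIFFERENT is exactly add_path's "keep both" cost outcome */
    return compare_path_costs_fuzzily(a, b, STD_FUZZ_FACTOR) == COSTS_DIFFERENT;
}
#endif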

/*
 * add_path_precheck
 *    Check whether a proposed new path could possibly get accepted.
 *    We assume we know the path's pathkeys and parameterization accurately,
 *    and have lower bounds for its costs.
 *
 * Note that we do not know the path's rowcount, since getting an estimate for
 * that is too expensive to do before prechecking.  We assume here that paths
 * of a superset parameterization will generate fewer rows; if that holds,
 * then paths with different parameterizations cannot dominate each other
 * and so we can simply ignore existing paths of another parameterization.
 * (In the infrequent cases where that rule of thumb fails, add_path will
 * get rid of the inferior path.)
 *
 * At the time this is called, we haven't actually built a Path structure,
 * so the required information has to be passed piecemeal.
 */
bool
add_path_precheck(RelOptInfo *parent_rel, int disabled_nodes,
                  Cost startup_cost, Cost total_cost,
                  List *pathkeys, Relids required_outer)
{
    List       *new_path_pathkeys;
    bool        consider_startup;
    ListCell   *p1;

    /* Pretend parameterized paths have no pathkeys, per add_path policy */
    new_path_pathkeys = required_outer ? NIL : pathkeys;

    /* Decide whether new path's startup cost is interesting */
    consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;

    foreach(p1, parent_rel->pathlist)
    {
        Path       *old_path = (Path *) lfirst(p1);
        PathKeysComparison keyscmp;

        /*
         * Since the pathlist is sorted by disabled_nodes and then by
         * total_cost, we can stop looking once we reach a path with more
         * disabled nodes, or the same number of disabled nodes plus a
         * total_cost larger than the new path's.
         */
        if (unlikely(old_path->disabled_nodes != disabled_nodes))
        {
            if (disabled_nodes < old_path->disabled_nodes)
                break;
        }
        else if (total_cost <= old_path->total_cost * STD_FUZZ_FACTOR)
            break;

        /*
         * We are looking for an old_path with the same parameterization (and
         * by assumption the same rowcount) that dominates the new path on
         * pathkeys as well as both cost metrics.  If we find one, we can
         * reject the new path.
         *
         * Cost comparisons here should match compare_path_costs_fuzzily.
         */
        /* new path can win on startup cost only if consider_startup */
        if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
            !consider_startup)
        {
            /* new path loses on cost, so check pathkeys... */
            List       *old_path_pathkeys;

            old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
            keyscmp = compare_pathkeys(new_path_pathkeys,
                                       old_path_pathkeys);
            if (keyscmp == PATHKEYS_EQUAL ||
                keyscmp == PATHKEYS_BETTER2)
            {
                /* new path does not win on pathkeys... */
                if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
                {
                    /* Found an old path that dominates the new one */
                    return false;
                }
            }
        }
    }

    return true;
}
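
/*
 * Hedged usage sketch (hypothetical caller, not code from this file): join
 * planning computes lower-bound costs cheaply, prechecks them, and skips
 * building the Path when it cannot survive; the try_*_path() helpers in
 * joinpath.c follow this pattern.
 */
#ifdef NOT_USED
static void
maybe_build_path_example(PlannerInfo *root, RelOptInfo *rel,
                         int disabled_nodes,
                         Cost startup_lower_bound, Cost total_lower_bound,
                         List *pathkeys, Relids required_outer)
{
    if (!add_path_precheck(rel, disabled_nodes,
                           startup_lower_bound, total_lower_bound,
                           pathkeys, required_outer))
        return;                 /* provably dominated: skip the expensive build */

    /* ... otherwise construct the real Path and add_path() it ... */
}
#endif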

/*
 * add_partial_path
 *    Like add_path, our goal here is to consider whether a path is worthy
 *    of being kept around, but the considerations here are a bit different.
 *    A partial path is one which can be executed in any number of workers in
 *    parallel such that each worker will generate a subset of the path's
 *    overall result.
 *
 * As in add_path, the partial_pathlist is kept sorted with the cheapest
 * total path in front.  This is depended on by multiple places, which
 * just take the front entry as the cheapest path without searching.
 *
 * We don't generate parameterized partial paths for several reasons.  Most
 * importantly, they're not safe to execute, because there's nothing to
 * make sure that a parallel scan within the parameterized portion of the
 * plan is running with the same value in every worker at the same time.
 * Fortunately, it seems unlikely to be worthwhile anyway, because having
 * each worker scan the entire outer relation and a subset of the inner
 * relation will generally be a terrible plan.  The inner (parameterized)
 * side of the plan will be small anyway.  There could be rare cases where
 * this wins big - e.g. if join order constraints put a 1-row relation on
 * the outer side of the topmost join with a parameterized plan on the inner
 * side - but we'll have to be content not to handle such cases until
 * somebody builds an executor infrastructure that can cope with them.
 *
 * Because we don't consider parameterized paths here, we also don't
 * need to consider the row counts as a measure of quality: every path will
 * produce the same number of rows.  Neither do we need to consider startup
 * costs: parallelism is only used for plans that will be run to completion.
 * Therefore, this routine is much simpler than add_path: it needs to
 * consider only disabled nodes, pathkeys and total cost.
 *
 * As with add_path, we pfree paths that are found to be dominated by
 * another partial path; this requires that there be no other references to
 * such paths yet.  Hence, GatherPaths must not be created for a rel until
 * we're done creating all partial paths for it.  Unlike add_path, we don't
 * take an exception for IndexPaths as partial index paths won't be
 * referenced by partial BitmapHeapPaths.
 */
void
add_partial_path(RelOptInfo *parent_rel, Path *new_path)
{
    bool        accept_new = true;  /* unless we find a superior old path */
    int         insert_at = 0;      /* where to insert new item */
    ListCell   *p1;

    /* Check for query cancel. */
    CHECK_FOR_INTERRUPTS();

    /* Path to be added must be parallel safe. */
    Assert(new_path->parallel_safe);

    /* Relation should be OK for parallelism, too. */
    Assert(parent_rel->consider_parallel);

    /*
     * As in add_path, throw out any paths which are dominated by the new
     * path, but throw out the new path if some existing path dominates it.
     */
    foreach(p1, parent_rel->partial_pathlist)
    {
        Path       *old_path = (Path *) lfirst(p1);
        bool        remove_old = false; /* unless new proves superior */
        PathKeysComparison keyscmp;

        /* Compare pathkeys. */
        keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);

        /* Unless pathkeys are incompatible, keep just one of the two paths. */
        if (keyscmp != PATHKEYS_DIFFERENT)
        {
            if (unlikely(new_path->disabled_nodes != old_path->disabled_nodes))
            {
                if (new_path->disabled_nodes > old_path->disabled_nodes)
                    accept_new = false;
                else
                    remove_old = true;
            }
            else if (new_path->total_cost > old_path->total_cost
                     * STD_FUZZ_FACTOR)
            {
                /* New path costs more; keep it only if pathkeys are better. */
                if (keyscmp != PATHKEYS_BETTER1)
                    accept_new = false;
            }
            else if (old_path->total_cost > new_path->total_cost
                     * STD_FUZZ_FACTOR)
            {
                /* Old path costs more; keep it only if pathkeys are better. */
                if (keyscmp != PATHKEYS_BETTER2)
                    remove_old = true;
            }
            else if (keyscmp == PATHKEYS_BETTER1)
            {
                /* Costs are about the same, new path has better pathkeys. */
                remove_old = true;
            }
            else if (keyscmp == PATHKEYS_BETTER2)
            {
                /* Costs are about the same, old path has better pathkeys. */
                accept_new = false;
            }
            else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
            {
                /* Pathkeys are the same, and the old path costs more. */
                remove_old = true;
            }
            else
            {
                /*
                 * Pathkeys are the same, and new path isn't materially
                 * cheaper.
                 */
                accept_new = false;
            }
        }

        /*
         * Remove current element from partial_pathlist if dominated by new.
         */
        if (remove_old)
        {
            parent_rel->partial_pathlist =
                foreach_delete_current(parent_rel->partial_pathlist, p1);
            pfree(old_path);
        }
        else
        {
            /* new belongs after this old path if it has cost >= old's */
            if (new_path->total_cost >= old_path->total_cost)
                insert_at = foreach_current_index(p1) + 1;
        }

        /*
         * If we found an old path that dominates new_path, we can quit
         * scanning the partial_pathlist; we will not add new_path, and we
         * assume new_path cannot dominate any later path.
         */
        if (!accept_new)
            break;
    }

    if (accept_new)
    {
        /* Accept the new path: insert it at proper place */
        parent_rel->partial_pathlist =
            list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
    }
    else
    {
        /* Reject and recycle the new path */
        pfree(new_path);
    }
}
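
/*
 * Hedged sketch of the required call order (hypothetical caller, not code
 * from this file): every partial path must be added before any Gather path
 * is built atop the partial_pathlist, since add_partial_path() may pfree a
 * dominated partial path that a GatherPath would otherwise still reference.
 */
#ifdef NOT_USED
static void
partial_paths_example(PlannerInfo *root, RelOptInfo *rel)
{
    /* first, add all partial-path candidates (here: a 2-worker seqscan) */
    add_partial_path(rel, create_seqscan_path(root, rel, NULL, 2));

    /*
     * Only afterwards build Gather/GatherMerge on the survivors, as
     * generate_useful_gather_paths() in allpaths.c does.
     */
    generate_useful_gather_paths(root, rel, false);
}
#endif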

/*
 * add_partial_path_precheck
 *    Check whether a proposed new partial path could possibly get accepted.
 *
 * Unlike add_path_precheck, we can ignore startup cost and parameterization,
 * since they don't matter for partial paths (see add_partial_path).  But
 * we do want to make sure we don't add a partial path if there's already
 * a complete path that dominates it, since in that case the proposed path
 * is surely a loser.
 */
bool
add_partial_path_precheck(RelOptInfo *parent_rel, int disabled_nodes,
                          Cost total_cost, List *pathkeys)
{
    ListCell   *p1;

    /*
     * Our goal here is twofold.  First, we want to find out whether this path
     * is clearly inferior to some existing partial path.  If so, we want to
     * reject it immediately.  Second, we want to find out whether this path
     * is clearly superior to some existing partial path -- at least, modulo
     * final cost computations.  If so, we definitely want to consider it.
     *
     * Unlike add_path(), we always compare pathkeys here.  This is because we
     * expect partial_pathlist to be very short, and getting a definitive
     * answer at this stage avoids the need to call add_path_precheck.
     */
    foreach(p1, parent_rel->partial_pathlist)
    {
        Path       *old_path = (Path *) lfirst(p1);
        PathKeysComparison keyscmp;

        keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
        if (keyscmp != PATHKEYS_DIFFERENT)
        {
            if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
                keyscmp != PATHKEYS_BETTER1)
                return false;
            if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
                keyscmp != PATHKEYS_BETTER2)
                return true;
        }
    }

    /*
     * This path is neither clearly inferior to an existing partial path nor
     * clearly good enough that it might replace one.  Compare it to
     * non-parallel plans.  If it loses even before accounting for the cost of
     * the Gather node, we should definitely reject it.
     *
     * Note that we pass the total_cost to add_path_precheck twice.  This is
     * because it's never advantageous to consider the startup cost of a
     * partial path; the resulting plans, if run in parallel, will be run to
     * completion.
     */
    if (!add_path_precheck(parent_rel, disabled_nodes, total_cost, total_cost,
                           pathkeys, NULL))
        return false;

    return true;
}


/*****************************************************************************
 *      PATH NODE CREATION ROUTINES
 *****************************************************************************/

/*
 * create_seqscan_path
 *    Creates a path corresponding to a sequential scan, returning the
 *    pathnode.
 */
Path *
create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
                    Relids required_outer, int parallel_workers)
{
    Path       *pathnode = makeNode(Path);

    pathnode->pathtype = T_SeqScan;
    pathnode->parent = rel;
    pathnode->pathtarget = rel->reltarget;
    pathnode->param_info = get_baserel_parampathinfo(root, rel,
                                                     required_outer);
    pathnode->parallel_aware = (parallel_workers > 0);
    pathnode->parallel_safe = rel->consider_parallel;
    pathnode->parallel_workers = parallel_workers;
    pathnode->pathkeys = NIL;   /* seqscan has unordered result */

    cost_seqscan(pathnode, root, rel, pathnode->param_info);

    return pathnode;
}
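
/*
 * Hedged note with a tiny sketch (hypothetical helper): passing a non-NULL
 * required_outer makes the resulting scan parameterized, e.g. for a LATERAL
 * reference; get_baserel_parampathinfo() then supplies the param_info that
 * cost_seqscan() charges for the pushed-down quals.
 */
#ifdef NOT_USED
static Path *
parameterized_seqscan_example(PlannerInfo *root, RelOptInfo *rel,
                              Relids lateral_relids)
{
    /* required_outer != NULL => parameterized, non-parallel scan */
    return create_seqscan_path(root, rel, lateral_relids, 0);
}
#endif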

/*
 * create_samplescan_path
 *    Creates a path node for a sampled table scan.
 */
Path *
create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
{
    Path       *pathnode = makeNode(Path);

    pathnode->pathtype = T_SampleScan;
    pathnode->parent = rel;
    pathnode->pathtarget = rel->reltarget;
    pathnode->param_info = get_baserel_parampathinfo(root, rel,
                                                     required_outer);
    pathnode->parallel_aware = false;
    pathnode->parallel_safe = rel->consider_parallel;
    pathnode->parallel_workers = 0;
    pathnode->pathkeys = NIL;   /* samplescan has unordered result */

    cost_samplescan(pathnode, root, rel, pathnode->param_info);

    return pathnode;
}

/*
 * create_index_path
 *    Creates a path node for an index scan.
 *
 * 'index' is a usable index.
 * 'indexclauses' is a list of IndexClause nodes representing clauses
 *      to be enforced as qual conditions in the scan.
 * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
 *      to be used as index ordering operators in the scan.
 * 'indexorderbycols' is an integer list of index column numbers (zero based)
 *      the ordering operators can be used with.
 * 'pathkeys' describes the ordering of the path.
 * 'indexscandir' is either ForwardScanDirection or BackwardScanDirection.
 * 'indexonly' is true if an index-only scan is wanted.
 * 'required_outer' is the set of outer relids for a parameterized path.
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *      estimates of caching behavior.
 * 'partial_path' is true if constructing a parallel index scan path.
 *
 * Returns the new path node.
 */
IndexPath *
create_index_path(PlannerInfo *root,
                  IndexOptInfo *index,
                  List *indexclauses,
                  List *indexorderbys,
                  List *indexorderbycols,
                  List *pathkeys,
                  ScanDirection indexscandir,
                  bool indexonly,
                  Relids required_outer,
                  double loop_count,
                  bool partial_path)
{
    IndexPath  *pathnode = makeNode(IndexPath);
    RelOptInfo *rel = index->rel;

    pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
    pathnode->path.parent = rel;
    pathnode->path.pathtarget = rel->reltarget;
    pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
                                                          required_outer);
    pathnode->path.parallel_aware = false;
    pathnode->path.parallel_safe = rel->consider_parallel;
    pathnode->path.parallel_workers = 0;
    pathnode->path.pathkeys = pathkeys;

    pathnode->indexinfo = index;
    pathnode->indexclauses = indexclauses;
    pathnode->indexorderbys = indexorderbys;
    pathnode->indexorderbycols = indexorderbycols;
    pathnode->indexscandir = indexscandir;

    cost_index(pathnode, root, loop_count, partial_path);

    return pathnode;
}

/*
 * create_bitmap_heap_path
 *    Creates a path node for a bitmap scan.
 *
 * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
 * 'required_outer' is the set of outer relids for a parameterized path.
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *      estimates of caching behavior.
 *
 * loop_count should match the value used when creating the component
 * IndexPaths.
 */
BitmapHeapPath *
create_bitmap_heap_path(PlannerInfo *root,
                        RelOptInfo *rel,
                        Path *bitmapqual,
                        Relids required_outer,
                        double loop_count,
                        int parallel_degree)
{
    BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);

    pathnode->path.pathtype = T_BitmapHeapScan;
    pathnode->path.parent = rel;
    pathnode->path.pathtarget = rel->reltarget;
    pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
                                                          required_outer);
    pathnode->path.parallel_aware = (parallel_degree > 0);
    pathnode->path.parallel_safe = rel->consider_parallel;
    pathnode->path.parallel_workers = parallel_degree;
    pathnode->path.pathkeys = NIL;  /* always unordered */

    pathnode->bitmapqual = bitmapqual;

    cost_bitmap_heap_scan(&pathnode->path, root, rel,
                          pathnode->path.param_info,
                          bitmapqual, loop_count);

    return pathnode;
}

/*
 * create_bitmap_and_path
 *    Creates a path node representing a BitmapAnd.
 */
BitmapAndPath *
create_bitmap_and_path(PlannerInfo *root,
                       RelOptInfo *rel,
                       List *bitmapquals)
{
    BitmapAndPath *pathnode = makeNode(BitmapAndPath);
    Relids      required_outer = NULL;
    ListCell   *lc;

    pathnode->path.pathtype = T_BitmapAnd;
    pathnode->path.parent = rel;
    pathnode->path.pathtarget = rel->reltarget;

    /*
     * Identify the required outer rels as the union of what the child paths
     * depend on.  (Alternatively, we could insist that the caller pass this
     * in, but it's more convenient and reliable to compute it here.)
     */
    foreach(lc, bitmapquals)
    {
        Path       *bitmapqual = (Path *) lfirst(lc);

        required_outer = bms_add_members(required_outer,
                                         PATH_REQ_OUTER(bitmapqual));
    }
    pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
                                                          required_outer);

    /*
     * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
     * parallel-safe if and only if rel->consider_parallel is set.  So, we can
     * set the flag for this path based only on the relation-level flag,
     * without actually iterating over the list of children.
     */
    pathnode->path.parallel_aware = false;
    pathnode->path.parallel_safe = rel->consider_parallel;
    pathnode->path.parallel_workers = 0;

    pathnode->path.pathkeys = NIL;  /* always unordered */

    pathnode->bitmapquals = bitmapquals;

    /* this sets bitmapselectivity as well as the regular cost fields: */
    cost_bitmap_and_node(pathnode, root);

    return pathnode;
}

/*
 * create_bitmap_or_path
 *    Creates a path node representing a BitmapOr.
 */
BitmapOrPath *
create_bitmap_or_path(PlannerInfo *root,
                      RelOptInfo *rel,
                      List *bitmapquals)
{
    BitmapOrPath *pathnode = makeNode(BitmapOrPath);
    Relids      required_outer = NULL;
    ListCell   *lc;

    pathnode->path.pathtype = T_BitmapOr;
    pathnode->path.parent = rel;
    pathnode->path.pathtarget = rel->reltarget;

    /*
     * Identify the required outer rels as the union of what the child paths
     * depend on.  (Alternatively, we could insist that the caller pass this
     * in, but it's more convenient and reliable to compute it here.)
     */
    foreach(lc, bitmapquals)
    {
        Path       *bitmapqual = (Path *) lfirst(lc);

        required_outer = bms_add_members(required_outer,
                                         PATH_REQ_OUTER(bitmapqual));
    }
    pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
                                                          required_outer);

    /*
     * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
     * parallel-safe if and only if rel->consider_parallel is set.  So, we can
     * set the flag for this path based only on the relation-level flag,
     * without actually iterating over the list of children.
     */
    pathnode->path.parallel_aware = false;
    pathnode->path.parallel_safe = rel->consider_parallel;
    pathnode->path.parallel_workers = 0;

    pathnode->path.pathkeys = NIL;  /* always unordered */

    pathnode->bitmapquals = bitmapquals;

    /* this sets bitmapselectivity as well as the regular cost fields: */
    cost_bitmap_or_node(pathnode, root);

    return pathnode;
}
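
/*
 * Hedged composition sketch (hypothetical helper, not in the original file):
 * bitmap paths nest, so "a OR b" over two index paths becomes a BitmapOrPath
 * feeding a BitmapHeapPath.  Per the comments above, loop_count should match
 * the value used when the component IndexPaths were created (1.0 here).
 */
#ifdef NOT_USED
static BitmapHeapPath *
bitmap_or_example(PlannerInfo *root, RelOptInfo *rel,
                  IndexPath *apath, IndexPath *bpath)
{
    Path       *orpath;

    orpath = (Path *) create_bitmap_or_path(root, rel,
                                            list_make2(apath, bpath));
    return create_bitmap_heap_path(root, rel, orpath,
                                   NULL,    /* required_outer */
                                   1.0,     /* loop_count */
                                   0);      /* parallel_degree */
}
#endif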
1229 | | |
1230 | | /* |
1231 | | * create_tidscan_path |
1232 | | * Creates a path corresponding to a scan by TID, returning the pathnode. |
1233 | | */ |
1234 | | TidPath * |
1235 | | create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals, |
1236 | | Relids required_outer) |
1237 | 0 | { |
1238 | 0 | TidPath *pathnode = makeNode(TidPath); |
1239 | |
|
1240 | 0 | pathnode->path.pathtype = T_TidScan; |
1241 | 0 | pathnode->path.parent = rel; |
1242 | 0 | pathnode->path.pathtarget = rel->reltarget; |
1243 | 0 | pathnode->path.param_info = get_baserel_parampathinfo(root, rel, |
1244 | 0 | required_outer); |
1245 | 0 | pathnode->path.parallel_aware = false; |
1246 | 0 | pathnode->path.parallel_safe = rel->consider_parallel; |
1247 | 0 | pathnode->path.parallel_workers = 0; |
1248 | 0 | pathnode->path.pathkeys = NIL; /* always unordered */ |
1249 | |
|
1250 | 0 | pathnode->tidquals = tidquals; |
1251 | |
|
1252 | 0 | cost_tidscan(&pathnode->path, root, rel, tidquals, |
1253 | 0 | pathnode->path.param_info); |
1254 | |
|
1255 | 0 | return pathnode; |
1256 | 0 | } |
1257 | | |
1258 | | /* |
1259 | | * create_tidrangescan_path |
1260 | | * Creates a path corresponding to a scan by a range of TIDs, returning |
1261 | | * the pathnode. |
1262 | | */ |
1263 | | TidRangePath * |
1264 | | create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel, |
1265 | | List *tidrangequals, Relids required_outer) |
1266 | 0 | { |
1267 | 0 | TidRangePath *pathnode = makeNode(TidRangePath); |
1268 | |
|
1269 | 0 | pathnode->path.pathtype = T_TidRangeScan; |
1270 | 0 | pathnode->path.parent = rel; |
1271 | 0 | pathnode->path.pathtarget = rel->reltarget; |
1272 | 0 | pathnode->path.param_info = get_baserel_parampathinfo(root, rel, |
1273 | 0 | required_outer); |
1274 | 0 | pathnode->path.parallel_aware = false; |
1275 | 0 | pathnode->path.parallel_safe = rel->consider_parallel; |
1276 | 0 | pathnode->path.parallel_workers = 0; |
1277 | 0 | pathnode->path.pathkeys = NIL; /* always unordered */ |
1278 | |
|
1279 | 0 | pathnode->tidrangequals = tidrangequals; |
1280 | |
|
1281 | 0 | cost_tidrangescan(&pathnode->path, root, rel, tidrangequals, |
1282 | 0 | pathnode->path.param_info); |
1283 | |
|
1284 | 0 | return pathnode; |
1285 | 0 | } |
1286 | | |
1287 | | /* |
1288 | | * create_append_path |
1289 | | * Creates a path corresponding to an Append plan, returning the |
1290 | | * pathnode. |
1291 | | * |
1292 | | * Note that we must handle subpaths = NIL, representing a dummy access path. |
1293 | | * Also, there are callers that pass root = NULL. |
1294 | | * |
1295 | | * 'rows', when passed as a non-negative number, will be used to overwrite the |
1296 | | * returned path's row estimate. Otherwise, the row estimate is calculated |
1297 | | * by totalling the row estimates from the 'subpaths' list. |
1298 | | */ |
1299 | | AppendPath * |
1300 | | create_append_path(PlannerInfo *root, |
1301 | | RelOptInfo *rel, |
1302 | | List *subpaths, List *partial_subpaths, |
1303 | | List *pathkeys, Relids required_outer, |
1304 | | int parallel_workers, bool parallel_aware, |
1305 | | double rows) |
1306 | 0 | { |
1307 | 0 | AppendPath *pathnode = makeNode(AppendPath); |
1308 | 0 | ListCell *l; |
1309 | |
|
1310 | 0 | Assert(!parallel_aware || parallel_workers > 0); |
1311 | |
|
1312 | 0 | pathnode->path.pathtype = T_Append; |
1313 | 0 | pathnode->path.parent = rel; |
1314 | 0 | pathnode->path.pathtarget = rel->reltarget; |
1315 | | |
1316 | | /* |
1317 | | * If this is for a baserel (not a join or non-leaf partition), we prefer |
1318 | | * to apply get_baserel_parampathinfo to construct a full ParamPathInfo |
1319 | | * for the path. This supports building a Memoize path atop this path, |
1320 | | * and if this is a partitioned table the info may be useful for run-time |
1321 | | * pruning (cf make_partition_pruneinfo()). |
1322 | | * |
1323 | | * However, if we don't have "root" then that won't work and we fall back |
1324 | | * on the simpler get_appendrel_parampathinfo. There's no point in doing |
1325 | | * the more expensive thing for a dummy path, either. |
1326 | | */ |
1327 | 0 | if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL) |
1328 | 0 | pathnode->path.param_info = get_baserel_parampathinfo(root, |
1329 | 0 | rel, |
1330 | 0 | required_outer); |
1331 | 0 | else |
1332 | 0 | pathnode->path.param_info = get_appendrel_parampathinfo(rel, |
1333 | 0 | required_outer); |
1334 | |
1335 | 0 | pathnode->path.parallel_aware = parallel_aware; |
1336 | 0 | pathnode->path.parallel_safe = rel->consider_parallel; |
1337 | 0 | pathnode->path.parallel_workers = parallel_workers; |
1338 | 0 | pathnode->path.pathkeys = pathkeys; |
1339 | | |
1340 | | /* |
1341 | | * For parallel append, non-partial paths are sorted by descending total |
1342 | | * costs. That way, the total time to finish all non-partial paths is |
1343 | | * minimized. Also, the partial paths are sorted by descending startup |
1344 | | * costs. Some paths may require their startup work to be done by a
1345 | | * single worker. In such cases, it's better for the workers to choose
1346 | | * the expensive ones first, whereas the leader should choose the
1347 | | * cheapest startup plan.
1348 | | */ |
1349 | 0 | if (pathnode->path.parallel_aware) |
1350 | 0 | { |
1351 | | /* |
1352 | | * We mustn't fiddle with the order of subpaths when the Append has |
1353 | | * pathkeys. The order they're listed in is critical to keeping the |
1354 | | * pathkeys valid. |
1355 | | */ |
1356 | 0 | Assert(pathkeys == NIL); |
1357 | |
1358 | 0 | list_sort(subpaths, append_total_cost_compare); |
1359 | 0 | list_sort(partial_subpaths, append_startup_cost_compare); |
1360 | 0 | } |
1361 | 0 | pathnode->first_partial_path = list_length(subpaths); |
1362 | 0 | pathnode->subpaths = list_concat(subpaths, partial_subpaths); |
1363 | | |
1364 | | /* |
1365 | | * Apply query-wide LIMIT if known and path is for sole base relation. |
1366 | | * (Handling this at this low level is a bit klugy.) |
1367 | | */ |
1368 | 0 | if (root != NULL && bms_equal(rel->relids, root->all_query_rels)) |
1369 | 0 | pathnode->limit_tuples = root->limit_tuples; |
1370 | 0 | else |
1371 | 0 | pathnode->limit_tuples = -1.0; |
1372 | |
1373 | 0 | foreach(l, pathnode->subpaths) |
1374 | 0 | { |
1375 | 0 | Path *subpath = (Path *) lfirst(l); |
1376 | |
1377 | 0 | pathnode->path.parallel_safe = pathnode->path.parallel_safe && |
1378 | 0 | subpath->parallel_safe; |
1379 | | |
1380 | | /* All child paths must have same parameterization */ |
1381 | 0 | Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer)); |
1382 | 0 | } |
1383 | |
1384 | 0 | Assert(!parallel_aware || pathnode->path.parallel_safe); |
1385 | | |
1386 | | /* |
1387 | | * If there's exactly one child path then the output of the Append is |
1388 | | * necessarily ordered the same as the child's, so we can inherit the |
1389 | | * child's pathkeys if any, overriding whatever the caller might've said. |
1390 | | * Furthermore, if the child's parallel awareness matches the Append's, |
1391 | | * then the Append is a no-op and will be discarded later (in setrefs.c). |
1392 | | * Then we can inherit the child's size and cost too, effectively charging |
1393 | | * zero for the Append. Otherwise, we must do the normal costsize |
1394 | | * calculation. |
1395 | | */ |
1396 | 0 | if (list_length(pathnode->subpaths) == 1) |
1397 | 0 | { |
1398 | 0 | Path *child = (Path *) linitial(pathnode->subpaths); |
1399 | |
1400 | 0 | if (child->parallel_aware == parallel_aware) |
1401 | 0 | { |
1402 | 0 | pathnode->path.rows = child->rows; |
1403 | 0 | pathnode->path.startup_cost = child->startup_cost; |
1404 | 0 | pathnode->path.total_cost = child->total_cost; |
1405 | 0 | } |
1406 | 0 | else |
1407 | 0 | cost_append(pathnode); |
1408 | | /* Must do this last, else cost_append complains */ |
1409 | 0 | pathnode->path.pathkeys = child->pathkeys; |
1410 | 0 | } |
1411 | 0 | else |
1412 | 0 | cost_append(pathnode); |
1413 | | |
1414 | | /* If the caller provided a row estimate, override the computed value. */ |
1415 | 0 | if (rows >= 0) |
1416 | 0 | pathnode->path.rows = rows; |
1417 | |
1418 | 0 | return pathnode; |
1419 | 0 | } |
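| |
| | /*
| |  * Illustrative sketch, not part of the original source: the
| |  * subpaths = NIL case noted in the header is how callers such as
| |  * mark_dummy_rel() build a dummy path for a provably empty relation.
| |  * A call of roughly this shape yields a zero-row, zero-cost Append
| |  * with no children:
| |  *
| |  *   add_path(rel, (Path *) create_append_path(root, rel,
| |  *                                             NIL, NIL, NIL, NULL,
| |  *                                             0, false, -1));
| |  *
| |  * Passing rows = -1 leaves the estimate to the (empty) subpath total.
| |  */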
1420 | | |
1421 | | /* |
1422 | | * append_total_cost_compare |
1423 | | * list_sort comparator for sorting append child paths |
1424 | | * by total_cost descending |
1425 | | * |
1426 | | * For equal total costs, we fall back to comparing startup costs; if those |
1427 | | * are equal too, break ties using bms_compare on the paths' relids. |
1428 | | * (This is to avoid getting unpredictable results from list_sort.) |
1429 | | */ |
1430 | | static int |
1431 | | append_total_cost_compare(const ListCell *a, const ListCell *b) |
1432 | 0 | { |
1433 | 0 | Path *path1 = (Path *) lfirst(a); |
1434 | 0 | Path *path2 = (Path *) lfirst(b); |
1435 | 0 | int cmp; |
1436 | |
1437 | 0 | cmp = compare_path_costs(path1, path2, TOTAL_COST); |
1438 | 0 | if (cmp != 0) |
1439 | 0 | return -cmp; |
1440 | 0 | return bms_compare(path1->parent->relids, path2->parent->relids); |
1441 | 0 | } |
1442 | | |
1443 | | /* |
1444 | | * append_startup_cost_compare |
1445 | | * list_sort comparator for sorting append child paths |
1446 | | * by startup_cost descending |
1447 | | * |
1448 | | * For equal startup costs, we fall back to comparing total costs; if those |
1449 | | * are equal too, break ties using bms_compare on the paths' relids. |
1450 | | * (This is to avoid getting unpredictable results from list_sort.) |
1451 | | */ |
1452 | | static int |
1453 | | append_startup_cost_compare(const ListCell *a, const ListCell *b) |
1454 | 0 | { |
1455 | 0 | Path *path1 = (Path *) lfirst(a); |
1456 | 0 | Path *path2 = (Path *) lfirst(b); |
1457 | 0 | int cmp; |
1458 | |
1459 | 0 | cmp = compare_path_costs(path1, path2, STARTUP_COST); |
1460 | 0 | if (cmp != 0) |
1461 | 0 | return -cmp; |
1462 | 0 | return bms_compare(path1->parent->relids, path2->parent->relids); |
1463 | 0 | } |
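| |
| | /*
| |  * Sketch of the convention shared by the two comparators above:
| |  * compare_path_costs() reports ascending order, so negating a nonzero
| |  * result makes list_sort() produce a descending ordering. Given child
| |  * paths with total costs 10, 30, and 20 (and no disabled nodes),
| |  *
| |  *   list_sort(subpaths, append_total_cost_compare);
| |  *
| |  * leaves them ordered 30, 20, 10, i.e. most expensive first.
| |  */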
1464 | | |
1465 | | /* |
1466 | | * create_merge_append_path |
1467 | | * Creates a path corresponding to a MergeAppend plan, returning the |
1468 | | * pathnode. |
1469 | | */ |
1470 | | MergeAppendPath * |
1471 | | create_merge_append_path(PlannerInfo *root, |
1472 | | RelOptInfo *rel, |
1473 | | List *subpaths, |
1474 | | List *pathkeys, |
1475 | | Relids required_outer) |
1476 | 0 | { |
1477 | 0 | MergeAppendPath *pathnode = makeNode(MergeAppendPath); |
1478 | 0 | int input_disabled_nodes; |
1479 | 0 | Cost input_startup_cost; |
1480 | 0 | Cost input_total_cost; |
1481 | 0 | ListCell *l; |
1482 | | |
1483 | | /* |
1484 | | * We don't currently support parameterized MergeAppend paths, as |
1485 | | * explained in the comments for generate_orderedappend_paths. |
1486 | | */ |
1487 | 0 | Assert(bms_is_empty(rel->lateral_relids) && bms_is_empty(required_outer)); |
1488 | |
1489 | 0 | pathnode->path.pathtype = T_MergeAppend; |
1490 | 0 | pathnode->path.parent = rel; |
1491 | 0 | pathnode->path.pathtarget = rel->reltarget; |
1492 | 0 | pathnode->path.param_info = NULL; |
1493 | 0 | pathnode->path.parallel_aware = false; |
1494 | 0 | pathnode->path.parallel_safe = rel->consider_parallel; |
1495 | 0 | pathnode->path.parallel_workers = 0; |
1496 | 0 | pathnode->path.pathkeys = pathkeys; |
1497 | 0 | pathnode->subpaths = subpaths; |
1498 | | |
1499 | | /* |
1500 | | * Apply query-wide LIMIT if known and path is for sole base relation. |
1501 | | * (Handling this at this low level is a bit klugy.) |
1502 | | */ |
1503 | 0 | if (bms_equal(rel->relids, root->all_query_rels)) |
1504 | 0 | pathnode->limit_tuples = root->limit_tuples; |
1505 | 0 | else |
1506 | 0 | pathnode->limit_tuples = -1.0; |
1507 | | |
1508 | | /* |
1509 | | * Add up the sizes and costs of the input paths. |
1510 | | */ |
1511 | 0 | pathnode->path.rows = 0; |
1512 | 0 | input_disabled_nodes = 0; |
1513 | 0 | input_startup_cost = 0; |
1514 | 0 | input_total_cost = 0; |
1515 | 0 | foreach(l, subpaths) |
1516 | 0 | { |
1517 | 0 | Path *subpath = (Path *) lfirst(l); |
1518 | | |
1519 | | /* All child paths should be unparameterized */ |
1520 | 0 | Assert(bms_is_empty(PATH_REQ_OUTER(subpath))); |
1521 | |
1522 | 0 | pathnode->path.rows += subpath->rows; |
1523 | 0 | pathnode->path.parallel_safe = pathnode->path.parallel_safe && |
1524 | 0 | subpath->parallel_safe; |
1525 | |
1526 | 0 | if (pathkeys_contained_in(pathkeys, subpath->pathkeys)) |
1527 | 0 | { |
1528 | | /* Subpath is adequately ordered, we won't need to sort it */ |
1529 | 0 | input_disabled_nodes += subpath->disabled_nodes; |
1530 | 0 | input_startup_cost += subpath->startup_cost; |
1531 | 0 | input_total_cost += subpath->total_cost; |
1532 | 0 | } |
1533 | 0 | else |
1534 | 0 | { |
1535 | | /* We'll need to insert a Sort node, so include cost for that */ |
1536 | 0 | Path sort_path; /* dummy for result of cost_sort */ |
1537 | |
1538 | 0 | cost_sort(&sort_path, |
1539 | 0 | root, |
1540 | 0 | pathkeys, |
1541 | 0 | subpath->disabled_nodes, |
1542 | 0 | subpath->total_cost, |
1543 | 0 | subpath->rows, |
1544 | 0 | subpath->pathtarget->width, |
1545 | 0 | 0.0, |
1546 | 0 | work_mem, |
1547 | 0 | pathnode->limit_tuples); |
1548 | 0 | input_disabled_nodes += sort_path.disabled_nodes; |
1549 | 0 | input_startup_cost += sort_path.startup_cost; |
1550 | 0 | input_total_cost += sort_path.total_cost; |
1551 | 0 | } |
1552 | 0 | } |
1553 | | |
1554 | | /* |
1555 | | * Now we can compute total costs of the MergeAppend. If there's exactly |
1556 | | * one child path and its parallel awareness matches that of the |
1557 | | * MergeAppend, then the MergeAppend is a no-op and will be discarded |
1558 | | * later (in setrefs.c); otherwise we do the normal cost calculation. |
1559 | | */ |
1560 | 0 | if (list_length(subpaths) == 1 && |
1561 | 0 | ((Path *) linitial(subpaths))->parallel_aware == |
1562 | 0 | pathnode->path.parallel_aware) |
1563 | 0 | { |
1564 | 0 | pathnode->path.disabled_nodes = input_disabled_nodes; |
1565 | 0 | pathnode->path.startup_cost = input_startup_cost; |
1566 | 0 | pathnode->path.total_cost = input_total_cost; |
1567 | 0 | } |
1568 | 0 | else |
1569 | 0 | cost_merge_append(&pathnode->path, root, |
1570 | 0 | pathkeys, list_length(subpaths), |
1571 | 0 | input_disabled_nodes, |
1572 | 0 | input_startup_cost, input_total_cost, |
1573 | 0 | pathnode->path.rows); |
1574 | |
1575 | 0 | return pathnode; |
1576 | 0 | } |
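| |
| | /*
| |  * Worked example of the per-subpath test above, under assumed sort
| |  * keys: if the MergeAppend's pathkeys are (a) and a subpath is sorted
| |  * on (a, b), pathkeys_contained_in() accepts it and only the subpath's
| |  * own costs are added; a subpath sorted on (b) alone fails the test,
| |  * so the estimate from cost_sort() is charged for it as well.
| |  */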
1577 | | |
1578 | | /* |
1579 | | * create_group_result_path |
1580 | | * Creates a path representing a Result-and-nothing-else plan. |
1581 | | * |
1582 | | * This is only used for degenerate grouping cases, in which we know we |
1583 | | * need to produce one result row, possibly filtered by a HAVING qual. |
1584 | | */ |
1585 | | GroupResultPath * |
1586 | | create_group_result_path(PlannerInfo *root, RelOptInfo *rel, |
1587 | | PathTarget *target, List *havingqual) |
1588 | 0 | { |
1589 | 0 | GroupResultPath *pathnode = makeNode(GroupResultPath); |
1590 | |
1591 | 0 | pathnode->path.pathtype = T_Result; |
1592 | 0 | pathnode->path.parent = rel; |
1593 | 0 | pathnode->path.pathtarget = target; |
1594 | 0 | pathnode->path.param_info = NULL; /* there are no other rels... */ |
1595 | 0 | pathnode->path.parallel_aware = false; |
1596 | 0 | pathnode->path.parallel_safe = rel->consider_parallel; |
1597 | 0 | pathnode->path.parallel_workers = 0; |
1598 | 0 | pathnode->path.pathkeys = NIL; |
1599 | 0 | pathnode->quals = havingqual; |
1600 | | |
1601 | | /* |
1602 | | * We can't quite use cost_resultscan() because the quals we want to |
1603 | | * account for are not baserestrict quals of the rel. Might as well just |
1604 | | * hack it here. |
1605 | | */ |
1606 | 0 | pathnode->path.rows = 1; |
1607 | 0 | pathnode->path.startup_cost = target->cost.startup; |
1608 | 0 | pathnode->path.total_cost = target->cost.startup + |
1609 | 0 | cpu_tuple_cost + target->cost.per_tuple; |
1610 | | |
1611 | | /* |
1612 | | * Add cost of qual, if any --- but we ignore its selectivity, since our |
1613 | | * rowcount estimate should be 1 no matter what the qual is. |
1614 | | */ |
1615 | 0 | if (havingqual) |
1616 | 0 | { |
1617 | 0 | QualCost qual_cost; |
1618 | |
1619 | 0 | cost_qual_eval(&qual_cost, havingqual, root); |
1620 | | /* havingqual is evaluated once at startup */ |
1621 | 0 | pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple; |
1622 | 0 | pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple; |
1623 | 0 | } |
1624 | |
1625 | 0 | return pathnode; |
1626 | 0 | } |
1627 | | |
1628 | | /* |
1629 | | * create_material_path |
1630 | | * Creates a path corresponding to a Material plan, returning the |
1631 | | * pathnode. |
1632 | | */ |
1633 | | MaterialPath * |
1634 | | create_material_path(RelOptInfo *rel, Path *subpath) |
1635 | 0 | { |
1636 | 0 | MaterialPath *pathnode = makeNode(MaterialPath); |
1637 | |
1638 | 0 | Assert(subpath->parent == rel); |
1639 | |
1640 | 0 | pathnode->path.pathtype = T_Material; |
1641 | 0 | pathnode->path.parent = rel; |
1642 | 0 | pathnode->path.pathtarget = rel->reltarget; |
1643 | 0 | pathnode->path.param_info = subpath->param_info; |
1644 | 0 | pathnode->path.parallel_aware = false; |
1645 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
1646 | 0 | subpath->parallel_safe; |
1647 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
1648 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
1649 | |
1650 | 0 | pathnode->subpath = subpath; |
1651 | |
1652 | 0 | cost_material(&pathnode->path, |
1653 | 0 | subpath->disabled_nodes, |
1654 | 0 | subpath->startup_cost, |
1655 | 0 | subpath->total_cost, |
1656 | 0 | subpath->rows, |
1657 | 0 | subpath->pathtarget->width); |
1658 | |
1659 | 0 | return pathnode; |
1660 | 0 | } |
1661 | | |
1662 | | /* |
1663 | | * create_memoize_path |
1664 | | * Creates a path corresponding to a Memoize plan, returning the pathnode. |
1665 | | */ |
1666 | | MemoizePath * |
1667 | | create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, |
1668 | | List *param_exprs, List *hash_operators, |
1669 | | bool singlerow, bool binary_mode, double calls) |
1670 | 0 | { |
1671 | 0 | MemoizePath *pathnode = makeNode(MemoizePath); |
1672 | |
1673 | 0 | Assert(subpath->parent == rel); |
1674 | |
1675 | 0 | pathnode->path.pathtype = T_Memoize; |
1676 | 0 | pathnode->path.parent = rel; |
1677 | 0 | pathnode->path.pathtarget = rel->reltarget; |
1678 | 0 | pathnode->path.param_info = subpath->param_info; |
1679 | 0 | pathnode->path.parallel_aware = false; |
1680 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
1681 | 0 | subpath->parallel_safe; |
1682 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
1683 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
1684 | |
1685 | 0 | pathnode->subpath = subpath; |
1686 | 0 | pathnode->hash_operators = hash_operators; |
1687 | 0 | pathnode->param_exprs = param_exprs; |
1688 | 0 | pathnode->singlerow = singlerow; |
1689 | 0 | pathnode->binary_mode = binary_mode; |
1690 | 0 | pathnode->calls = clamp_row_est(calls); |
1691 | | |
1692 | | /* |
1693 | | * For now we set est_entries to 0. cost_memoize_rescan() does all the |
1694 | | * hard work to determine how many cache entries there are likely to be, |
1695 | | * so it seems best to leave it up to that function to fill this field in. |
1696 | | * If left at 0, the executor will make a guess at a good value. |
1697 | | */ |
1698 | 0 | pathnode->est_entries = 0; |
1699 | | |
1700 | | /* we should not generate this path type when enable_memoize=false */ |
1701 | 0 | Assert(enable_memoize); |
1702 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
1703 | | |
1704 | | /* |
1705 | | * Add a small additional charge for caching the first entry. All the |
1706 | | * harder calculations for rescans are performed in cost_memoize_rescan(). |
1707 | | */ |
1708 | 0 | pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost; |
1709 | 0 | pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost; |
1710 | 0 | pathnode->path.rows = subpath->rows; |
1711 | |
1712 | 0 | return pathnode; |
1713 | 0 | } |
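| |
| | /*
| |  * Hedged usage sketch: the expected caller is get_memoize_path() in
| |  * joinpath.c, which caches the parameterized inner side of a nestloop.
| |  * The call looks roughly like
| |  *
| |  *   (Path *) create_memoize_path(root, innerrel, inner_path,
| |  *                                param_exprs, hash_operators,
| |  *                                extra->inner_unique, binary_mode,
| |  *                                outer_path->rows);
| |  *
| |  * with 'calls', the expected number of rescans, taken from the outer
| |  * side's row estimate.
| |  */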
1714 | | |
1715 | | /* |
1716 | | * create_unique_path |
1717 | | * Creates a path representing elimination of distinct rows from the |
1718 | | * input data. Distinct-ness is defined according to the needs of the |
1719 | | * semijoin represented by sjinfo. If it is not possible to identify |
1720 | | * how to make the data unique, NULL is returned. |
1721 | | * |
1722 | | * If used at all, this is likely to be called repeatedly on the same rel; |
1723 | | * and the input subpath should always be the same (the cheapest_total path |
1724 | | * for the rel). So we cache the result. |
1725 | | */ |
1726 | | UniquePath * |
1727 | | create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, |
1728 | | SpecialJoinInfo *sjinfo) |
1729 | 0 | { |
1730 | 0 | UniquePath *pathnode; |
1731 | 0 | Path sort_path; /* dummy for result of cost_sort */ |
1732 | 0 | Path agg_path; /* dummy for result of cost_agg */ |
1733 | 0 | MemoryContext oldcontext; |
1734 | 0 | int numCols; |
1735 | | |
1736 | | /* Caller made a mistake if subpath isn't cheapest_total ... */ |
1737 | 0 | Assert(subpath == rel->cheapest_total_path); |
1738 | 0 | Assert(subpath->parent == rel); |
1739 | | /* ... or if SpecialJoinInfo is the wrong one */ |
1740 | 0 | Assert(sjinfo->jointype == JOIN_SEMI); |
1741 | 0 | Assert(bms_equal(rel->relids, sjinfo->syn_righthand)); |
1742 | | |
1743 | | /* If result already cached, return it */ |
1744 | 0 | if (rel->cheapest_unique_path) |
1745 | 0 | return (UniquePath *) rel->cheapest_unique_path; |
1746 | | |
1747 | | /* If it's not possible to unique-ify, return NULL */ |
1748 | 0 | if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash)) |
1749 | 0 | return NULL; |
1750 | | |
1751 | | /* |
1752 | | * When called during GEQO join planning, we are in a short-lived memory |
1753 | | * context. We must make sure that the path and any subsidiary data |
1754 | | * structures created for a baserel survive the GEQO cycle, else the |
1755 | | * baserel is trashed for future GEQO cycles. On the other hand, when we |
1756 | | * are creating those for a joinrel during GEQO, we don't want them to |
1757 | | * clutter the main planning context. Upshot is that the best solution is |
1758 | | * to explicitly allocate memory in the same context the given RelOptInfo |
1759 | | * is in. |
1760 | | */ |
1761 | 0 | oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel)); |
1762 | |
1763 | 0 | pathnode = makeNode(UniquePath); |
1764 | |
1765 | 0 | pathnode->path.pathtype = T_Unique; |
1766 | 0 | pathnode->path.parent = rel; |
1767 | 0 | pathnode->path.pathtarget = rel->reltarget; |
1768 | 0 | pathnode->path.param_info = subpath->param_info; |
1769 | 0 | pathnode->path.parallel_aware = false; |
1770 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
1771 | 0 | subpath->parallel_safe; |
1772 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
1773 | | |
1774 | | /* |
1775 | | * Assume the output is unsorted, since we don't necessarily have pathkeys |
1776 | | * to represent it. (This might get overridden below.) |
1777 | | */ |
1778 | 0 | pathnode->path.pathkeys = NIL; |
1779 | |
1780 | 0 | pathnode->subpath = subpath; |
1781 | | |
1782 | | /* |
1783 | | * Under GEQO and when planning child joins, the sjinfo might be |
1784 | | * short-lived, so we'd better make copies of data structures we extract |
1785 | | * from it. |
1786 | | */ |
1787 | 0 | pathnode->in_operators = copyObject(sjinfo->semi_operators); |
1788 | 0 | pathnode->uniq_exprs = copyObject(sjinfo->semi_rhs_exprs); |
1789 | | |
1790 | | /* |
1791 | | * If the input is a relation and it has a unique index that proves the |
1792 | | * semi_rhs_exprs are unique, then we don't need to do anything. Note |
1793 | | * that relation_has_unique_index_for automatically considers restriction |
1794 | | * clauses for the rel, as well. |
1795 | | */ |
1796 | 0 | if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree && |
1797 | 0 | relation_has_unique_index_for(root, rel, NIL, |
1798 | 0 | sjinfo->semi_rhs_exprs, |
1799 | 0 | sjinfo->semi_operators)) |
1800 | 0 | { |
1801 | 0 | pathnode->umethod = UNIQUE_PATH_NOOP; |
1802 | 0 | pathnode->path.rows = rel->rows; |
1803 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
1804 | 0 | pathnode->path.startup_cost = subpath->startup_cost; |
1805 | 0 | pathnode->path.total_cost = subpath->total_cost; |
1806 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
1807 | |
1808 | 0 | rel->cheapest_unique_path = (Path *) pathnode; |
1809 | |
1810 | 0 | MemoryContextSwitchTo(oldcontext); |
1811 | |
1812 | 0 | return pathnode; |
1813 | 0 | } |
1814 | | |
1815 | | /* |
1816 | | * If the input is a subquery whose output must be unique already, then we |
1817 | | * don't need to do anything. The test for uniqueness has to consider |
1818 | | * exactly which columns we are extracting; for example "SELECT DISTINCT |
1819 | | * x,y" doesn't guarantee that x alone is distinct. So we cannot check for |
1820 | | * this optimization unless semi_rhs_exprs consists only of simple Vars |
1821 | | * referencing subquery outputs. (Possibly we could do something with |
1822 | | * expressions in the subquery outputs, too, but for now keep it simple.) |
1823 | | */ |
1824 | 0 | if (rel->rtekind == RTE_SUBQUERY) |
1825 | 0 | { |
1826 | 0 | RangeTblEntry *rte = planner_rt_fetch(rel->relid, root); |
1827 | |
1828 | 0 | if (query_supports_distinctness(rte->subquery)) |
1829 | 0 | { |
1830 | 0 | List *sub_tlist_colnos; |
1831 | |
1832 | 0 | sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs, |
1833 | 0 | rel->relid); |
1834 | |
1835 | 0 | if (sub_tlist_colnos && |
1836 | 0 | query_is_distinct_for(rte->subquery, |
1837 | 0 | sub_tlist_colnos, |
1838 | 0 | sjinfo->semi_operators)) |
1839 | 0 | { |
1840 | 0 | pathnode->umethod = UNIQUE_PATH_NOOP; |
1841 | 0 | pathnode->path.rows = rel->rows; |
1842 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
1843 | 0 | pathnode->path.startup_cost = subpath->startup_cost; |
1844 | 0 | pathnode->path.total_cost = subpath->total_cost; |
1845 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
1846 | |
1847 | 0 | rel->cheapest_unique_path = (Path *) pathnode; |
1848 | |
1849 | 0 | MemoryContextSwitchTo(oldcontext); |
1850 | |
1851 | 0 | return pathnode; |
1852 | 0 | } |
1853 | 0 | } |
1854 | 0 | } |
1855 | | |
1856 | | /* Estimate number of output rows */ |
1857 | 0 | pathnode->path.rows = estimate_num_groups(root, |
1858 | 0 | sjinfo->semi_rhs_exprs, |
1859 | 0 | rel->rows, |
1860 | 0 | NULL, |
1861 | 0 | NULL); |
1862 | 0 | numCols = list_length(sjinfo->semi_rhs_exprs); |
1863 | |
1864 | 0 | if (sjinfo->semi_can_btree) |
1865 | 0 | { |
1866 | | /* |
1867 | | * Estimate cost for sort+unique implementation |
1868 | | */ |
1869 | 0 | cost_sort(&sort_path, root, NIL, |
1870 | 0 | subpath->disabled_nodes, |
1871 | 0 | subpath->total_cost, |
1872 | 0 | rel->rows, |
1873 | 0 | subpath->pathtarget->width, |
1874 | 0 | 0.0, |
1875 | 0 | work_mem, |
1876 | 0 | -1.0); |
1877 | | |
1878 | | /* |
1879 | | * Charge one cpu_operator_cost per comparison per input tuple. We |
1880 | | * assume all columns get compared for most of the tuples. (XXX
1881 | | * probably this is an overestimate.) This should agree with |
1882 | | * create_upper_unique_path. |
1883 | | */ |
1884 | 0 | sort_path.total_cost += cpu_operator_cost * rel->rows * numCols; |
1885 | 0 | } |
1886 | |
1887 | 0 | if (sjinfo->semi_can_hash) |
1888 | 0 | { |
1889 | | /* |
1890 | | * Estimate the overhead per hashtable entry at 64 bytes (same as in |
1891 | | * planner.c). |
1892 | | */ |
1893 | 0 | int hashentrysize = subpath->pathtarget->width + 64; |
1894 | |
1895 | 0 | if (hashentrysize * pathnode->path.rows > get_hash_memory_limit()) |
1896 | 0 | { |
1897 | | /* |
1898 | | * We should not try to hash. Hack the SpecialJoinInfo to |
1899 | | * remember this, in case we come through here again. |
1900 | | */ |
1901 | 0 | sjinfo->semi_can_hash = false; |
1902 | 0 | } |
1903 | 0 | else |
1904 | 0 | cost_agg(&agg_path, root, |
1905 | 0 | AGG_HASHED, NULL, |
1906 | 0 | numCols, pathnode->path.rows, |
1907 | 0 | NIL, |
1908 | 0 | subpath->disabled_nodes, |
1909 | 0 | subpath->startup_cost, |
1910 | 0 | subpath->total_cost, |
1911 | 0 | rel->rows, |
1912 | 0 | subpath->pathtarget->width); |
1913 | 0 | } |
1914 | |
1915 | 0 | if (sjinfo->semi_can_btree && sjinfo->semi_can_hash) |
1916 | 0 | { |
1917 | 0 | if (agg_path.disabled_nodes < sort_path.disabled_nodes || |
1918 | 0 | (agg_path.disabled_nodes == sort_path.disabled_nodes && |
1919 | 0 | agg_path.total_cost < sort_path.total_cost)) |
1920 | 0 | pathnode->umethod = UNIQUE_PATH_HASH; |
1921 | 0 | else |
1922 | 0 | pathnode->umethod = UNIQUE_PATH_SORT; |
1923 | 0 | } |
1924 | 0 | else if (sjinfo->semi_can_btree) |
1925 | 0 | pathnode->umethod = UNIQUE_PATH_SORT; |
1926 | 0 | else if (sjinfo->semi_can_hash) |
1927 | 0 | pathnode->umethod = UNIQUE_PATH_HASH; |
1928 | 0 | else |
1929 | 0 | { |
1930 | | /* we can get here only if we abandoned hashing above */ |
1931 | 0 | MemoryContextSwitchTo(oldcontext); |
1932 | 0 | return NULL; |
1933 | 0 | } |
1934 | | |
1935 | 0 | if (pathnode->umethod == UNIQUE_PATH_HASH) |
1936 | 0 | { |
1937 | 0 | pathnode->path.disabled_nodes = agg_path.disabled_nodes; |
1938 | 0 | pathnode->path.startup_cost = agg_path.startup_cost; |
1939 | 0 | pathnode->path.total_cost = agg_path.total_cost; |
1940 | 0 | } |
1941 | 0 | else |
1942 | 0 | { |
1943 | 0 | pathnode->path.disabled_nodes = sort_path.disabled_nodes; |
1944 | 0 | pathnode->path.startup_cost = sort_path.startup_cost; |
1945 | 0 | pathnode->path.total_cost = sort_path.total_cost; |
1946 | 0 | } |
1947 | |
1948 | 0 | rel->cheapest_unique_path = (Path *) pathnode; |
1949 | |
1950 | 0 | MemoryContextSwitchTo(oldcontext); |
1951 | |
1952 | 0 | return pathnode; |
1953 | 0 | } |
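| |
| | /*
| |  * Hedged usage sketch: semijoin planning consults this along the
| |  * lines of
| |  *
| |  *   UniquePath *upath = create_unique_path(root, rel,
| |  *                                          rel->cheapest_total_path,
| |  *                                          sjinfo);
| |  *
| |  * and considers unique-ified join plans only when the result is
| |  * non-NULL; a NULL return means the rel must be joined as an ordinary
| |  * semijoin input instead.
| |  */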
1954 | | |
1955 | | /* |
1956 | | * create_gather_merge_path |
1957 | | * |
1958 | | * Creates a path corresponding to a gather merge scan, returning |
1959 | | * the pathnode. |
1960 | | */ |
1961 | | GatherMergePath * |
1962 | | create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, |
1963 | | PathTarget *target, List *pathkeys, |
1964 | | Relids required_outer, double *rows) |
1965 | 0 | { |
1966 | 0 | GatherMergePath *pathnode = makeNode(GatherMergePath); |
1967 | 0 | int input_disabled_nodes = 0; |
1968 | 0 | Cost input_startup_cost = 0; |
1969 | 0 | Cost input_total_cost = 0; |
1970 | |
1971 | 0 | Assert(subpath->parallel_safe); |
1972 | 0 | Assert(pathkeys); |
1973 | | |
1974 | | /* |
1975 | | * The subpath must already guarantee adequate ordering, either via an
1976 | | * explicit Sort node or by using presorted input. We cannot add an
1977 | | * explicit Sort node for the subpath on additional pathkeys in
1978 | | * createplan.c, because we can't guarantee that sort would be safe:
1979 | | * for example, expressions may be volatile or otherwise parallel unsafe.
1980 | | */ |
1981 | 0 | if (!pathkeys_contained_in(pathkeys, subpath->pathkeys)) |
1982 | 0 | elog(ERROR, "gather merge input not sufficiently sorted"); |
1983 | | |
1984 | 0 | pathnode->path.pathtype = T_GatherMerge; |
1985 | 0 | pathnode->path.parent = rel; |
1986 | 0 | pathnode->path.param_info = get_baserel_parampathinfo(root, rel, |
1987 | 0 | required_outer); |
1988 | 0 | pathnode->path.parallel_aware = false; |
1989 | |
1990 | 0 | pathnode->subpath = subpath; |
1991 | 0 | pathnode->num_workers = subpath->parallel_workers; |
1992 | 0 | pathnode->path.pathkeys = pathkeys; |
1993 | 0 | pathnode->path.pathtarget = target ? target : rel->reltarget; |
1994 | |
1995 | 0 | input_disabled_nodes += subpath->disabled_nodes; |
1996 | 0 | input_startup_cost += subpath->startup_cost; |
1997 | 0 | input_total_cost += subpath->total_cost; |
1998 | |
1999 | 0 | cost_gather_merge(pathnode, root, rel, pathnode->path.param_info, |
2000 | 0 | input_disabled_nodes, input_startup_cost, |
2001 | 0 | input_total_cost, rows); |
2002 | |
2003 | 0 | return pathnode; |
2004 | 0 | } |
2005 | | |
2006 | | /* |
2007 | | * translate_sub_tlist - get subquery column numbers represented by tlist |
2008 | | * |
2009 | | * The given targetlist usually contains only Vars referencing the given relid. |
2010 | | * Extract their varattnos (ie, the column numbers of the subquery) and return |
2011 | | * as an integer List. |
2012 | | * |
2013 | | * If any of the tlist items is not a simple Var, we cannot determine whether |
2014 | | * the subquery's uniqueness condition (if any) matches ours, so punt and |
2015 | | * return NIL. |
2016 | | */ |
2017 | | static List * |
2018 | | translate_sub_tlist(List *tlist, int relid) |
2019 | 0 | { |
2020 | 0 | List *result = NIL; |
2021 | 0 | ListCell *l; |
2022 | |
2023 | 0 | foreach(l, tlist) |
2024 | 0 | { |
2025 | 0 | Var *var = (Var *) lfirst(l); |
2026 | |
2027 | 0 | if (!var || !IsA(var, Var) || |
2028 | 0 | var->varno != relid) |
2029 | 0 | return NIL; /* punt */ |
2030 | | |
2031 | 0 | result = lappend_int(result, var->varattno); |
2032 | 0 | } |
2033 | 0 | return result; |
2034 | 0 | } |
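| |
| | /*
| |  * Worked example: for relid 3 and a tlist of (Var 3.2, Var 3.5), the
| |  * result is the integer list (2, 5). If the tlist were
| |  * (Var 3.2, OpExpr ...), the OpExpr is not a simple Var, so the
| |  * function punts and returns NIL.
| |  */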
2035 | | |
2036 | | /* |
2037 | | * create_gather_path |
2038 | | * Creates a path corresponding to a gather scan, returning the |
2039 | | * pathnode. |
2040 | | * |
2041 | | * 'rows' may optionally be set to override row estimates from other sources. |
2042 | | */ |
2043 | | GatherPath * |
2044 | | create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, |
2045 | | PathTarget *target, Relids required_outer, double *rows) |
2046 | 0 | { |
2047 | 0 | GatherPath *pathnode = makeNode(GatherPath); |
2048 | |
2049 | 0 | Assert(subpath->parallel_safe); |
2050 | |
2051 | 0 | pathnode->path.pathtype = T_Gather; |
2052 | 0 | pathnode->path.parent = rel; |
2053 | 0 | pathnode->path.pathtarget = target; |
2054 | 0 | pathnode->path.param_info = get_baserel_parampathinfo(root, rel, |
2055 | 0 | required_outer); |
2056 | 0 | pathnode->path.parallel_aware = false; |
2057 | 0 | pathnode->path.parallel_safe = false; |
2058 | 0 | pathnode->path.parallel_workers = 0; |
2059 | 0 | pathnode->path.pathkeys = NIL; /* Gather has unordered result */ |
2060 | |
2061 | 0 | pathnode->subpath = subpath; |
2062 | 0 | pathnode->num_workers = subpath->parallel_workers; |
2063 | 0 | pathnode->single_copy = false; |
2064 | |
2065 | 0 | if (pathnode->num_workers == 0) |
2066 | 0 | { |
2067 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
2068 | 0 | pathnode->num_workers = 1; |
2069 | 0 | pathnode->single_copy = true; |
2070 | 0 | } |
2071 | |
2072 | 0 | cost_gather(pathnode, root, rel, pathnode->path.param_info, rows); |
2073 | |
2074 | 0 | return pathnode; |
2075 | 0 | } |
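| |
| | /*
| |  * Note on the num_workers == 0 branch above: a parallel-safe but
| |  * non-partial subpath (parallel_workers == 0) is run as a single copy
| |  * in one process. Because exactly one copy produces the rows, the
| |  * subpath's ordering survives, which is why pathkeys are inherited in
| |  * that branch only.
| |  */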
2076 | | |
2077 | | /* |
2078 | | * create_subqueryscan_path |
2079 | | * Creates a path corresponding to a scan of a subquery, |
2080 | | * returning the pathnode. |
2081 | | * |
2082 | | * Caller must pass trivial_pathtarget = true if it believes rel->reltarget to |
2083 | | * be trivial, ie just a fetch of all the subquery output columns in order. |
2084 | | * While we could determine that here, the caller can usually do it more |
2085 | | * efficiently (or at least amortize it over multiple calls). |
2086 | | */ |
2087 | | SubqueryScanPath * |
2088 | | create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, |
2089 | | bool trivial_pathtarget, |
2090 | | List *pathkeys, Relids required_outer) |
2091 | 0 | { |
2092 | 0 | SubqueryScanPath *pathnode = makeNode(SubqueryScanPath); |
2093 | |
2094 | 0 | pathnode->path.pathtype = T_SubqueryScan; |
2095 | 0 | pathnode->path.parent = rel; |
2096 | 0 | pathnode->path.pathtarget = rel->reltarget; |
2097 | 0 | pathnode->path.param_info = get_baserel_parampathinfo(root, rel, |
2098 | 0 | required_outer); |
2099 | 0 | pathnode->path.parallel_aware = false; |
2100 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
2101 | 0 | subpath->parallel_safe; |
2102 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
2103 | 0 | pathnode->path.pathkeys = pathkeys; |
2104 | 0 | pathnode->subpath = subpath; |
2105 | |
2106 | 0 | cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info, |
2107 | 0 | trivial_pathtarget); |
2108 | |
2109 | 0 | return pathnode; |
2110 | 0 | } |
2111 | | |
2112 | | /* |
2113 | | * create_functionscan_path |
2114 | | * Creates a path corresponding to a sequential scan of a function, |
2115 | | * returning the pathnode. |
2116 | | */ |
2117 | | Path * |
2118 | | create_functionscan_path(PlannerInfo *root, RelOptInfo *rel, |
2119 | | List *pathkeys, Relids required_outer) |
2120 | 0 | { |
2121 | 0 | Path *pathnode = makeNode(Path); |
2122 | |
2123 | 0 | pathnode->pathtype = T_FunctionScan; |
2124 | 0 | pathnode->parent = rel; |
2125 | 0 | pathnode->pathtarget = rel->reltarget; |
2126 | 0 | pathnode->param_info = get_baserel_parampathinfo(root, rel, |
2127 | 0 | required_outer); |
2128 | 0 | pathnode->parallel_aware = false; |
2129 | 0 | pathnode->parallel_safe = rel->consider_parallel; |
2130 | 0 | pathnode->parallel_workers = 0; |
2131 | 0 | pathnode->pathkeys = pathkeys; |
2132 | |
2133 | 0 | cost_functionscan(pathnode, root, rel, pathnode->param_info); |
2134 | |
2135 | 0 | return pathnode; |
2136 | 0 | } |
2137 | | |
2138 | | /* |
2139 | | * create_tablefuncscan_path |
2140 | | * Creates a path corresponding to a sequential scan of a table function, |
2141 | | * returning the pathnode. |
2142 | | */ |
2143 | | Path * |
2144 | | create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel, |
2145 | | Relids required_outer) |
2146 | 0 | { |
2147 | 0 | Path *pathnode = makeNode(Path); |
2148 | |
2149 | 0 | pathnode->pathtype = T_TableFuncScan; |
2150 | 0 | pathnode->parent = rel; |
2151 | 0 | pathnode->pathtarget = rel->reltarget; |
2152 | 0 | pathnode->param_info = get_baserel_parampathinfo(root, rel, |
2153 | 0 | required_outer); |
2154 | 0 | pathnode->parallel_aware = false; |
2155 | 0 | pathnode->parallel_safe = rel->consider_parallel; |
2156 | 0 | pathnode->parallel_workers = 0; |
2157 | 0 | pathnode->pathkeys = NIL; /* result is always unordered */ |
2158 | |
2159 | 0 | cost_tablefuncscan(pathnode, root, rel, pathnode->param_info); |
2160 | |
2161 | 0 | return pathnode; |
2162 | 0 | } |
2163 | | |
2164 | | /* |
2165 | | * create_valuesscan_path |
2166 | | * Creates a path corresponding to a scan of a VALUES list, |
2167 | | * returning the pathnode. |
2168 | | */ |
2169 | | Path * |
2170 | | create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel, |
2171 | | Relids required_outer) |
2172 | 0 | { |
2173 | 0 | Path *pathnode = makeNode(Path); |
2174 | |
2175 | 0 | pathnode->pathtype = T_ValuesScan; |
2176 | 0 | pathnode->parent = rel; |
2177 | 0 | pathnode->pathtarget = rel->reltarget; |
2178 | 0 | pathnode->param_info = get_baserel_parampathinfo(root, rel, |
2179 | 0 | required_outer); |
2180 | 0 | pathnode->parallel_aware = false; |
2181 | 0 | pathnode->parallel_safe = rel->consider_parallel; |
2182 | 0 | pathnode->parallel_workers = 0; |
2183 | 0 | pathnode->pathkeys = NIL; /* result is always unordered */ |
2184 | |
2185 | 0 | cost_valuesscan(pathnode, root, rel, pathnode->param_info); |
2186 | |
2187 | 0 | return pathnode; |
2188 | 0 | } |
2189 | | |
2190 | | /* |
2191 | | * create_ctescan_path |
2192 | | * Creates a path corresponding to a scan of a non-self-reference CTE, |
2193 | | * returning the pathnode. |
2194 | | */ |
2195 | | Path * |
2196 | | create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, |
2197 | | List *pathkeys, Relids required_outer) |
2198 | 0 | { |
2199 | 0 | Path *pathnode = makeNode(Path); |
2200 | |
2201 | 0 | pathnode->pathtype = T_CteScan; |
2202 | 0 | pathnode->parent = rel; |
2203 | 0 | pathnode->pathtarget = rel->reltarget; |
2204 | 0 | pathnode->param_info = get_baserel_parampathinfo(root, rel, |
2205 | 0 | required_outer); |
2206 | 0 | pathnode->parallel_aware = false; |
2207 | 0 | pathnode->parallel_safe = rel->consider_parallel; |
2208 | 0 | pathnode->parallel_workers = 0; |
2209 | 0 | pathnode->pathkeys = pathkeys; |
2210 | |
2211 | 0 | cost_ctescan(pathnode, root, rel, pathnode->param_info); |
2212 | |
2213 | 0 | return pathnode; |
2214 | 0 | } |
2215 | | |
2216 | | /* |
2217 | | * create_namedtuplestorescan_path |
2218 | | * Creates a path corresponding to a scan of a named tuplestore, returning |
2219 | | * the pathnode. |
2220 | | */ |
2221 | | Path * |
2222 | | create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel, |
2223 | | Relids required_outer) |
2224 | 0 | { |
2225 | 0 | Path *pathnode = makeNode(Path); |
2226 | |
2227 | 0 | pathnode->pathtype = T_NamedTuplestoreScan; |
2228 | 0 | pathnode->parent = rel; |
2229 | 0 | pathnode->pathtarget = rel->reltarget; |
2230 | 0 | pathnode->param_info = get_baserel_parampathinfo(root, rel, |
2231 | 0 | required_outer); |
2232 | 0 | pathnode->parallel_aware = false; |
2233 | 0 | pathnode->parallel_safe = rel->consider_parallel; |
2234 | 0 | pathnode->parallel_workers = 0; |
2235 | 0 | pathnode->pathkeys = NIL; /* result is always unordered */ |
2236 | |
2237 | 0 | cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info); |
2238 | |
2239 | 0 | return pathnode; |
2240 | 0 | } |
2241 | | |
2242 | | /* |
2243 | | * create_resultscan_path |
2244 | | * Creates a path corresponding to a scan of an RTE_RESULT relation, |
2245 | | * returning the pathnode. |
2246 | | */ |
2247 | | Path * |
2248 | | create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, |
2249 | | Relids required_outer) |
2250 | 0 | { |
2251 | 0 | Path *pathnode = makeNode(Path); |
2252 | |
2253 | 0 | pathnode->pathtype = T_Result; |
2254 | 0 | pathnode->parent = rel; |
2255 | 0 | pathnode->pathtarget = rel->reltarget; |
2256 | 0 | pathnode->param_info = get_baserel_parampathinfo(root, rel, |
2257 | 0 | required_outer); |
2258 | 0 | pathnode->parallel_aware = false; |
2259 | 0 | pathnode->parallel_safe = rel->consider_parallel; |
2260 | 0 | pathnode->parallel_workers = 0; |
2261 | 0 | pathnode->pathkeys = NIL; /* result is always unordered */ |
2262 | |
2263 | 0 | cost_resultscan(pathnode, root, rel, pathnode->param_info); |
2264 | |
2265 | 0 | return pathnode; |
2266 | 0 | } |
2267 | | |
2268 | | /* |
2269 | | * create_worktablescan_path |
2270 | | * Creates a path corresponding to a scan of a self-reference CTE, |
2271 | | * returning the pathnode. |
2272 | | */ |
2273 | | Path * |
2274 | | create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel, |
2275 | | Relids required_outer) |
2276 | 0 | { |
2277 | 0 | Path *pathnode = makeNode(Path); |
2278 | |
2279 | 0 | pathnode->pathtype = T_WorkTableScan; |
2280 | 0 | pathnode->parent = rel; |
2281 | 0 | pathnode->pathtarget = rel->reltarget; |
2282 | 0 | pathnode->param_info = get_baserel_parampathinfo(root, rel, |
2283 | 0 | required_outer); |
2284 | 0 | pathnode->parallel_aware = false; |
2285 | 0 | pathnode->parallel_safe = rel->consider_parallel; |
2286 | 0 | pathnode->parallel_workers = 0; |
2287 | 0 | pathnode->pathkeys = NIL; /* result is always unordered */ |
2288 | | |
2289 | | /* Cost is the same as for a regular CTE scan */ |
2290 | 0 | cost_ctescan(pathnode, root, rel, pathnode->param_info); |
2291 | |
2292 | 0 | return pathnode; |
2293 | 0 | } |
2294 | | |
2295 | | /* |
2296 | | * create_foreignscan_path |
2297 | | * Creates a path corresponding to a scan of a foreign base table, |
2298 | | * returning the pathnode. |
2299 | | * |
2300 | | * This function is never called from core Postgres; rather, it's expected |
2301 | | * to be called by the GetForeignPaths function of a foreign data wrapper. |
2302 | | * We make the FDW supply all fields of the path, since we do not have any way |
2303 | | * to calculate them in core. However, there is a usually-sane default for |
2304 | | * the pathtarget (rel->reltarget), so we let a NULL for "target" select that. |
2305 | | */ |
2306 | | ForeignPath * |
2307 | | create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel, |
2308 | | PathTarget *target, |
2309 | | double rows, int disabled_nodes, |
2310 | | Cost startup_cost, Cost total_cost, |
2311 | | List *pathkeys, |
2312 | | Relids required_outer, |
2313 | | Path *fdw_outerpath, |
2314 | | List *fdw_restrictinfo, |
2315 | | List *fdw_private) |
2316 | 0 | { |
2317 | 0 | ForeignPath *pathnode = makeNode(ForeignPath); |
2318 | | |
2319 | | /* Historically some FDWs were confused about when to use this */ |
2320 | 0 | Assert(IS_SIMPLE_REL(rel)); |
2321 | |
2322 | 0 | pathnode->path.pathtype = T_ForeignScan; |
2323 | 0 | pathnode->path.parent = rel; |
2324 | 0 | pathnode->path.pathtarget = target ? target : rel->reltarget; |
2325 | 0 | pathnode->path.param_info = get_baserel_parampathinfo(root, rel, |
2326 | 0 | required_outer); |
2327 | 0 | pathnode->path.parallel_aware = false; |
2328 | 0 | pathnode->path.parallel_safe = rel->consider_parallel; |
2329 | 0 | pathnode->path.parallel_workers = 0; |
2330 | 0 | pathnode->path.rows = rows; |
2331 | 0 | pathnode->path.disabled_nodes = disabled_nodes; |
2332 | 0 | pathnode->path.startup_cost = startup_cost; |
2333 | 0 | pathnode->path.total_cost = total_cost; |
2334 | 0 | pathnode->path.pathkeys = pathkeys; |
2335 | |
2336 | 0 | pathnode->fdw_outerpath = fdw_outerpath; |
2337 | 0 | pathnode->fdw_restrictinfo = fdw_restrictinfo; |
2338 | 0 | pathnode->fdw_private = fdw_private; |
2339 | |
2340 | 0 | return pathnode; |
2341 | 0 | } |
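| |
| | /*
| |  * Hedged sketch of the expected FDW usage; est_rows, est_startup,
| |  * est_total, and private_list below are placeholders, not core APIs.
| |  * A GetForeignPaths callback typically adds an unordered,
| |  * unparameterized path built from its own estimates, letting the
| |  * NULL target select rel->reltarget:
| |  *
| |  *   add_path(baserel, (Path *)
| |  *            create_foreignscan_path(root, baserel, NULL,
| |  *                                    est_rows, 0,
| |  *                                    est_startup, est_total,
| |  *                                    NIL, NULL, NULL, NIL,
| |  *                                    private_list));
| |  */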
2342 | | |
2343 | | /* |
2344 | | * create_foreign_join_path |
2345 | | * Creates a path corresponding to a scan of a foreign join, |
2346 | | * returning the pathnode. |
2347 | | * |
2348 | | * This function is never called from core Postgres; rather, it's expected |
2349 | | * to be called by the GetForeignJoinPaths function of a foreign data wrapper. |
2350 | | * We make the FDW supply all fields of the path, since we do not have any way |
2351 | | * to calculate them in core. However, there is a usually-sane default for |
2352 | | * the pathtarget (rel->reltarget), so we let a NULL for "target" select that. |
2353 | | */ |
2354 | | ForeignPath * |
2355 | | create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel, |
2356 | | PathTarget *target, |
2357 | | double rows, int disabled_nodes, |
2358 | | Cost startup_cost, Cost total_cost, |
2359 | | List *pathkeys, |
2360 | | Relids required_outer, |
2361 | | Path *fdw_outerpath, |
2362 | | List *fdw_restrictinfo, |
2363 | | List *fdw_private) |
2364 | 0 | { |
2365 | 0 | ForeignPath *pathnode = makeNode(ForeignPath); |
2366 | | |
2367 | | /* |
2368 | | * We should use get_joinrel_parampathinfo to handle parameterized paths, |
2369 | | * but the API of this function doesn't support it, and existing |
2370 | | * extensions aren't yet trying to build such paths anyway. For the |
2371 | | * moment just throw an error if someone tries it; eventually we should |
2372 | | * revisit this. |
2373 | | */ |
2374 | 0 | if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids)) |
2375 | 0 | elog(ERROR, "parameterized foreign joins are not supported yet"); |
2376 | | |
2377 | 0 | pathnode->path.pathtype = T_ForeignScan; |
2378 | 0 | pathnode->path.parent = rel; |
2379 | 0 | pathnode->path.pathtarget = target ? target : rel->reltarget; |
2380 | 0 | pathnode->path.param_info = NULL; /* XXX see above */ |
2381 | 0 | pathnode->path.parallel_aware = false; |
2382 | 0 | pathnode->path.parallel_safe = rel->consider_parallel; |
2383 | 0 | pathnode->path.parallel_workers = 0; |
2384 | 0 | pathnode->path.rows = rows; |
2385 | 0 | pathnode->path.disabled_nodes = disabled_nodes; |
2386 | 0 | pathnode->path.startup_cost = startup_cost; |
2387 | 0 | pathnode->path.total_cost = total_cost; |
2388 | 0 | pathnode->path.pathkeys = pathkeys; |
2389 | |
2390 | 0 | pathnode->fdw_outerpath = fdw_outerpath; |
2391 | 0 | pathnode->fdw_restrictinfo = fdw_restrictinfo; |
2392 | 0 | pathnode->fdw_private = fdw_private; |
2393 | |
2394 | 0 | return pathnode; |
2395 | 0 | } |
2396 | | |
2397 | | /* |
2398 | | * create_foreign_upper_path |
2399 | | * Creates a path corresponding to an upper relation that's computed |
2400 | | * directly by an FDW, returning the pathnode. |
2401 | | * |
2402 | | * This function is never called from core Postgres; rather, it's expected to |
2403 | | * be called by the GetForeignUpperPaths function of a foreign data wrapper. |
2404 | | * We make the FDW supply all fields of the path, since we do not have any way |
2405 | | * to calculate them in core. However, there is a usually-sane default for |
2406 | | * the pathtarget (rel->reltarget), so we let a NULL for "target" select that. |
2407 | | */ |
2408 | | ForeignPath * |
2409 | | create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel, |
2410 | | PathTarget *target, |
2411 | | double rows, int disabled_nodes, |
2412 | | Cost startup_cost, Cost total_cost, |
2413 | | List *pathkeys, |
2414 | | Path *fdw_outerpath, |
2415 | | List *fdw_restrictinfo, |
2416 | | List *fdw_private) |
2417 | 0 | { |
2418 | 0 | ForeignPath *pathnode = makeNode(ForeignPath); |
2419 | | |
2420 | | /* |
2421 | | * Upper relations should never have any lateral references, since joining |
2422 | | * is complete. |
2423 | | */ |
2424 | 0 | Assert(bms_is_empty(rel->lateral_relids)); |
2425 | |
2426 | 0 | pathnode->path.pathtype = T_ForeignScan; |
2427 | 0 | pathnode->path.parent = rel; |
2428 | 0 | pathnode->path.pathtarget = target ? target : rel->reltarget; |
2429 | 0 | pathnode->path.param_info = NULL; |
2430 | 0 | pathnode->path.parallel_aware = false; |
2431 | 0 | pathnode->path.parallel_safe = rel->consider_parallel; |
2432 | 0 | pathnode->path.parallel_workers = 0; |
2433 | 0 | pathnode->path.rows = rows; |
2434 | 0 | pathnode->path.disabled_nodes = disabled_nodes; |
2435 | 0 | pathnode->path.startup_cost = startup_cost; |
2436 | 0 | pathnode->path.total_cost = total_cost; |
2437 | 0 | pathnode->path.pathkeys = pathkeys; |
2438 | |
2439 | 0 | pathnode->fdw_outerpath = fdw_outerpath; |
2440 | 0 | pathnode->fdw_restrictinfo = fdw_restrictinfo; |
2441 | 0 | pathnode->fdw_private = fdw_private; |
2442 | |
2443 | 0 | return pathnode; |
2444 | 0 | } |
2445 | | |
2446 | | /* |
2447 | | * calc_nestloop_required_outer |
2448 | | * Compute the required_outer set for a nestloop join path |
2449 | | * |
2450 | | * Note: when considering a child join, the inputs nonetheless use top-level |
2451 | | * parent relids |
2452 | | * |
2453 | | * Note: result must not share storage with either input |
2454 | | */ |
2455 | | Relids |
2456 | | calc_nestloop_required_outer(Relids outerrelids, |
2457 | | Relids outer_paramrels, |
2458 | | Relids innerrelids, |
2459 | | Relids inner_paramrels) |
2460 | 0 | { |
2461 | 0 | Relids required_outer; |
2462 | | |
2463 | | /* inner_path can require rels from outer path, but not vice versa */ |
2464 | 0 | Assert(!bms_overlap(outer_paramrels, innerrelids)); |
2465 | | /* easy case if inner path is not parameterized */ |
2466 | 0 | if (!inner_paramrels) |
2467 | 0 | return bms_copy(outer_paramrels); |
2468 | | /* else, form the union ... */ |
2469 | 0 | required_outer = bms_union(outer_paramrels, inner_paramrels); |
2470 | | /* ... and remove any mention of now-satisfied outer rels */ |
2471 | 0 | required_outer = bms_del_members(required_outer, |
2472 | 0 | outerrelids); |
2473 | 0 | return required_outer; |
2474 | 0 | } |
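| |
| | /*
| |  * Worked example with assumed relid sets: given outerrelids = {1},
| |  * outer_paramrels = {4}, and inner_paramrels = {1, 4}, the union is
| |  * {1, 4} and deleting the outer rels leaves {4}. The inner path's
| |  * dependence on rel 1 is satisfied inside this nestloop, while rel 4
| |  * must still be supplied from above.
| |  */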
2475 | | |
2476 | | /* |
2477 | | * calc_non_nestloop_required_outer |
2478 | | * Compute the required_outer set for a merge or hash join path |
2479 | | * |
2480 | | * Note: result must not share storage with either input |
2481 | | */ |
2482 | | Relids |
2483 | | calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path) |
2484 | 0 | { |
2485 | 0 | Relids outer_paramrels = PATH_REQ_OUTER(outer_path); |
2486 | 0 | Relids inner_paramrels = PATH_REQ_OUTER(inner_path); |
2487 | 0 | Relids innerrelids PG_USED_FOR_ASSERTS_ONLY; |
2488 | 0 | Relids outerrelids PG_USED_FOR_ASSERTS_ONLY; |
2489 | 0 | Relids required_outer; |
2490 | | |
2491 | | /* |
2492 | | * Any parameterization of the input paths refers to topmost parents of |
2493 | | * the relevant relations, because reparameterize_path_by_child() hasn't |
2494 | | * been called yet. So we must consider topmost parents of the relations |
2495 | | * being joined, too, while checking for disallowed parameterization |
2496 | | * cases. |
2497 | | */ |
2498 | 0 | if (inner_path->parent->top_parent_relids) |
2499 | 0 | innerrelids = inner_path->parent->top_parent_relids; |
2500 | 0 | else |
2501 | 0 | innerrelids = inner_path->parent->relids; |
2502 | |
2503 | 0 | if (outer_path->parent->top_parent_relids) |
2504 | 0 | outerrelids = outer_path->parent->top_parent_relids; |
2505 | 0 | else |
2506 | 0 | outerrelids = outer_path->parent->relids; |
2507 | | |
2508 | | /* neither path can require rels from the other */ |
2509 | 0 | Assert(!bms_overlap(outer_paramrels, innerrelids)); |
2510 | 0 | Assert(!bms_overlap(inner_paramrels, outerrelids)); |
2511 | | /* form the union ... */ |
2512 | 0 | required_outer = bms_union(outer_paramrels, inner_paramrels); |
2513 | | /* we do not need an explicit test for empty; bms_union gets it right */ |
2514 | 0 | return required_outer; |
2515 | 0 | } |
2516 | | |
2517 | | /* |
2518 | | * create_nestloop_path |
2519 | | * Creates a pathnode corresponding to a nestloop join between two |
2520 | | * relations. |
2521 | | * |
2522 | | * 'joinrel' is the join relation. |
2523 | | * 'jointype' is the type of join required |
2524 | | * 'workspace' is the result from initial_cost_nestloop |
2525 | | * 'extra' contains various information about the join |
2526 | | * 'outer_path' is the outer path |
2527 | | * 'inner_path' is the inner path |
2528 | | * 'restrict_clauses' are the RestrictInfo nodes to apply at the join |
2529 | | * 'pathkeys' are the path keys of the new join path |
2530 | | * 'required_outer' is the set of required outer rels |
2531 | | * |
2532 | | * Returns the resulting path node. |
2533 | | */ |
2534 | | NestPath * |
2535 | | create_nestloop_path(PlannerInfo *root, |
2536 | | RelOptInfo *joinrel, |
2537 | | JoinType jointype, |
2538 | | JoinCostWorkspace *workspace, |
2539 | | JoinPathExtraData *extra, |
2540 | | Path *outer_path, |
2541 | | Path *inner_path, |
2542 | | List *restrict_clauses, |
2543 | | List *pathkeys, |
2544 | | Relids required_outer) |
2545 | 0 | { |
2546 | 0 | NestPath *pathnode = makeNode(NestPath); |
2547 | 0 | Relids inner_req_outer = PATH_REQ_OUTER(inner_path); |
2548 | 0 | Relids outerrelids; |
2549 | | |
2550 | | /* |
2551 | | * Paths are parameterized by top-level parents, so run parameterization |
2552 | | * tests on the parent relids. |
2553 | | */ |
2554 | 0 | if (outer_path->parent->top_parent_relids) |
2555 | 0 | outerrelids = outer_path->parent->top_parent_relids; |
2556 | 0 | else |
2557 | 0 | outerrelids = outer_path->parent->relids; |
2558 | | |
2559 | | /* |
2560 | | * If the inner path is parameterized by the outer, we must drop any |
2561 | | * restrict_clauses that are due to be moved into the inner path. We have |
2562 | | * to do this now, rather than postpone the work till createplan time, |
2563 | | * because the restrict_clauses list can affect the size and cost |
2564 | | * estimates for this path. We detect such clauses by checking for serial |
2565 | | * number match to clauses already enforced in the inner path. |
2566 | | */ |
2567 | 0 | if (bms_overlap(inner_req_outer, outerrelids)) |
2568 | 0 | { |
2569 | 0 | Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path); |
2570 | 0 | List *jclauses = NIL; |
2571 | 0 | ListCell *lc; |
2572 | |
2573 | 0 | foreach(lc, restrict_clauses) |
2574 | 0 | { |
2575 | 0 | RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); |
2576 | |
2577 | 0 | if (!bms_is_member(rinfo->rinfo_serial, enforced_serials)) |
2578 | 0 | jclauses = lappend(jclauses, rinfo); |
2579 | 0 | } |
2580 | 0 | restrict_clauses = jclauses; |
2581 | 0 | } |
2582 | |
2583 | 0 | pathnode->jpath.path.pathtype = T_NestLoop; |
2584 | 0 | pathnode->jpath.path.parent = joinrel; |
2585 | 0 | pathnode->jpath.path.pathtarget = joinrel->reltarget; |
2586 | 0 | pathnode->jpath.path.param_info = |
2587 | 0 | get_joinrel_parampathinfo(root, |
2588 | 0 | joinrel, |
2589 | 0 | outer_path, |
2590 | 0 | inner_path, |
2591 | 0 | extra->sjinfo, |
2592 | 0 | required_outer, |
2593 | 0 | &restrict_clauses); |
2594 | 0 | pathnode->jpath.path.parallel_aware = false; |
2595 | 0 | pathnode->jpath.path.parallel_safe = joinrel->consider_parallel && |
2596 | 0 | outer_path->parallel_safe && inner_path->parallel_safe; |
2597 | | /* This is a foolish way to estimate parallel_workers, but for now... */ |
2598 | 0 | pathnode->jpath.path.parallel_workers = outer_path->parallel_workers; |
2599 | 0 | pathnode->jpath.path.pathkeys = pathkeys; |
2600 | 0 | pathnode->jpath.jointype = jointype; |
2601 | 0 | pathnode->jpath.inner_unique = extra->inner_unique; |
2602 | 0 | pathnode->jpath.outerjoinpath = outer_path; |
2603 | 0 | pathnode->jpath.innerjoinpath = inner_path; |
2604 | 0 | pathnode->jpath.joinrestrictinfo = restrict_clauses; |
2605 | |
2606 | 0 | final_cost_nestloop(root, pathnode, workspace, extra); |
2607 | |
2608 | 0 | return pathnode; |
2609 | 0 | } |
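| |
| | /*
| |  * Illustration of the clause filtering above, with a made-up serial
| |  * number: if the parameterized inner path already enforces the clause
| |  * with rinfo_serial = 7 (it is in enforced_serials), that clause is
| |  * dropped from restrict_clauses, so the join's size and cost
| |  * estimates don't double-count its selectivity.
| |  */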
2610 | | |
2611 | | /* |
2612 | | * create_mergejoin_path |
2613 | | * Creates a pathnode corresponding to a mergejoin join between |
2614 | | * two relations |
2615 | | * |
2616 | | * 'joinrel' is the join relation |
2617 | | * 'jointype' is the type of join required |
2618 | | * 'workspace' is the result from initial_cost_mergejoin |
2619 | | * 'extra' contains various information about the join |
2620 | | * 'outer_path' is the outer path |
2621 | | * 'inner_path' is the inner path |
2622 | | * 'restrict_clauses' are the RestrictInfo nodes to apply at the join |
2623 | | * 'pathkeys' are the path keys of the new join path |
2624 | | * 'required_outer' is the set of required outer rels |
2625 | | * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses |
2626 | | * (this should be a subset of the restrict_clauses list) |
2627 | | * 'outersortkeys' are the sort varkeys for the outer relation |
2628 | | * 'innersortkeys' are the sort varkeys for the inner relation |
2629 | | * 'outer_presorted_keys' is the number of presorted keys of the outer path |
2630 | | */ |
2631 | | MergePath * |
2632 | | create_mergejoin_path(PlannerInfo *root, |
2633 | | RelOptInfo *joinrel, |
2634 | | JoinType jointype, |
2635 | | JoinCostWorkspace *workspace, |
2636 | | JoinPathExtraData *extra, |
2637 | | Path *outer_path, |
2638 | | Path *inner_path, |
2639 | | List *restrict_clauses, |
2640 | | List *pathkeys, |
2641 | | Relids required_outer, |
2642 | | List *mergeclauses, |
2643 | | List *outersortkeys, |
2644 | | List *innersortkeys, |
2645 | | int outer_presorted_keys) |
2646 | 0 | { |
2647 | 0 | MergePath *pathnode = makeNode(MergePath); |
2648 | |
2649 | 0 | pathnode->jpath.path.pathtype = T_MergeJoin; |
2650 | 0 | pathnode->jpath.path.parent = joinrel; |
2651 | 0 | pathnode->jpath.path.pathtarget = joinrel->reltarget; |
2652 | 0 | pathnode->jpath.path.param_info = |
2653 | 0 | get_joinrel_parampathinfo(root, |
2654 | 0 | joinrel, |
2655 | 0 | outer_path, |
2656 | 0 | inner_path, |
2657 | 0 | extra->sjinfo, |
2658 | 0 | required_outer, |
2659 | 0 | &restrict_clauses); |
2660 | 0 | pathnode->jpath.path.parallel_aware = false; |
2661 | 0 | pathnode->jpath.path.parallel_safe = joinrel->consider_parallel && |
2662 | 0 | outer_path->parallel_safe && inner_path->parallel_safe; |
2663 | | /* This is a foolish way to estimate parallel_workers, but for now... */ |
2664 | 0 | pathnode->jpath.path.parallel_workers = outer_path->parallel_workers; |
2665 | 0 | pathnode->jpath.path.pathkeys = pathkeys; |
2666 | 0 | pathnode->jpath.jointype = jointype; |
2667 | 0 | pathnode->jpath.inner_unique = extra->inner_unique; |
2668 | 0 | pathnode->jpath.outerjoinpath = outer_path; |
2669 | 0 | pathnode->jpath.innerjoinpath = inner_path; |
2670 | 0 | pathnode->jpath.joinrestrictinfo = restrict_clauses; |
2671 | 0 | pathnode->path_mergeclauses = mergeclauses; |
2672 | 0 | pathnode->outersortkeys = outersortkeys; |
2673 | 0 | pathnode->innersortkeys = innersortkeys; |
2674 | 0 | pathnode->outer_presorted_keys = outer_presorted_keys; |
2675 | | /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */ |
2676 | | /* pathnode->materialize_inner will be set by final_cost_mergejoin */ |
2677 | |
2678 | 0 | final_cost_mergejoin(root, pathnode, workspace, extra); |
2679 | |
2680 | 0 | return pathnode; |
2681 | 0 | } |
2682 | | |
2683 | | /* |
2684 | | * create_hashjoin_path |
2685 | | * Creates a pathnode corresponding to a hash join between two relations. |
2686 | | * |
2687 | | * 'joinrel' is the join relation |
2688 | | * 'jointype' is the type of join required |
2689 | | * 'workspace' is the result from initial_cost_hashjoin |
2690 | | * 'extra' contains various information about the join |
2691 | | * 'outer_path' is the cheapest outer path |
2692 | | * 'inner_path' is the cheapest inner path |
2693 | | * 'parallel_hash' is true to select a Parallel Hash for the inner path (shared hash table) |
2694 | | * 'restrict_clauses' are the RestrictInfo nodes to apply at the join |
2695 | | * 'required_outer' is the set of required outer rels |
2696 | | * 'hashclauses' are the RestrictInfo nodes to use as hash clauses |
2697 | | * (this should be a subset of the restrict_clauses list) |
2698 | | */ |
2699 | | HashPath * |
2700 | | create_hashjoin_path(PlannerInfo *root, |
2701 | | RelOptInfo *joinrel, |
2702 | | JoinType jointype, |
2703 | | JoinCostWorkspace *workspace, |
2704 | | JoinPathExtraData *extra, |
2705 | | Path *outer_path, |
2706 | | Path *inner_path, |
2707 | | bool parallel_hash, |
2708 | | List *restrict_clauses, |
2709 | | Relids required_outer, |
2710 | | List *hashclauses) |
2711 | 0 | { |
2712 | 0 | HashPath *pathnode = makeNode(HashPath); |
2713 | |
2714 | 0 | pathnode->jpath.path.pathtype = T_HashJoin; |
2715 | 0 | pathnode->jpath.path.parent = joinrel; |
2716 | 0 | pathnode->jpath.path.pathtarget = joinrel->reltarget; |
2717 | 0 | pathnode->jpath.path.param_info = |
2718 | 0 | get_joinrel_parampathinfo(root, |
2719 | 0 | joinrel, |
2720 | 0 | outer_path, |
2721 | 0 | inner_path, |
2722 | 0 | extra->sjinfo, |
2723 | 0 | required_outer, |
2724 | 0 | &restrict_clauses); |
2725 | 0 | pathnode->jpath.path.parallel_aware = |
2726 | 0 | joinrel->consider_parallel && parallel_hash; |
2727 | 0 | pathnode->jpath.path.parallel_safe = joinrel->consider_parallel && |
2728 | 0 | outer_path->parallel_safe && inner_path->parallel_safe; |
2729 | | /* This is a foolish way to estimate parallel_workers, but for now... */ |
2730 | 0 | pathnode->jpath.path.parallel_workers = outer_path->parallel_workers; |
2731 | | |
2732 | | /* |
2733 | | * A hashjoin never has pathkeys, since its output ordering is |
2734 | | * unpredictable due to possible batching. XXX If the inner relation is |
2735 | | * small enough, we could instruct the executor that it must not batch, |
2736 | | * and then we could assume that the output inherits the outer relation's |
2737 | | * ordering, which might save a sort step. However there is considerable |
2738 | | * downside if our estimate of the inner relation size is badly off. For |
2739 | | * the moment we don't risk it. (Note also that if we wanted to take this |
2740 | | * seriously, joinpath.c would have to consider many more paths for the |
2741 | | * outer rel than it does now.) |
2742 | | */ |
2743 | 0 | pathnode->jpath.path.pathkeys = NIL; |
2744 | 0 | pathnode->jpath.jointype = jointype; |
2745 | 0 | pathnode->jpath.inner_unique = extra->inner_unique; |
2746 | 0 | pathnode->jpath.outerjoinpath = outer_path; |
2747 | 0 | pathnode->jpath.innerjoinpath = inner_path; |
2748 | 0 | pathnode->jpath.joinrestrictinfo = restrict_clauses; |
2749 | 0 | pathnode->path_hashclauses = hashclauses; |
2750 | | /* final_cost_hashjoin will fill in pathnode->num_batches */ |
2751 | |
2752 | 0 | final_cost_hashjoin(root, pathnode, workspace, extra); |
2753 | |
2754 | 0 | return pathnode; |
2755 | 0 | } |
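
The "subset of restrict_clauses" requirement on hashclauses is the caller's
job: it keeps only the clauses actually usable for hashing. A simplified
fragment in the style of hash_inner_and_outer() in joinpath.c (the outer-join
and clause-direction checks are omitted; not compilable on its own):

	List	   *hashclauses = NIL;
	ListCell   *l;

	foreach(l, restrictlist)
	{
		RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);

		/* hashjoinoperator == InvalidOid means "not hashjoinable" */
		if (!rinfo->can_join ||
			rinfo->hashjoinoperator == InvalidOid)
			continue;
		hashclauses = lappend(hashclauses, rinfo);
	}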
2756 | | |
2757 | | /* |
2758 | | * create_projection_path |
2759 | | * Creates a pathnode that represents performing a projection. |
2760 | | * |
2761 | | * 'rel' is the parent relation associated with the result |
2762 | | * 'subpath' is the path representing the source of data |
2763 | | * 'target' is the PathTarget to be computed |
2764 | | */ |
2765 | | ProjectionPath * |
2766 | | create_projection_path(PlannerInfo *root, |
2767 | | RelOptInfo *rel, |
2768 | | Path *subpath, |
2769 | | PathTarget *target) |
2770 | 0 | { |
2771 | 0 | ProjectionPath *pathnode = makeNode(ProjectionPath); |
2772 | 0 | PathTarget *oldtarget; |
2773 | | |
2774 | | /* |
2775 | | * We mustn't put a ProjectionPath directly above another; it's useless |
2776 | | * and will confuse create_projection_plan. Rather than making sure all |
2777 | | * callers handle that, let's implement it here, by stripping off any |
2778 | | * ProjectionPath in what we're given. Given this rule, there won't be |
2779 | | * more than one. |
2780 | | */ |
2781 | 0 | if (IsA(subpath, ProjectionPath)) |
2782 | 0 | { |
2783 | 0 | ProjectionPath *subpp = (ProjectionPath *) subpath; |
2784 | |
2785 | 0 | Assert(subpp->path.parent == rel); |
2786 | 0 | subpath = subpp->subpath; |
2787 | 0 | Assert(!IsA(subpath, ProjectionPath)); |
2788 | 0 | } |
2789 | |
2790 | 0 | pathnode->path.pathtype = T_Result; |
2791 | 0 | pathnode->path.parent = rel; |
2792 | 0 | pathnode->path.pathtarget = target; |
2793 | | /* For now, assume we are above any joins, so no parameterization */ |
2794 | 0 | pathnode->path.param_info = NULL; |
2795 | 0 | pathnode->path.parallel_aware = false; |
2796 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
2797 | 0 | subpath->parallel_safe && |
2798 | 0 | is_parallel_safe(root, (Node *) target->exprs); |
2799 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
2800 | | /* Projection does not change the sort order */ |
2801 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
2802 | |
2803 | 0 | pathnode->subpath = subpath; |
2804 | | |
2805 | | /* |
2806 | | * We might not need a separate Result node. If the input plan node type |
2807 | | * can project, we can just tell it to project something else. Or, if it |
2808 | | * can't project but the desired target has the same expression list as |
2809 | | * what the input will produce anyway, we can still give it the desired |
2810 | | * tlist (possibly changing its ressortgroupref labels, but nothing else). |
2811 | | * Note: in the latter case, create_projection_plan has to recheck our |
2812 | | * conclusion; see comments therein. |
2813 | | */ |
2814 | 0 | oldtarget = subpath->pathtarget; |
2815 | 0 | if (is_projection_capable_path(subpath) || |
2816 | 0 | equal(oldtarget->exprs, target->exprs)) |
2817 | 0 | { |
2818 | | /* No separate Result node needed */ |
2819 | 0 | pathnode->dummypp = true; |
2820 | | |
2821 | | /* |
2822 | | * Set cost of plan as subpath's cost, adjusted for tlist replacement. |
2823 | | */ |
2824 | 0 | pathnode->path.rows = subpath->rows; |
2825 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
2826 | 0 | pathnode->path.startup_cost = subpath->startup_cost + |
2827 | 0 | (target->cost.startup - oldtarget->cost.startup); |
2828 | 0 | pathnode->path.total_cost = subpath->total_cost + |
2829 | 0 | (target->cost.startup - oldtarget->cost.startup) + |
2830 | 0 | (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows; |
2831 | 0 | } |
2832 | 0 | else |
2833 | 0 | { |
2834 | | /* We really do need the Result node */ |
2835 | 0 | pathnode->dummypp = false; |
2836 | | |
2837 | | /* |
2838 | | * The Result node's cost is cpu_tuple_cost per row, plus the cost of |
2839 | | * evaluating the tlist. There is no qual to worry about. |
2840 | | */ |
2841 | 0 | pathnode->path.rows = subpath->rows; |
2842 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
2843 | 0 | pathnode->path.startup_cost = subpath->startup_cost + |
2844 | 0 | target->cost.startup; |
2845 | 0 | pathnode->path.total_cost = subpath->total_cost + |
2846 | 0 | target->cost.startup + |
2847 | 0 | (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows; |
2848 | 0 | } |
2849 | |
2850 | 0 | return pathnode; |
2851 | 0 | } |
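
The two costing branches above differ only in whether cpu_tuple_cost is
charged per row for a real Result node. A standalone sketch of the arithmetic
with made-up inputs (only cpu_tuple_cost = 0.01 is a PostgreSQL default):

	#include <stdio.h>

	typedef struct { double startup, per_tuple; } TlistCost;

	int main(void)
	{
		const double cpu_tuple_cost = 0.01;
		double		rows = 1000.0, sp_total = 45.0;
		TlistCost	oldt = {0.0, 0.0025};	/* tlist the subpath computes */
		TlistCost	newt = {0.0, 0.0100};	/* desired tlist */

		/* dummypp case: swap tlist eval costs only */
		double		d_total = sp_total + (newt.startup - oldt.startup) +
			(newt.per_tuple - oldt.per_tuple) * rows;

		/* real Result node: new tlist plus cpu_tuple_cost per row */
		double		r_total = sp_total + newt.startup +
			(cpu_tuple_cost + newt.per_tuple) * rows;

		printf("dummypp=%.2f result=%.2f\n", d_total, r_total);	/* 52.50 65.00 */
		return 0;
	}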
2852 | | |
2853 | | /* |
2854 | | * apply_projection_to_path |
2855 | | * Add a projection step, or just apply the target directly to given path. |
2856 | | * |
2857 | | * This has the same net effect as create_projection_path(), except that if |
2858 | | * a separate Result plan node isn't needed, we just replace the given path's |
2859 | | * pathtarget with the desired one. This must be used only when the caller |
2860 | | * knows that the given path isn't referenced elsewhere and so can be modified |
2861 | | * in-place. |
2862 | | * |
2863 | | * If the input path is a GatherPath or GatherMergePath, we try to push the |
2864 | | * new target down to its input as well; this is a yet more invasive |
2865 | | * modification of the input path, which create_projection_path() can't do. |
2866 | | * |
2867 | | * Note that we mustn't change the source path's parent link; so when it is |
2868 | | * add_path'd to "rel" things will be a bit inconsistent. So far that has |
2869 | | * not caused any trouble. |
2870 | | * |
2871 | | * 'rel' is the parent relation associated with the result |
2872 | | * 'path' is the path representing the source of data |
2873 | | * 'target' is the PathTarget to be computed |
2874 | | */ |
2875 | | Path * |
2876 | | apply_projection_to_path(PlannerInfo *root, |
2877 | | RelOptInfo *rel, |
2878 | | Path *path, |
2879 | | PathTarget *target) |
2880 | 0 | { |
2881 | 0 | QualCost oldcost; |
2882 | | |
2883 | | /* |
2884 | | * If given path can't project, we might need a Result node, so make a |
2885 | | * separate ProjectionPath. |
2886 | | */ |
2887 | 0 | if (!is_projection_capable_path(path)) |
2888 | 0 | return (Path *) create_projection_path(root, rel, path, target); |
2889 | | |
2890 | | /* |
2891 | | * We can just jam the desired tlist into the existing path, being sure to |
2892 | | * update its cost estimates appropriately. |
2893 | | */ |
2894 | 0 | oldcost = path->pathtarget->cost; |
2895 | 0 | path->pathtarget = target; |
2896 | |
2897 | 0 | path->startup_cost += target->cost.startup - oldcost.startup; |
2898 | 0 | path->total_cost += target->cost.startup - oldcost.startup + |
2899 | 0 | (target->cost.per_tuple - oldcost.per_tuple) * path->rows; |
2900 | | |
2901 | | /* |
2902 | | * If the path happens to be a Gather or GatherMerge path, we'd like to |
2903 | | * arrange for the subpath to return the required target list so that |
2904 | | * workers can help project. But if there is something that is not |
2905 | | * parallel-safe in the target expressions, then we can't. |
2906 | | */ |
2907 | 0 | if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) && |
2908 | 0 | is_parallel_safe(root, (Node *) target->exprs)) |
2909 | 0 | { |
2910 | | /* |
2911 | | * We always use create_projection_path here, even if the subpath is |
2912 | | * projection-capable, so as to avoid modifying the subpath in place. |
2913 | | * It seems unlikely at present that there could be any other |
2914 | | * references to the subpath, but better safe than sorry. |
2915 | | * |
2916 | | * Note that we don't change the parallel path's cost estimates; it |
2917 | | * might be appropriate to do so, to reflect the fact that the bulk of |
2918 | | * the target evaluation will happen in workers. |
2919 | | */ |
2920 | 0 | if (IsA(path, GatherPath)) |
2921 | 0 | { |
2922 | 0 | GatherPath *gpath = (GatherPath *) path; |
2923 | |
2924 | 0 | gpath->subpath = (Path *) |
2925 | 0 | create_projection_path(root, |
2926 | 0 | gpath->subpath->parent, |
2927 | 0 | gpath->subpath, |
2928 | 0 | target); |
2929 | 0 | } |
2930 | 0 | else |
2931 | 0 | { |
2932 | 0 | GatherMergePath *gmpath = (GatherMergePath *) path; |
2933 | |
2934 | 0 | gmpath->subpath = (Path *) |
2935 | 0 | create_projection_path(root, |
2936 | 0 | gmpath->subpath->parent, |
2937 | 0 | gmpath->subpath, |
2938 | 0 | target); |
2939 | 0 | } |
2940 | 0 | } |
2941 | 0 | else if (path->parallel_safe && |
2942 | 0 | !is_parallel_safe(root, (Node *) target->exprs)) |
2943 | 0 | { |
2944 | | /* |
2945 | | * We're inserting a parallel-restricted target list into a path |
2946 | | * currently marked parallel-safe, so we have to mark it as no longer |
2947 | | * safe. |
2948 | | */ |
2949 | 0 | path->parallel_safe = false; |
2950 | 0 | } |
2951 | |
2952 | 0 | return path; |
2953 | 0 | } |
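
The "isn't referenced elsewhere" caveat matters because the path is mutated
rather than copied, so every holder of the pointer silently sees the new
target and costs. A trivial standalone illustration:

	#include <stdio.h>

	typedef struct { double total_cost; } PathDemo;

	int main(void)
	{
		PathDemo	p = {45.0};
		PathDemo   *kept_elsewhere = &p;	/* e.g. already in a pathlist */
		PathDemo   *modified = &p;			/* what gets "jammed" in place */

		modified->total_cost += 7.5;		/* in-place tlist/cost update */
		printf("other holder now sees %.1f\n", kept_elsewhere->total_cost);
		return 0;
	}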
2954 | | |
2955 | | /* |
2956 | | * create_set_projection_path |
2957 | | * Creates a pathnode that represents performing a projection that |
2958 | | * includes set-returning functions. |
2959 | | * |
2960 | | * 'rel' is the parent relation associated with the result |
2961 | | * 'subpath' is the path representing the source of data |
2962 | | * 'target' is the PathTarget to be computed |
2963 | | */ |
2964 | | ProjectSetPath * |
2965 | | create_set_projection_path(PlannerInfo *root, |
2966 | | RelOptInfo *rel, |
2967 | | Path *subpath, |
2968 | | PathTarget *target) |
2969 | 0 | { |
2970 | 0 | ProjectSetPath *pathnode = makeNode(ProjectSetPath); |
2971 | 0 | double tlist_rows; |
2972 | 0 | ListCell *lc; |
2973 | |
2974 | 0 | pathnode->path.pathtype = T_ProjectSet; |
2975 | 0 | pathnode->path.parent = rel; |
2976 | 0 | pathnode->path.pathtarget = target; |
2977 | | /* For now, assume we are above any joins, so no parameterization */ |
2978 | 0 | pathnode->path.param_info = NULL; |
2979 | 0 | pathnode->path.parallel_aware = false; |
2980 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
2981 | 0 | subpath->parallel_safe && |
2982 | 0 | is_parallel_safe(root, (Node *) target->exprs); |
2983 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
2984 | | /* Projection does not change the sort order XXX? */ |
2985 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
2986 | |
2987 | 0 | pathnode->subpath = subpath; |
2988 | | |
2989 | | /* |
2990 | | * Estimate number of rows produced by SRFs for each row of input; if |
2991 | | * there's more than one in this node, use the maximum. |
2992 | | */ |
2993 | 0 | tlist_rows = 1; |
2994 | 0 | foreach(lc, target->exprs) |
2995 | 0 | { |
2996 | 0 | Node *node = (Node *) lfirst(lc); |
2997 | 0 | double itemrows; |
2998 | |
2999 | 0 | itemrows = expression_returns_set_rows(root, node); |
3000 | 0 | if (tlist_rows < itemrows) |
3001 | 0 | tlist_rows = itemrows; |
3002 | 0 | } |
3003 | | |
3004 | | /* |
3005 | | * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost |
3006 | | * per input row, and half of cpu_tuple_cost for each added output row. |
3007 | | * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit |
3008 | | * this estimate later. |
3009 | | */ |
3010 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
3011 | 0 | pathnode->path.rows = subpath->rows * tlist_rows; |
3012 | 0 | pathnode->path.startup_cost = subpath->startup_cost + |
3013 | 0 | target->cost.startup; |
3014 | 0 | pathnode->path.total_cost = subpath->total_cost + |
3015 | 0 | target->cost.startup + |
3016 | 0 | (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows + |
3017 | 0 | (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2; |
3018 | |
3019 | 0 | return pathnode; |
3020 | 0 | } |
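
ProjectSet runs all the tlist's SRFs in lockstep, so the row multiplier is
the maximum of their estimates, not the product. A standalone sketch of the
estimate with made-up numbers (cpu_tuple_cost = 0.01 is the default):

	#include <stdio.h>

	int main(void)
	{
		const double cpu_tuple_cost = 0.01;
		double		sp_rows = 100.0, sp_total = 10.0;
		double		t_startup = 0.0, t_per_tuple = 0.005;

		/* e.g. generate_series(1,3) and generate_series(1,5) in one tlist */
		double		srf_rows[] = {3.0, 5.0};
		double		tlist_rows = 1.0;

		for (int i = 0; i < 2; i++)
			if (tlist_rows < srf_rows[i])
				tlist_rows = srf_rows[i];

		double		out_rows = sp_rows * tlist_rows;		/* 500 */
		double		total = sp_total + t_startup +
			(cpu_tuple_cost + t_per_tuple) * sp_rows +
			(out_rows - sp_rows) * cpu_tuple_cost / 2;		/* 13.50 */

		printf("rows=%.0f total=%.2f\n", out_rows, total);
		return 0;
	}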
3021 | | |
3022 | | /* |
3023 | | * create_incremental_sort_path |
3024 | | * Creates a pathnode that represents performing an incremental sort. |
3025 | | * |
3026 | | * 'rel' is the parent relation associated with the result |
3027 | | * 'subpath' is the path representing the source of data |
3028 | | * 'pathkeys' represents the desired sort order |
3029 | | * 'presorted_keys' is the number of keys by which the input path is |
3030 | | * already sorted |
3031 | | * 'limit_tuples' is the estimated bound on the number of output tuples, |
3032 | | * or -1 if no LIMIT or couldn't estimate |
3033 | | */ |
3034 | | IncrementalSortPath * |
3035 | | create_incremental_sort_path(PlannerInfo *root, |
3036 | | RelOptInfo *rel, |
3037 | | Path *subpath, |
3038 | | List *pathkeys, |
3039 | | int presorted_keys, |
3040 | | double limit_tuples) |
3041 | 0 | { |
3042 | 0 | IncrementalSortPath *sort = makeNode(IncrementalSortPath); |
3043 | 0 | SortPath *pathnode = &sort->spath; |
3044 | |
3045 | 0 | pathnode->path.pathtype = T_IncrementalSort; |
3046 | 0 | pathnode->path.parent = rel; |
3047 | | /* Sort doesn't project, so use source path's pathtarget */ |
3048 | 0 | pathnode->path.pathtarget = subpath->pathtarget; |
3049 | | /* For now, assume we are above any joins, so no parameterization */ |
3050 | 0 | pathnode->path.param_info = NULL; |
3051 | 0 | pathnode->path.parallel_aware = false; |
3052 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3053 | 0 | subpath->parallel_safe; |
3054 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
3055 | 0 | pathnode->path.pathkeys = pathkeys; |
3056 | |
3057 | 0 | pathnode->subpath = subpath; |
3058 | |
3059 | 0 | cost_incremental_sort(&pathnode->path, |
3060 | 0 | root, pathkeys, presorted_keys, |
3061 | 0 | subpath->disabled_nodes, |
3062 | 0 | subpath->startup_cost, |
3063 | 0 | subpath->total_cost, |
3064 | 0 | subpath->rows, |
3065 | 0 | subpath->pathtarget->width, |
3066 | 0 | 0.0, /* XXX comparison_cost shouldn't be 0? */ |
3067 | 0 | work_mem, limit_tuples); |
3068 | |
3069 | 0 | sort->nPresortedCols = presorted_keys; |
3070 | |
3071 | 0 | return sort; |
3072 | 0 | } |
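
presorted_keys is normally obtained by asking how long a prefix of the
desired pathkeys the input already satisfies; the real planner uses
pathkeys_count_contained_in() on canonical PathKey lists. A standalone
analogue with plain ints standing in for pathkeys:

	#include <stdio.h>

	/* how many leading entries of 'wanted' does 'given' already provide? */
	static int
	count_presorted(const int *wanted, int nwanted, const int *given, int ngiven)
	{
		int			n = 0;

		while (n < nwanted && n < ngiven && wanted[n] == given[n])
			n++;
		return n;
	}

	int main(void)
	{
		int			wanted[] = {1, 2, 3};	/* ORDER BY a, b, c */
		int			given[] = {1, 2};		/* input sorted by a, b */

		/* 2: an incremental sort need only sort each (a,b) group on c */
		printf("presorted_keys = %d\n", count_presorted(wanted, 3, given, 2));
		return 0;
	}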
3073 | | |
3074 | | /* |
3075 | | * create_sort_path |
3076 | | * Creates a pathnode that represents performing an explicit sort. |
3077 | | * |
3078 | | * 'rel' is the parent relation associated with the result |
3079 | | * 'subpath' is the path representing the source of data |
3080 | | * 'pathkeys' represents the desired sort order |
3081 | | * 'limit_tuples' is the estimated bound on the number of output tuples, |
3082 | | * or -1 if no LIMIT or couldn't estimate |
3083 | | */ |
3084 | | SortPath * |
3085 | | create_sort_path(PlannerInfo *root, |
3086 | | RelOptInfo *rel, |
3087 | | Path *subpath, |
3088 | | List *pathkeys, |
3089 | | double limit_tuples) |
3090 | 0 | { |
3091 | 0 | SortPath *pathnode = makeNode(SortPath); |
3092 | |
3093 | 0 | pathnode->path.pathtype = T_Sort; |
3094 | 0 | pathnode->path.parent = rel; |
3095 | | /* Sort doesn't project, so use source path's pathtarget */ |
3096 | 0 | pathnode->path.pathtarget = subpath->pathtarget; |
3097 | | /* For now, assume we are above any joins, so no parameterization */ |
3098 | 0 | pathnode->path.param_info = NULL; |
3099 | 0 | pathnode->path.parallel_aware = false; |
3100 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3101 | 0 | subpath->parallel_safe; |
3102 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
3103 | 0 | pathnode->path.pathkeys = pathkeys; |
3104 | |
3105 | 0 | pathnode->subpath = subpath; |
3106 | |
3107 | 0 | cost_sort(&pathnode->path, root, pathkeys, |
3108 | 0 | subpath->disabled_nodes, |
3109 | 0 | subpath->total_cost, |
3110 | 0 | subpath->rows, |
3111 | 0 | subpath->pathtarget->width, |
3112 | 0 | 0.0, /* XXX comparison_cost shouldn't be 0? */ |
3113 | 0 | work_mem, limit_tuples); |
3114 | |
3115 | 0 | return pathnode; |
3116 | 0 | } |
3117 | | |
3118 | | /* |
3119 | | * create_group_path |
3120 | | * Creates a pathnode that represents performing grouping of presorted input |
3121 | | * |
3122 | | * 'rel' is the parent relation associated with the result |
3123 | | * 'subpath' is the path representing the source of data |
3124 | | * (the output PathTarget is taken from rel->reltarget) |
3125 | | * 'groupClause' is a list of SortGroupClause's representing the grouping |
3126 | | * 'qual' is the HAVING quals if any |
3127 | | * 'numGroups' is the estimated number of groups |
3128 | | */ |
3129 | | GroupPath * |
3130 | | create_group_path(PlannerInfo *root, |
3131 | | RelOptInfo *rel, |
3132 | | Path *subpath, |
3133 | | List *groupClause, |
3134 | | List *qual, |
3135 | | double numGroups) |
3136 | 0 | { |
3137 | 0 | GroupPath *pathnode = makeNode(GroupPath); |
3138 | 0 | PathTarget *target = rel->reltarget; |
3139 | |
3140 | 0 | pathnode->path.pathtype = T_Group; |
3141 | 0 | pathnode->path.parent = rel; |
3142 | 0 | pathnode->path.pathtarget = target; |
3143 | | /* For now, assume we are above any joins, so no parameterization */ |
3144 | 0 | pathnode->path.param_info = NULL; |
3145 | 0 | pathnode->path.parallel_aware = false; |
3146 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3147 | 0 | subpath->parallel_safe; |
3148 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
3149 | | /* Group doesn't change sort ordering */ |
3150 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
3151 | |
3152 | 0 | pathnode->subpath = subpath; |
3153 | |
3154 | 0 | pathnode->groupClause = groupClause; |
3155 | 0 | pathnode->qual = qual; |
3156 | |
3157 | 0 | cost_group(&pathnode->path, root, |
3158 | 0 | list_length(groupClause), |
3159 | 0 | numGroups, |
3160 | 0 | qual, |
3161 | 0 | subpath->disabled_nodes, |
3162 | 0 | subpath->startup_cost, subpath->total_cost, |
3163 | 0 | subpath->rows); |
3164 | | |
3165 | | /* add tlist eval cost for each output row */ |
3166 | 0 | pathnode->path.startup_cost += target->cost.startup; |
3167 | 0 | pathnode->path.total_cost += target->cost.startup + |
3168 | 0 | target->cost.per_tuple * pathnode->path.rows; |
3169 | |
3170 | 0 | return pathnode; |
3171 | 0 | } |
3172 | | |
3173 | | /* |
3174 | | * create_upper_unique_path |
3175 | | * Creates a pathnode that represents performing an explicit Unique step |
3176 | | * on presorted input. |
3177 | | * |
3178 | | * This produces a Unique plan node, but the use-case is so different from |
3179 | | * create_unique_path that it doesn't seem worth trying to merge the two. |
3180 | | * |
3181 | | * 'rel' is the parent relation associated with the result |
3182 | | * 'subpath' is the path representing the source of data |
3183 | | * 'numCols' is the number of grouping columns |
3184 | | * 'numGroups' is the estimated number of groups |
3185 | | * |
3186 | | * The input path must be sorted on the grouping columns, plus possibly |
3187 | | * additional columns; so the first numCols pathkeys are the grouping columns |
3188 | | */ |
3189 | | UpperUniquePath * |
3190 | | create_upper_unique_path(PlannerInfo *root, |
3191 | | RelOptInfo *rel, |
3192 | | Path *subpath, |
3193 | | int numCols, |
3194 | | double numGroups) |
3195 | 0 | { |
3196 | 0 | UpperUniquePath *pathnode = makeNode(UpperUniquePath); |
3197 | |
3198 | 0 | pathnode->path.pathtype = T_Unique; |
3199 | 0 | pathnode->path.parent = rel; |
3200 | | /* Unique doesn't project, so use source path's pathtarget */ |
3201 | 0 | pathnode->path.pathtarget = subpath->pathtarget; |
3202 | | /* For now, assume we are above any joins, so no parameterization */ |
3203 | 0 | pathnode->path.param_info = NULL; |
3204 | 0 | pathnode->path.parallel_aware = false; |
3205 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3206 | 0 | subpath->parallel_safe; |
3207 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
3208 | | /* Unique doesn't change the input ordering */ |
3209 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
3210 | |
3211 | 0 | pathnode->subpath = subpath; |
3212 | 0 | pathnode->numkeys = numCols; |
3213 | | |
3214 | | /* |
3215 | | * Charge one cpu_operator_cost per comparison per input tuple. We assume |
3216 | | * all columns get compared for most of the tuples. (XXX probably this is |
3217 | | * an overestimate.) |
3218 | | */ |
3219 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
3220 | 0 | pathnode->path.startup_cost = subpath->startup_cost; |
3221 | 0 | pathnode->path.total_cost = subpath->total_cost + |
3222 | 0 | cpu_operator_cost * subpath->rows * numCols; |
3223 | 0 | pathnode->path.rows = numGroups; |
3224 | |
3225 | 0 | return pathnode; |
3226 | 0 | } |
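
Numerically, the comparison charge grows with both the input row count and
the number of grouping columns, while the output row estimate is simply
numGroups. A standalone sketch with made-up numbers (cpu_operator_cost =
0.0025 is a PostgreSQL default):

	#include <stdio.h>

	int main(void)
	{
		const double cpu_operator_cost = 0.0025;
		double		sp_rows = 10000.0, sp_total = 180.0, numGroups = 250.0;
		int			numCols = 3;

		double		total = sp_total + cpu_operator_cost * sp_rows * numCols;

		printf("total=%.1f rows=%.0f\n", total, numGroups);	/* 255.0 250 */
		return 0;
	}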
3227 | | |
3228 | | /* |
3229 | | * create_agg_path |
3230 | | * Creates a pathnode that represents performing aggregation/grouping |
3231 | | * |
3232 | | * 'rel' is the parent relation associated with the result |
3233 | | * 'subpath' is the path representing the source of data |
3234 | | * 'target' is the PathTarget to be computed |
3235 | | * 'aggstrategy' is the Agg node's basic implementation strategy |
3236 | | * 'aggsplit' is the Agg node's aggregate-splitting mode |
3237 | | * 'groupClause' is a list of SortGroupClause's representing the grouping |
3238 | | * 'qual' is the HAVING quals if any |
3239 | | * 'aggcosts' contains cost info about the aggregate functions to be computed |
3240 | | * 'numGroups' is the estimated number of groups (1 if not grouping) |
3241 | | */ |
3242 | | AggPath * |
3243 | | create_agg_path(PlannerInfo *root, |
3244 | | RelOptInfo *rel, |
3245 | | Path *subpath, |
3246 | | PathTarget *target, |
3247 | | AggStrategy aggstrategy, |
3248 | | AggSplit aggsplit, |
3249 | | List *groupClause, |
3250 | | List *qual, |
3251 | | const AggClauseCosts *aggcosts, |
3252 | | double numGroups) |
3253 | 0 | { |
3254 | 0 | AggPath *pathnode = makeNode(AggPath); |
3255 | |
3256 | 0 | pathnode->path.pathtype = T_Agg; |
3257 | 0 | pathnode->path.parent = rel; |
3258 | 0 | pathnode->path.pathtarget = target; |
3259 | | /* For now, assume we are above any joins, so no parameterization */ |
3260 | 0 | pathnode->path.param_info = NULL; |
3261 | 0 | pathnode->path.parallel_aware = false; |
3262 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3263 | 0 | subpath->parallel_safe; |
3264 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
3265 | |
3266 | 0 | if (aggstrategy == AGG_SORTED) |
3267 | 0 | { |
3268 | | /* |
3269 | | * Attempt to preserve the order of the subpath. Additional pathkeys |
3270 | | * may have been added in adjust_group_pathkeys_for_groupagg() to |
3271 | | * support ORDER BY / DISTINCT aggregates. Pathkeys added there |
3272 | | * belong to columns within the aggregate function, so we must strip |
3273 | | * these additional pathkeys off as those columns are unavailable |
3274 | | * above the aggregate node. |
3275 | | */ |
3276 | 0 | if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys) |
3277 | 0 | pathnode->path.pathkeys = list_copy_head(subpath->pathkeys, |
3278 | 0 | root->num_groupby_pathkeys); |
3279 | 0 | else |
3280 | 0 | pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */ |
3281 | 0 | } |
3282 | 0 | else |
3283 | 0 | pathnode->path.pathkeys = NIL; /* output is unordered */ |
3284 | |
3285 | 0 | pathnode->subpath = subpath; |
3286 | |
3287 | 0 | pathnode->aggstrategy = aggstrategy; |
3288 | 0 | pathnode->aggsplit = aggsplit; |
3289 | 0 | pathnode->numGroups = numGroups; |
3290 | 0 | pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0; |
3291 | 0 | pathnode->groupClause = groupClause; |
3292 | 0 | pathnode->qual = qual; |
3293 | |
3294 | 0 | cost_agg(&pathnode->path, root, |
3295 | 0 | aggstrategy, aggcosts, |
3296 | 0 | list_length(groupClause), numGroups, |
3297 | 0 | qual, |
3298 | 0 | subpath->disabled_nodes, |
3299 | 0 | subpath->startup_cost, subpath->total_cost, |
3300 | 0 | subpath->rows, subpath->pathtarget->width); |
3301 | | |
3302 | | /* add tlist eval cost for each output row */ |
3303 | 0 | pathnode->path.startup_cost += target->cost.startup; |
3304 | 0 | pathnode->path.total_cost += target->cost.startup + |
3305 | 0 | target->cost.per_tuple * pathnode->path.rows; |
3306 | |
3307 | 0 | return pathnode; |
3308 | 0 | } |
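
The pathkey-stripping rule keeps only the leading num_groupby_pathkeys
entries: trailing keys added for ORDER BY / DISTINCT aggregates refer to
columns that no longer exist above the Agg node. A standalone analogue of the
list_copy_head() truncation, using an array in place of a List:

	#include <stdio.h>

	int main(void)
	{
		/* input sorted by (a, b, x); x only feeds an ORDER BY aggregate */
		const char *subpath_pathkeys[] = {"a", "b", "x"};
		int			n_subpath = 3, num_groupby_pathkeys = 2;

		int			n_out = n_subpath > num_groupby_pathkeys ?
			num_groupby_pathkeys : n_subpath;

		for (int i = 0; i < n_out; i++)		/* prints a, b */
			printf("pathkey %d: %s\n", i, subpath_pathkeys[i]);
		return 0;
	}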
3309 | | |
3310 | | /* |
3311 | | * create_groupingsets_path |
3312 | | * Creates a pathnode that represents performing GROUPING SETS aggregation |
3313 | | * |
3314 | | * GroupingSetsPath represents sorted grouping with one or more grouping sets. |
3315 | | * The input path's result must be sorted to match the groupClause of |
3316 | | * the first unhashed rollup in 'rollups'. |
3317 | | * |
3318 | | * 'rel' is the parent relation associated with the result |
3319 | | * 'subpath' is the path representing the source of data |
3320 | | * 'having_qual' is the HAVING quals if any |
3321 | | * 'aggstrategy' is the Agg node's basic implementation strategy |
3322 | | * 'rollups' is a list of RollupData nodes |
3323 | | * 'agg_costs' contains cost info about the aggregate functions to be computed |
3324 | | */ |
3325 | | GroupingSetsPath * |
3326 | | create_groupingsets_path(PlannerInfo *root, |
3327 | | RelOptInfo *rel, |
3328 | | Path *subpath, |
3329 | | List *having_qual, |
3330 | | AggStrategy aggstrategy, |
3331 | | List *rollups, |
3332 | | const AggClauseCosts *agg_costs) |
3333 | 0 | { |
3334 | 0 | GroupingSetsPath *pathnode = makeNode(GroupingSetsPath); |
3335 | 0 | PathTarget *target = rel->reltarget; |
3336 | 0 | ListCell *lc; |
3337 | 0 | bool is_first = true; |
3338 | 0 | bool is_first_sort = true; |
3339 | | |
3340 | | /* The topmost generated Plan node will be an Agg */ |
3341 | 0 | pathnode->path.pathtype = T_Agg; |
3342 | 0 | pathnode->path.parent = rel; |
3343 | 0 | pathnode->path.pathtarget = target; |
3344 | 0 | pathnode->path.param_info = subpath->param_info; |
3345 | 0 | pathnode->path.parallel_aware = false; |
3346 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3347 | 0 | subpath->parallel_safe; |
3348 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
3349 | 0 | pathnode->subpath = subpath; |
3350 | | |
3351 | | /* |
3352 | | * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED |
3353 | | * to AGG_HASHED, here if possible. |
3354 | | */ |
3355 | 0 | if (aggstrategy == AGG_SORTED && |
3356 | 0 | list_length(rollups) == 1 && |
3357 | 0 | ((RollupData *) linitial(rollups))->groupClause == NIL) |
3358 | 0 | aggstrategy = AGG_PLAIN; |
3359 | |
3360 | 0 | if (aggstrategy == AGG_MIXED && |
3361 | 0 | list_length(rollups) == 1) |
3362 | 0 | aggstrategy = AGG_HASHED; |
3363 | | |
3364 | | /* |
3365 | | * Output will be in sorted order by group_pathkeys if, and only if, there |
3366 | | * is a single rollup operation on a non-empty list of grouping |
3367 | | * expressions. |
3368 | | */ |
3369 | 0 | if (aggstrategy == AGG_SORTED && list_length(rollups) == 1) |
3370 | 0 | pathnode->path.pathkeys = root->group_pathkeys; |
3371 | 0 | else |
3372 | 0 | pathnode->path.pathkeys = NIL; |
3373 | |
3374 | 0 | pathnode->aggstrategy = aggstrategy; |
3375 | 0 | pathnode->rollups = rollups; |
3376 | 0 | pathnode->qual = having_qual; |
3377 | 0 | pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0; |
3378 | |
3379 | 0 | Assert(rollups != NIL); |
3380 | 0 | Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1); |
3381 | 0 | Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1); |
3382 | |
3383 | 0 | foreach(lc, rollups) |
3384 | 0 | { |
3385 | 0 | RollupData *rollup = lfirst(lc); |
3386 | 0 | List *gsets = rollup->gsets; |
3387 | 0 | int numGroupCols = list_length(linitial(gsets)); |
3388 | | |
3389 | | /* |
3390 | | * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the |
3391 | | * (already-sorted) input, and following ones do their own sort. |
3392 | | * |
3393 | | * In AGG_HASHED mode, there is one rollup for each grouping set. |
3394 | | * |
3395 | | * In AGG_MIXED mode, the first rollups are hashed, the first |
3396 | | * non-hashed one takes the (already-sorted) input, and following ones |
3397 | | * do their own sort. |
3398 | | */ |
3399 | 0 | if (is_first) |
3400 | 0 | { |
3401 | 0 | cost_agg(&pathnode->path, root, |
3402 | 0 | aggstrategy, |
3403 | 0 | agg_costs, |
3404 | 0 | numGroupCols, |
3405 | 0 | rollup->numGroups, |
3406 | 0 | having_qual, |
3407 | 0 | subpath->disabled_nodes, |
3408 | 0 | subpath->startup_cost, |
3409 | 0 | subpath->total_cost, |
3410 | 0 | subpath->rows, |
3411 | 0 | subpath->pathtarget->width); |
3412 | 0 | is_first = false; |
3413 | 0 | if (!rollup->is_hashed) |
3414 | 0 | is_first_sort = false; |
3415 | 0 | } |
3416 | 0 | else |
3417 | 0 | { |
3418 | 0 | Path sort_path; /* dummy for result of cost_sort */ |
3419 | 0 | Path agg_path; /* dummy for result of cost_agg */ |
3420 | |
3421 | 0 | if (rollup->is_hashed || is_first_sort) |
3422 | 0 | { |
3423 | | /* |
3424 | | * Account for cost of aggregation, but don't charge input |
3425 | | * cost again |
3426 | | */ |
3427 | 0 | cost_agg(&agg_path, root, |
3428 | 0 | rollup->is_hashed ? AGG_HASHED : AGG_SORTED, |
3429 | 0 | agg_costs, |
3430 | 0 | numGroupCols, |
3431 | 0 | rollup->numGroups, |
3432 | 0 | having_qual, |
3433 | 0 | 0, 0.0, 0.0, |
3434 | 0 | subpath->rows, |
3435 | 0 | subpath->pathtarget->width); |
3436 | 0 | if (!rollup->is_hashed) |
3437 | 0 | is_first_sort = false; |
3438 | 0 | } |
3439 | 0 | else |
3440 | 0 | { |
3441 | | /* Account for cost of sort, but don't charge input cost again */ |
3442 | 0 | cost_sort(&sort_path, root, NIL, 0, |
3443 | 0 | 0.0, |
3444 | 0 | subpath->rows, |
3445 | 0 | subpath->pathtarget->width, |
3446 | 0 | 0.0, |
3447 | 0 | work_mem, |
3448 | 0 | -1.0); |
3449 | | |
3450 | | /* Account for cost of aggregation */ |
3451 | |
3452 | 0 | cost_agg(&agg_path, root, |
3453 | 0 | AGG_SORTED, |
3454 | 0 | agg_costs, |
3455 | 0 | numGroupCols, |
3456 | 0 | rollup->numGroups, |
3457 | 0 | having_qual, |
3458 | 0 | sort_path.disabled_nodes, |
3459 | 0 | sort_path.startup_cost, |
3460 | 0 | sort_path.total_cost, |
3461 | 0 | sort_path.rows, |
3462 | 0 | subpath->pathtarget->width); |
3463 | 0 | } |
3464 | |
3465 | 0 | pathnode->path.disabled_nodes += agg_path.disabled_nodes; |
3466 | 0 | pathnode->path.total_cost += agg_path.total_cost; |
3467 | 0 | pathnode->path.rows += agg_path.rows; |
3468 | 0 | } |
3469 | 0 | } |
3470 | | |
3471 | | /* add tlist eval cost for each output row */ |
3472 | 0 | pathnode->path.startup_cost += target->cost.startup; |
3473 | 0 | pathnode->path.total_cost += target->cost.startup + |
3474 | 0 | target->cost.per_tuple * pathnode->path.rows; |
3475 | |
3476 | 0 | return pathnode; |
3477 | 0 | } |
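
The loop's costing rule is that only the first rollup is charged the
subpath's cost; each later rollup sorts and re-aggregates the same input
tuples without rescanning the subpath, so only its own sort and/or agg cost
is added. A standalone sketch with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		double		first_total = 130.0;			/* includes input's cost */
		double		later_totals[] = {45.0, 40.0};	/* own sort+agg cost only */

		double		total = first_total;

		for (int i = 0; i < 2; i++)		/* like the foreach over rollups */
			total += later_totals[i];
		printf("grouping sets total=%.1f\n", total);	/* 215.0 */
		return 0;
	}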
3478 | | |
3479 | | /* |
3480 | | * create_minmaxagg_path |
3481 | | * Creates a pathnode that represents computation of MIN/MAX aggregates |
3482 | | * |
3483 | | * 'rel' is the parent relation associated with the result |
3484 | | * 'target' is the PathTarget to be computed |
3485 | | * 'mmaggregates' is a list of MinMaxAggInfo structs |
3486 | | * 'quals' is the HAVING quals if any |
3487 | | */ |
3488 | | MinMaxAggPath * |
3489 | | create_minmaxagg_path(PlannerInfo *root, |
3490 | | RelOptInfo *rel, |
3491 | | PathTarget *target, |
3492 | | List *mmaggregates, |
3493 | | List *quals) |
3494 | 0 | { |
3495 | 0 | MinMaxAggPath *pathnode = makeNode(MinMaxAggPath); |
3496 | 0 | Cost initplan_cost; |
3497 | 0 | int initplan_disabled_nodes = 0; |
3498 | 0 | ListCell *lc; |
3499 | | |
3500 | | /* The topmost generated Plan node will be a Result */ |
3501 | 0 | pathnode->path.pathtype = T_Result; |
3502 | 0 | pathnode->path.parent = rel; |
3503 | 0 | pathnode->path.pathtarget = target; |
3504 | | /* For now, assume we are above any joins, so no parameterization */ |
3505 | 0 | pathnode->path.param_info = NULL; |
3506 | 0 | pathnode->path.parallel_aware = false; |
3507 | 0 | pathnode->path.parallel_safe = true; /* might change below */ |
3508 | 0 | pathnode->path.parallel_workers = 0; |
3509 | | /* Result is one unordered row */ |
3510 | 0 | pathnode->path.rows = 1; |
3511 | 0 | pathnode->path.pathkeys = NIL; |
3512 | |
3513 | 0 | pathnode->mmaggregates = mmaggregates; |
3514 | 0 | pathnode->quals = quals; |
3515 | | |
3516 | | /* Calculate cost of all the initplans, and check parallel safety */ |
3517 | 0 | initplan_cost = 0; |
3518 | 0 | foreach(lc, mmaggregates) |
3519 | 0 | { |
3520 | 0 | MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc); |
3521 | |
3522 | 0 | initplan_disabled_nodes += mminfo->path->disabled_nodes; |
3523 | 0 | initplan_cost += mminfo->pathcost; |
3524 | 0 | if (!mminfo->path->parallel_safe) |
3525 | 0 | pathnode->path.parallel_safe = false; |
3526 | 0 | } |
3527 | | |
3528 | | /* add tlist eval cost for each output row, plus cpu_tuple_cost */ |
3529 | 0 | pathnode->path.disabled_nodes = initplan_disabled_nodes; |
3530 | 0 | pathnode->path.startup_cost = initplan_cost + target->cost.startup; |
3531 | 0 | pathnode->path.total_cost = initplan_cost + target->cost.startup + |
3532 | 0 | target->cost.per_tuple + cpu_tuple_cost; |
3533 | | |
3534 | | /* |
3535 | | * Add cost of qual, if any --- but we ignore its selectivity, since our |
3536 | | * rowcount estimate should be 1 no matter what the qual is. |
3537 | | */ |
3538 | 0 | if (quals) |
3539 | 0 | { |
3540 | 0 | QualCost qual_cost; |
3541 | |
3542 | 0 | cost_qual_eval(&qual_cost, quals, root); |
3543 | 0 | pathnode->path.startup_cost += qual_cost.startup; |
3544 | 0 | pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple; |
3545 | 0 | } |
3546 | | |
3547 | | /* |
3548 | | * If the initplans were all parallel-safe, also check safety of the |
3549 | | * target and quals. (The Result node itself isn't parallelizable, but if |
3550 | | * we are in a subquery then it can be useful for the outer query to know |
3551 | | * that this one is parallel-safe.) |
3552 | | */ |
3553 | 0 | if (pathnode->path.parallel_safe) |
3554 | 0 | pathnode->path.parallel_safe = |
3555 | 0 | is_parallel_safe(root, (Node *) target->exprs) && |
3556 | 0 | is_parallel_safe(root, (Node *) quals); |
3557 | |
3558 | 0 | return pathnode; |
3559 | 0 | } |
3560 | | |
3561 | | /* |
3562 | | * create_windowagg_path |
3563 | | * Creates a pathnode that represents computation of window functions |
3564 | | * |
3565 | | * 'rel' is the parent relation associated with the result |
3566 | | * 'subpath' is the path representing the source of data |
3567 | | * 'target' is the PathTarget to be computed |
3568 | | * 'windowFuncs' is a list of WindowFunc structs |
3569 | | * 'runCondition' is a list of OpExprs to short-circuit WindowAgg execution |
3570 | | * 'winclause' is a WindowClause that is common to all the WindowFuncs |
3571 | | * 'qual' is a list of WindowClause runconditions collected from |
3572 | | * lower-level WindowAggPaths; must always be NIL when topwindow == false |
3573 | | * 'topwindow' is true only for the top-level WindowAgg; false for all |
3574 | | * intermediate WindowAggs |
3575 | | * |
3576 | | * The input must be sorted according to the WindowClause's PARTITION keys |
3577 | | * plus ORDER BY keys. |
3578 | | */ |
3579 | | WindowAggPath * |
3580 | | create_windowagg_path(PlannerInfo *root, |
3581 | | RelOptInfo *rel, |
3582 | | Path *subpath, |
3583 | | PathTarget *target, |
3584 | | List *windowFuncs, |
3585 | | List *runCondition, |
3586 | | WindowClause *winclause, |
3587 | | List *qual, |
3588 | | bool topwindow) |
3589 | 0 | { |
3590 | 0 | WindowAggPath *pathnode = makeNode(WindowAggPath); |
3591 | | |
3592 | | /* qual can only be set for the topwindow */ |
3593 | 0 | Assert(qual == NIL || topwindow); |
3594 | |
3595 | 0 | pathnode->path.pathtype = T_WindowAgg; |
3596 | 0 | pathnode->path.parent = rel; |
3597 | 0 | pathnode->path.pathtarget = target; |
3598 | | /* For now, assume we are above any joins, so no parameterization */ |
3599 | 0 | pathnode->path.param_info = NULL; |
3600 | 0 | pathnode->path.parallel_aware = false; |
3601 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3602 | 0 | subpath->parallel_safe; |
3603 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
3604 | | /* WindowAgg preserves the input sort order */ |
3605 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
3606 | |
3607 | 0 | pathnode->subpath = subpath; |
3608 | 0 | pathnode->winclause = winclause; |
3609 | 0 | pathnode->qual = qual; |
3610 | 0 | pathnode->runCondition = runCondition; |
3611 | 0 | pathnode->topwindow = topwindow; |
3612 | | |
3613 | | /* |
3614 | | * For costing purposes, assume that there are no redundant partitioning |
3615 | | * or ordering columns; it's not worth the trouble to deal with that |
3616 | | * corner case here. So we just pass the unmodified list lengths to |
3617 | | * cost_windowagg. |
3618 | | */ |
3619 | 0 | cost_windowagg(&pathnode->path, root, |
3620 | 0 | windowFuncs, |
3621 | 0 | winclause, |
3622 | 0 | subpath->disabled_nodes, |
3623 | 0 | subpath->startup_cost, |
3624 | 0 | subpath->total_cost, |
3625 | 0 | subpath->rows); |
3626 | | |
3627 | | /* add tlist eval cost for each output row */ |
3628 | 0 | pathnode->path.startup_cost += target->cost.startup; |
3629 | 0 | pathnode->path.total_cost += target->cost.startup + |
3630 | 0 | target->cost.per_tuple * pathnode->path.rows; |
3631 | |
3632 | 0 | return pathnode; |
3633 | 0 | } |
3634 | | |
3635 | | /* |
3636 | | * create_setop_path |
3637 | | * Creates a pathnode that represents computation of INTERSECT or EXCEPT |
3638 | | * |
3639 | | * 'rel' is the parent relation associated with the result |
3640 | | * 'leftpath' is the path representing the left-hand source of data |
3641 | | * 'rightpath' is the path representing the right-hand source of data |
3642 | | * 'cmd' is the specific semantics (INTERSECT or EXCEPT, with/without ALL) |
3643 | | * 'strategy' is the implementation strategy (sorted or hashed) |
3644 | | * 'groupList' is a list of SortGroupClause's representing the grouping |
3645 | | * 'numGroups' is the estimated number of distinct groups in left-hand input |
3646 | | * 'outputRows' is the estimated number of output rows |
3647 | | * |
3648 | | * leftpath and rightpath must produce the same columns. Moreover, if |
3649 | | * strategy is SETOP_SORTED, leftpath and rightpath must both be sorted |
3650 | | * by all the grouping columns. |
3651 | | */ |
3652 | | SetOpPath * |
3653 | | create_setop_path(PlannerInfo *root, |
3654 | | RelOptInfo *rel, |
3655 | | Path *leftpath, |
3656 | | Path *rightpath, |
3657 | | SetOpCmd cmd, |
3658 | | SetOpStrategy strategy, |
3659 | | List *groupList, |
3660 | | double numGroups, |
3661 | | double outputRows) |
3662 | 0 | { |
3663 | 0 | SetOpPath *pathnode = makeNode(SetOpPath); |
3664 | |
3665 | 0 | pathnode->path.pathtype = T_SetOp; |
3666 | 0 | pathnode->path.parent = rel; |
3667 | 0 | pathnode->path.pathtarget = rel->reltarget; |
3668 | | /* For now, assume we are above any joins, so no parameterization */ |
3669 | 0 | pathnode->path.param_info = NULL; |
3670 | 0 | pathnode->path.parallel_aware = false; |
3671 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3672 | 0 | leftpath->parallel_safe && rightpath->parallel_safe; |
3673 | 0 | pathnode->path.parallel_workers = |
3674 | 0 | leftpath->parallel_workers + rightpath->parallel_workers; |
3675 | | /* SetOp preserves the input sort order if in sort mode */ |
3676 | 0 | pathnode->path.pathkeys = |
3677 | 0 | (strategy == SETOP_SORTED) ? leftpath->pathkeys : NIL; |
3678 | |
3679 | 0 | pathnode->leftpath = leftpath; |
3680 | 0 | pathnode->rightpath = rightpath; |
3681 | 0 | pathnode->cmd = cmd; |
3682 | 0 | pathnode->strategy = strategy; |
3683 | 0 | pathnode->groupList = groupList; |
3684 | 0 | pathnode->numGroups = numGroups; |
3685 | | |
3686 | | /* |
3687 | | * Compute cost estimates. As things stand, we end up with the same total |
3688 | | * cost in this node for sort and hash methods, but different startup |
3689 | | * costs. This could be refined perhaps, but it'll do for now. |
3690 | | */ |
3691 | 0 | pathnode->path.disabled_nodes = |
3692 | 0 | leftpath->disabled_nodes + rightpath->disabled_nodes; |
3693 | 0 | if (strategy == SETOP_SORTED) |
3694 | 0 | { |
3695 | | /* |
3696 | | * In sorted mode, we can emit output incrementally. Charge one |
3697 | | * cpu_operator_cost per comparison per input tuple. Like cost_group, |
3698 | | * we assume all columns get compared for most of the tuples. |
3699 | | */ |
3700 | 0 | pathnode->path.startup_cost = |
3701 | 0 | leftpath->startup_cost + rightpath->startup_cost; |
3702 | 0 | pathnode->path.total_cost = |
3703 | 0 | leftpath->total_cost + rightpath->total_cost + |
3704 | 0 | cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList); |
3705 | | |
3706 | | /* |
3707 | | * Also charge a small amount per extracted tuple. Like cost_sort, |
3708 | | * charge only operator cost not cpu_tuple_cost, since SetOp does no |
3709 | | * qual-checking or projection. |
3710 | | */ |
3711 | 0 | pathnode->path.total_cost += cpu_operator_cost * outputRows; |
3712 | 0 | } |
3713 | 0 | else |
3714 | 0 | { |
3715 | 0 | Size hashentrysize; |
3716 | | |
3717 | | /* |
3718 | | * In hashed mode, we must read all the input before we can emit |
3719 | | * anything. Also charge comparison costs to represent the cost of |
3720 | | * hash table lookups. |
3721 | | */ |
3722 | 0 | pathnode->path.startup_cost = |
3723 | 0 | leftpath->total_cost + rightpath->total_cost + |
3724 | 0 | cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList); |
3725 | 0 | pathnode->path.total_cost = pathnode->path.startup_cost; |
3726 | | |
3727 | | /* |
3728 | | * Also charge a small amount per extracted tuple. Like cost_sort, |
3729 | | * charge only operator cost not cpu_tuple_cost, since SetOp does no |
3730 | | * qual-checking or projection. |
3731 | | */ |
3732 | 0 | pathnode->path.total_cost += cpu_operator_cost * outputRows; |
3733 | | |
3734 | | /* |
3735 | | * Mark the path as disabled if enable_hashagg is off. While this |
3736 | | * isn't exactly a HashAgg node, it seems close enough to justify |
3737 | | * letting that switch control it. |
3738 | | */ |
3739 | 0 | if (!enable_hashagg) |
3740 | 0 | pathnode->path.disabled_nodes++; |
3741 | | |
3742 | | /* |
3743 | | * Also disable if it doesn't look like the hashtable will fit into |
3744 | | * hash_mem. |
3745 | | */ |
3746 | 0 | hashentrysize = MAXALIGN(leftpath->pathtarget->width) + |
3747 | 0 | MAXALIGN(SizeofMinimalTupleHeader); |
3748 | 0 | if (hashentrysize * numGroups > get_hash_memory_limit()) |
3749 | 0 | pathnode->path.disabled_nodes++; |
3750 | 0 | } |
3751 | 0 | pathnode->path.rows = outputRows; |
3752 | |
3753 | 0 | return pathnode; |
3754 | 0 | } |
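
A standalone worked version of the hash-memory check; the 8-byte alignment,
the 16-byte header, and the 64MB limit are stand-ins for PostgreSQL's
MAXALIGN, SizeofMinimalTupleHeader, and get_hash_memory_limit() respectively:

	#include <stdio.h>
	#include <stddef.h>

	#define MAXALIGN_DEMO(x) (((size_t) (x) + 7) & ~(size_t) 7)

	int main(void)
	{
		size_t		width = 44;					/* pathtarget->width estimate */
		size_t		header = 16;				/* stand-in header size */
		double		numGroups = 2e6;
		double		limit = 64.0 * 1024 * 1024;	/* stand-in memory limit */

		size_t		entry = MAXALIGN_DEMO(width) + MAXALIGN_DEMO(header);	/* 64 */
		double		need = (double) entry * numGroups;						/* 128MB */

		printf("entry=%zu need=%.0f -> %s\n", entry, need,
			   need > limit ? "disable hashed SetOp" : "ok");
		return 0;
	}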
3755 | | |
3756 | | /* |
3757 | | * create_recursiveunion_path |
3758 | | * Creates a pathnode that represents a recursive UNION node |
3759 | | * |
3760 | | * 'rel' is the parent relation associated with the result |
3761 | | * 'leftpath' is the source of data for the non-recursive term |
3762 | | * 'rightpath' is the source of data for the recursive term |
3763 | | * 'target' is the PathTarget to be computed |
3764 | | * 'distinctList' is a list of SortGroupClause's representing the grouping |
3765 | | * 'wtParam' is the ID of Param representing work table |
3766 | | * 'numGroups' is the estimated number of groups |
3767 | | * |
3768 | | * For recursive UNION ALL, distinctList is empty and numGroups is zero |
3769 | | */ |
3770 | | RecursiveUnionPath * |
3771 | | create_recursiveunion_path(PlannerInfo *root, |
3772 | | RelOptInfo *rel, |
3773 | | Path *leftpath, |
3774 | | Path *rightpath, |
3775 | | PathTarget *target, |
3776 | | List *distinctList, |
3777 | | int wtParam, |
3778 | | double numGroups) |
3779 | 0 | { |
3780 | 0 | RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath); |
3781 | |
3782 | 0 | pathnode->path.pathtype = T_RecursiveUnion; |
3783 | 0 | pathnode->path.parent = rel; |
3784 | 0 | pathnode->path.pathtarget = target; |
3785 | | /* For now, assume we are above any joins, so no parameterization */ |
3786 | 0 | pathnode->path.param_info = NULL; |
3787 | 0 | pathnode->path.parallel_aware = false; |
3788 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3789 | 0 | leftpath->parallel_safe && rightpath->parallel_safe; |
3790 | | /* Foolish, but we'll do it like joins for now: */ |
3791 | 0 | pathnode->path.parallel_workers = leftpath->parallel_workers; |
3792 | | /* RecursiveUnion result is always unsorted */ |
3793 | 0 | pathnode->path.pathkeys = NIL; |
3794 | |
3795 | 0 | pathnode->leftpath = leftpath; |
3796 | 0 | pathnode->rightpath = rightpath; |
3797 | 0 | pathnode->distinctList = distinctList; |
3798 | 0 | pathnode->wtParam = wtParam; |
3799 | 0 | pathnode->numGroups = numGroups; |
3800 | |
3801 | 0 | cost_recursive_union(&pathnode->path, leftpath, rightpath); |
3802 | |
3803 | 0 | return pathnode; |
3804 | 0 | } |
3805 | | |
3806 | | /* |
3807 | | * create_lockrows_path |
3808 | | * Creates a pathnode that represents acquiring row locks |
3809 | | * |
3810 | | * 'rel' is the parent relation associated with the result |
3811 | | * 'subpath' is the path representing the source of data |
3812 | | * 'rowMarks' is a list of PlanRowMark's |
3813 | | * 'epqParam' is the ID of Param for EvalPlanQual re-eval |
3814 | | */ |
3815 | | LockRowsPath * |
3816 | | create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, |
3817 | | Path *subpath, List *rowMarks, int epqParam) |
3818 | 0 | { |
3819 | 0 | LockRowsPath *pathnode = makeNode(LockRowsPath); |
3820 | |
3821 | 0 | pathnode->path.pathtype = T_LockRows; |
3822 | 0 | pathnode->path.parent = rel; |
3823 | | /* LockRows doesn't project, so use source path's pathtarget */ |
3824 | 0 | pathnode->path.pathtarget = subpath->pathtarget; |
3825 | | /* For now, assume we are above any joins, so no parameterization */ |
3826 | 0 | pathnode->path.param_info = NULL; |
3827 | 0 | pathnode->path.parallel_aware = false; |
3828 | 0 | pathnode->path.parallel_safe = false; |
3829 | 0 | pathnode->path.parallel_workers = 0; |
3830 | 0 | pathnode->path.rows = subpath->rows; |
3831 | | |
3832 | | /* |
3833 | | * The result cannot be assumed sorted, since locking might cause the sort |
3834 | | * key columns to be replaced with new values. |
3835 | | */ |
3836 | 0 | pathnode->path.pathkeys = NIL; |
3837 | |
3838 | 0 | pathnode->subpath = subpath; |
3839 | 0 | pathnode->rowMarks = rowMarks; |
3840 | 0 | pathnode->epqParam = epqParam; |
3841 | | |
3842 | | /* |
3843 | | * We should charge something extra for the costs of row locking and |
3844 | | * possible refetches, but it's hard to say how much. For now, use |
3845 | | * cpu_tuple_cost per row. |
3846 | | */ |
3847 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
3848 | 0 | pathnode->path.startup_cost = subpath->startup_cost; |
3849 | 0 | pathnode->path.total_cost = subpath->total_cost + |
3850 | 0 | cpu_tuple_cost * subpath->rows; |
3851 | |
3852 | 0 | return pathnode; |
3853 | 0 | } |
3854 | | |
3855 | | /* |
3856 | | * create_modifytable_path |
3857 | | * Creates a pathnode that represents performing INSERT/UPDATE/DELETE/MERGE |
3858 | | * mods |
3859 | | * |
3860 | | * 'rel' is the parent relation associated with the result |
3861 | | * 'subpath' is a Path producing source data |
3862 | | * 'operation' is the operation type |
3863 | | * 'canSetTag' is true if we set the command tag/es_processed |
3864 | | * 'nominalRelation' is the parent RT index for use of EXPLAIN |
3865 | | * 'rootRelation' is the partitioned/inherited table root RTI, or 0 if none |
3866 | | * 'partColsUpdated' is true if any partitioning columns are being updated, |
3867 | | * either from the target relation or a descendant partitioned table. |
3868 | | * 'resultRelations' is an integer list of actual RT indexes of target rel(s) |
3869 | | * 'updateColnosLists' is a list of UPDATE target column number lists |
3870 | | * (one sublist per rel); or NIL if not an UPDATE |
3871 | | * 'withCheckOptionLists' is a list of WCO lists (one per rel) |
3872 | | * 'returningLists' is a list of RETURNING tlists (one per rel) |
3873 | | * 'rowMarks' is a list of PlanRowMarks (non-locking only) |
3874 | | * 'onconflict' is the ON CONFLICT clause, or NULL |
3875 | | * 'epqParam' is the ID of Param for EvalPlanQual re-eval |
3876 | | * 'mergeActionLists' is a list of lists of MERGE actions (one per rel) |
3877 | | * 'mergeJoinConditions' is a list of join conditions for MERGE (one per rel) |
3878 | | */ |
3879 | | ModifyTablePath * |
3880 | | create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, |
3881 | | Path *subpath, |
3882 | | CmdType operation, bool canSetTag, |
3883 | | Index nominalRelation, Index rootRelation, |
3884 | | bool partColsUpdated, |
3885 | | List *resultRelations, |
3886 | | List *updateColnosLists, |
3887 | | List *withCheckOptionLists, List *returningLists, |
3888 | | List *rowMarks, OnConflictExpr *onconflict, |
3889 | | List *mergeActionLists, List *mergeJoinConditions, |
3890 | | int epqParam) |
3891 | 0 | { |
3892 | 0 | ModifyTablePath *pathnode = makeNode(ModifyTablePath); |
3893 | |
3894 | 0 | Assert(operation == CMD_MERGE || |
3895 | 0 | (operation == CMD_UPDATE ? |
3896 | 0 | list_length(resultRelations) == list_length(updateColnosLists) : |
3897 | 0 | updateColnosLists == NIL)); |
3898 | 0 | Assert(withCheckOptionLists == NIL || |
3899 | 0 | list_length(resultRelations) == list_length(withCheckOptionLists)); |
3900 | 0 | Assert(returningLists == NIL || |
3901 | 0 | list_length(resultRelations) == list_length(returningLists)); |
3902 | |
3903 | 0 | pathnode->path.pathtype = T_ModifyTable; |
3904 | 0 | pathnode->path.parent = rel; |
3905 | | /* pathtarget is not interesting, just make it minimally valid */ |
3906 | 0 | pathnode->path.pathtarget = rel->reltarget; |
3907 | | /* For now, assume we are above any joins, so no parameterization */ |
3908 | 0 | pathnode->path.param_info = NULL; |
3909 | 0 | pathnode->path.parallel_aware = false; |
3910 | 0 | pathnode->path.parallel_safe = false; |
3911 | 0 | pathnode->path.parallel_workers = 0; |
3912 | 0 | pathnode->path.pathkeys = NIL; |
3913 | | |
3914 | | /* |
3915 | | * Compute cost & rowcount as subpath cost & rowcount (if RETURNING) |
3916 | | * |
3917 | | * Currently, we don't charge anything extra for the actual table |
3918 | | * modification work, nor for the WITH CHECK OPTIONS or RETURNING |
3919 | | * expressions if any. It would only be window dressing, since |
3920 | | * ModifyTable is always a top-level node and there is no way for the |
3921 | | * costs to change any higher-level planning choices. But we might want |
3922 | | * to make it look better sometime. |
3923 | | */ |
3924 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
3925 | 0 | pathnode->path.startup_cost = subpath->startup_cost; |
3926 | 0 | pathnode->path.total_cost = subpath->total_cost; |
3927 | 0 | if (returningLists != NIL) |
3928 | 0 | { |
3929 | 0 | pathnode->path.rows = subpath->rows; |
3930 | | |
3931 | | /* |
3932 | | * Set width to match the subpath output. XXX this is totally wrong: |
3933 | | * we should return an average of the RETURNING tlist widths. But |
3934 | | * it's what happened historically, and improving it is a task for |
3935 | | * another day. (Again, it's mostly window dressing.) |
3936 | | */ |
3937 | 0 | pathnode->path.pathtarget->width = subpath->pathtarget->width; |
3938 | 0 | } |
3939 | 0 | else |
3940 | 0 | { |
3941 | 0 | pathnode->path.rows = 0; |
3942 | 0 | pathnode->path.pathtarget->width = 0; |
3943 | 0 | } |
3944 | |
3945 | 0 | pathnode->subpath = subpath; |
3946 | 0 | pathnode->operation = operation; |
3947 | 0 | pathnode->canSetTag = canSetTag; |
3948 | 0 | pathnode->nominalRelation = nominalRelation; |
3949 | 0 | pathnode->rootRelation = rootRelation; |
3950 | 0 | pathnode->partColsUpdated = partColsUpdated; |
3951 | 0 | pathnode->resultRelations = resultRelations; |
3952 | 0 | pathnode->updateColnosLists = updateColnosLists; |
3953 | 0 | pathnode->withCheckOptionLists = withCheckOptionLists; |
3954 | 0 | pathnode->returningLists = returningLists; |
3955 | 0 | pathnode->rowMarks = rowMarks; |
3956 | 0 | pathnode->onconflict = onconflict; |
3957 | 0 | pathnode->epqParam = epqParam; |
3958 | 0 | pathnode->mergeActionLists = mergeActionLists; |
3959 | 0 | pathnode->mergeJoinConditions = mergeJoinConditions; |
3960 | |
3961 | 0 | return pathnode; |
3962 | 0 | } |
3963 | | |
3964 | | /* |
3965 | | * create_limit_path |
3966 | | * Creates a pathnode that represents performing LIMIT/OFFSET |
3967 | | * |
3968 | | * In addition to providing the actual OFFSET and LIMIT expressions, |
3969 | | * the caller must provide estimates of their values for costing purposes. |
3970 | | * The estimates are as computed by preprocess_limit(), ie, 0 represents |
3971 | | * the clause not being present, and -1 means it's present but we could |
3972 | | * not estimate its value. |
3973 | | * |
3974 | | * 'rel' is the parent relation associated with the result |
3975 | | * 'subpath' is the path representing the source of data |
3976 | | * 'limitOffset' is the actual OFFSET expression, or NULL |
3977 | | * 'limitCount' is the actual LIMIT expression, or NULL |
3978 | | * 'offset_est' is the estimated value of the OFFSET expression |
3979 | | * 'count_est' is the estimated value of the LIMIT expression |
3980 | | */ |
3981 | | LimitPath * |
3982 | | create_limit_path(PlannerInfo *root, RelOptInfo *rel, |
3983 | | Path *subpath, |
3984 | | Node *limitOffset, Node *limitCount, |
3985 | | LimitOption limitOption, |
3986 | | int64 offset_est, int64 count_est) |
3987 | 0 | { |
3988 | 0 | LimitPath *pathnode = makeNode(LimitPath); |
3989 | |
3990 | 0 | pathnode->path.pathtype = T_Limit; |
3991 | 0 | pathnode->path.parent = rel; |
3992 | | /* Limit doesn't project, so use source path's pathtarget */ |
3993 | 0 | pathnode->path.pathtarget = subpath->pathtarget; |
3994 | | /* For now, assume we are above any joins, so no parameterization */ |
3995 | 0 | pathnode->path.param_info = NULL; |
3996 | 0 | pathnode->path.parallel_aware = false; |
3997 | 0 | pathnode->path.parallel_safe = rel->consider_parallel && |
3998 | 0 | subpath->parallel_safe; |
3999 | 0 | pathnode->path.parallel_workers = subpath->parallel_workers; |
4000 | 0 | pathnode->path.rows = subpath->rows; |
4001 | 0 | pathnode->path.disabled_nodes = subpath->disabled_nodes; |
4002 | 0 | pathnode->path.startup_cost = subpath->startup_cost; |
4003 | 0 | pathnode->path.total_cost = subpath->total_cost; |
4004 | 0 | pathnode->path.pathkeys = subpath->pathkeys; |
4005 | 0 | pathnode->subpath = subpath; |
4006 | 0 | pathnode->limitOffset = limitOffset; |
4007 | 0 | pathnode->limitCount = limitCount; |
4008 | 0 | pathnode->limitOption = limitOption; |
4009 | | |
4010 | | /* |
4011 | | * Adjust the output rows count and costs according to the offset/limit. |
4012 | | */ |
4013 | 0 | adjust_limit_rows_costs(&pathnode->path.rows, |
4014 | 0 | &pathnode->path.startup_cost, |
4015 | 0 | &pathnode->path.total_cost, |
4016 | 0 | offset_est, count_est); |
4017 | |
4018 | 0 | return pathnode; |
4019 | 0 | } |
4020 | | |
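For illustration, a minimal sketch of a hypothetical caller (root, final_rel, cheapest_path, offset_expr, and count_expr are invented names, not taken from any real call site). A constant "OFFSET 20 LIMIT 10" arrives as positive estimates, an absent clause passes a NULL expression and 0, and an unestimable expression such as "LIMIT $1" passes -1:

    /* Hypothetical sketch: plan "... OFFSET 20 LIMIT 10" atop some subpath. */
    LimitPath  *lpath;

    lpath = create_limit_path(root, final_rel, cheapest_path,
                              offset_expr,        /* OFFSET expression */
                              count_expr,         /* LIMIT expression */
                              LIMIT_OPTION_COUNT, /* plain LIMIT, not WITH TIES */
                              20,                 /* offset_est */
                              10);                /* count_est */

With "LIMIT $1" the caller would instead pass count_est = -1 (present but unestimable); with no OFFSET clause at all, limitOffset would be NULL and offset_est 0, so the offset adjustment below is skipped entirely.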
4021 | | /* |
4022 | | * adjust_limit_rows_costs |
4023 | | * Adjust the size and cost estimates for a LimitPath node according to the |
4024 | | * offset/limit. |
4025 | | * |
4026 | | * This is only a cosmetic issue if we are at top level, but if we are |
4027 | | * building a subquery then it's important to report correct info to the outer |
4028 | | * planner. |
4029 | | * |
4030 | | * When the offset or count couldn't be estimated, use 10% of the estimated |
4031 | | * number of rows emitted from the subpath. |
4032 | | * |
4033 | | * XXX we don't bother to add eval costs of the offset/limit expressions |
4034 | | * themselves to the path costs. In theory we should, but in most cases those |
4035 | | * expressions are trivial and it's just not worth the trouble. |
4036 | | */ |
4037 | | void |
4038 | | adjust_limit_rows_costs(double *rows, /* in/out parameter */ |
4039 | | Cost *startup_cost, /* in/out parameter */ |
4040 | | Cost *total_cost, /* in/out parameter */ |
4041 | | int64 offset_est, |
4042 | | int64 count_est) |
4043 | 0 | { |
4044 | 0 | double input_rows = *rows; |
4045 | 0 | Cost input_startup_cost = *startup_cost; |
4046 | 0 | Cost input_total_cost = *total_cost; |
4047 | |
4048 | 0 | if (offset_est != 0) |
4049 | 0 | { |
4050 | 0 | double offset_rows; |
4051 | |
4052 | 0 | if (offset_est > 0) |
4053 | 0 | offset_rows = (double) offset_est; |
4054 | 0 | else |
4055 | 0 | offset_rows = clamp_row_est(input_rows * 0.10); |
4056 | 0 | if (offset_rows > *rows) |
4057 | 0 | offset_rows = *rows; |
4058 | 0 | if (input_rows > 0) |
4059 | 0 | *startup_cost += |
4060 | 0 | (input_total_cost - input_startup_cost) |
4061 | 0 | * offset_rows / input_rows; |
4062 | 0 | *rows -= offset_rows; |
4063 | 0 | if (*rows < 1) |
4064 | 0 | *rows = 1; |
4065 | 0 | } |
4066 | |
4067 | 0 | if (count_est != 0) |
4068 | 0 | { |
4069 | 0 | double count_rows; |
4070 | |
4071 | 0 | if (count_est > 0) |
4072 | 0 | count_rows = (double) count_est; |
4073 | 0 | else |
4074 | 0 | count_rows = clamp_row_est(input_rows * 0.10); |
4075 | 0 | if (count_rows > *rows) |
4076 | 0 | count_rows = *rows; |
4077 | 0 | if (input_rows > 0) |
4078 | 0 | *total_cost = *startup_cost + |
4079 | 0 | (input_total_cost - input_startup_cost) |
4080 | 0 | * count_rows / input_rows; |
4081 | 0 | *rows = count_rows; |
4082 | 0 | if (*rows < 1) |
4083 | 0 | *rows = 1; |
4084 | 0 | } |
4085 | 0 | } |
4086 | | |
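To see the interpolation concretely: with a subpath estimated at 1000 rows, startup_cost 0 and total_cost 100, an OFFSET 20 LIMIT 10 query first charges the run-cost fraction needed to skip 20 rows (startup_cost = 0 + 100 * 20/1000 = 2, rows = 980), then caps the output at 10 rows and interpolates the total (total_cost = 2 + 100 * 10/1000 = 3). The sketch below is a standalone rendering of that arithmetic with plain doubles; it follows the positive-estimate branches only and omits the clamp_row_est() 10%-guess fallback:

    #include <stdio.h>

    /* Simplified mirror of adjust_limit_rows_costs(): positive estimates only. */
    static void
    limit_adjust(double *rows, double *startup, double *total,
                 double offset_est, double count_est)
    {
        double  in_rows = *rows,
                in_startup = *startup,
                in_total = *total;

        if (in_rows <= 0)
            return;             /* real code guards divisions the same way */

        if (offset_est > 0)
        {
            double  skip = (offset_est < *rows) ? offset_est : *rows;

            *startup += (in_total - in_startup) * skip / in_rows;
            *rows -= skip;
            if (*rows < 1)
                *rows = 1;
        }
        if (count_est > 0)
        {
            double  keep = (count_est < *rows) ? count_est : *rows;

            *total = *startup + (in_total - in_startup) * keep / in_rows;
            *rows = keep;
        }
    }

    int
    main(void)
    {
        double  rows = 1000, startup = 0, total = 100;

        limit_adjust(&rows, &startup, &total, 20, 10);
        printf("rows=%g startup=%.2f total=%.2f\n", rows, startup, total);
        return 0;               /* prints rows=10 startup=2.00 total=3.00 */
    }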
4087 | | |
4088 | | /* |
4089 | | * reparameterize_path |
4090 | | * Attempt to modify a Path to have greater parameterization |
4091 | | * |
4092 | | * We use this to attempt to bring all child paths of an appendrel to the |
4093 | | * same parameterization level, ensuring that they all enforce the same set |
4094 | | * of join quals (and thus that that parameterization can be attributed to |
4095 | | * an append path built from such paths). Currently, only a few path types |
4096 | | * are supported here, though more could be added at need. We return NULL |
4097 | | * if we can't reparameterize the given path. |
4098 | | * |
4099 | | * Note: we intentionally do not pass created paths to add_path(); it would |
4100 | | * possibly try to delete them on the grounds of being cost-inferior to the |
4101 | | * paths they were made from, and we don't want that. Paths made here are |
4102 | | * not necessarily of general-purpose usefulness, but they can be useful |
4103 | | * as members of an append path. |
4104 | | */ |
4105 | | Path * |
4106 | | reparameterize_path(PlannerInfo *root, Path *path, |
4107 | | Relids required_outer, |
4108 | | double loop_count) |
4109 | 0 | { |
4110 | 0 | RelOptInfo *rel = path->parent; |
4111 | | |
4112 | | /* Can only increase, not decrease, path's parameterization */ |
4113 | 0 | if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer)) |
4114 | 0 | return NULL; |
4115 | 0 | switch (path->pathtype) |
4116 | 0 | { |
4117 | 0 | case T_SeqScan: |
4118 | 0 | return create_seqscan_path(root, rel, required_outer, 0); |
4119 | 0 | case T_SampleScan: |
4120 | 0 | return (Path *) create_samplescan_path(root, rel, required_outer); |
4121 | 0 | case T_IndexScan: |
4122 | 0 | case T_IndexOnlyScan: |
4123 | 0 | { |
4124 | 0 | IndexPath *ipath = (IndexPath *) path; |
4125 | 0 | IndexPath *newpath = makeNode(IndexPath); |
4126 | | |
4127 | | /* |
4128 | | * We can't use create_index_path directly, and would not want |
4129 | | * to because it would re-compute the indexqual conditions, |
4130 | | * which is wasted effort. Instead we hack things a bit: |
4131 | | * flat-copy the path node, revise its param_info, and redo |
4132 | | * the cost estimate. |
4133 | | */ |
4134 | 0 | memcpy(newpath, ipath, sizeof(IndexPath)); |
4135 | 0 | newpath->path.param_info = |
4136 | 0 | get_baserel_parampathinfo(root, rel, required_outer); |
4137 | 0 | cost_index(newpath, root, loop_count, false); |
4138 | 0 | return (Path *) newpath; |
4139 | 0 | } |
4140 | 0 | case T_BitmapHeapScan: |
4141 | 0 | { |
4142 | 0 | BitmapHeapPath *bpath = (BitmapHeapPath *) path; |
4143 | |
4144 | 0 | return (Path *) create_bitmap_heap_path(root, |
4145 | 0 | rel, |
4146 | 0 | bpath->bitmapqual, |
4147 | 0 | required_outer, |
4148 | 0 | loop_count, 0); |
4149 | 0 | } |
4150 | 0 | case T_SubqueryScan: |
4151 | 0 | { |
4152 | 0 | SubqueryScanPath *spath = (SubqueryScanPath *) path; |
4153 | 0 | Path *subpath = spath->subpath; |
4154 | 0 | bool trivial_pathtarget; |
4155 | | |
4156 | | /* |
4157 | | * If existing node has zero extra cost, we must have decided |
4158 | | * its target is trivial. (The converse is not true, because |
4159 | | * it might have a trivial target but quals to enforce; but in |
4160 | | * that case the new node will too, so it doesn't matter |
4161 | | * whether we get the right answer here.) |
4162 | | */ |
4163 | 0 | trivial_pathtarget = |
4164 | 0 | (subpath->total_cost == spath->path.total_cost); |
4165 | |
4166 | 0 | return (Path *) create_subqueryscan_path(root, |
4167 | 0 | rel, |
4168 | 0 | subpath, |
4169 | 0 | trivial_pathtarget, |
4170 | 0 | spath->path.pathkeys, |
4171 | 0 | required_outer); |
4172 | 0 | } |
4173 | 0 | case T_Result: |
4174 | | /* Supported only for RTE_RESULT scan paths */ |
4175 | 0 | if (IsA(path, Path)) |
4176 | 0 | return create_resultscan_path(root, rel, required_outer); |
4177 | 0 | break; |
4178 | 0 | case T_Append: |
4179 | 0 | { |
4180 | 0 | AppendPath *apath = (AppendPath *) path; |
4181 | 0 | List *childpaths = NIL; |
4182 | 0 | List *partialpaths = NIL; |
4183 | 0 | int i; |
4184 | 0 | ListCell *lc; |
4185 | | |
4186 | | /* Reparameterize the children */ |
4187 | 0 | i = 0; |
4188 | 0 | foreach(lc, apath->subpaths) |
4189 | 0 | { |
4190 | 0 | Path *spath = (Path *) lfirst(lc); |
4191 | |
4192 | 0 | spath = reparameterize_path(root, spath, |
4193 | 0 | required_outer, |
4194 | 0 | loop_count); |
4195 | 0 | if (spath == NULL) |
4196 | 0 | return NULL; |
4197 | | /* We have to re-split the regular and partial paths */ |
4198 | 0 | if (i < apath->first_partial_path) |
4199 | 0 | childpaths = lappend(childpaths, spath); |
4200 | 0 | else |
4201 | 0 | partialpaths = lappend(partialpaths, spath); |
4202 | 0 | i++; |
4203 | 0 | } |
4204 | 0 | return (Path *) |
4205 | 0 | create_append_path(root, rel, childpaths, partialpaths, |
4206 | 0 | apath->path.pathkeys, required_outer, |
4207 | 0 | apath->path.parallel_workers, |
4208 | 0 | apath->path.parallel_aware, |
4209 | 0 | -1); |
4210 | 0 | } |
4211 | 0 | case T_Material: |
4212 | 0 | { |
4213 | 0 | MaterialPath *mpath = (MaterialPath *) path; |
4214 | 0 | Path *spath = mpath->subpath; |
4215 | |
4216 | 0 | spath = reparameterize_path(root, spath, |
4217 | 0 | required_outer, |
4218 | 0 | loop_count); |
4219 | 0 | if (spath == NULL) |
4220 | 0 | return NULL; |
4221 | 0 | return (Path *) create_material_path(rel, spath); |
4222 | 0 | } |
4223 | 0 | case T_Memoize: |
4224 | 0 | { |
4225 | 0 | MemoizePath *mpath = (MemoizePath *) path; |
4226 | 0 | Path *spath = mpath->subpath; |
4227 | |
4228 | 0 | spath = reparameterize_path(root, spath, |
4229 | 0 | required_outer, |
4230 | 0 | loop_count); |
4231 | 0 | if (spath == NULL) |
4232 | 0 | return NULL; |
4233 | 0 | return (Path *) create_memoize_path(root, rel, |
4234 | 0 | spath, |
4235 | 0 | mpath->param_exprs, |
4236 | 0 | mpath->hash_operators, |
4237 | 0 | mpath->singlerow, |
4238 | 0 | mpath->binary_mode, |
4239 | 0 | mpath->calls); |
4240 | 0 | } |
4241 | 0 | default: |
4242 | 0 | break; |
4243 | 0 | } |
4244 | 0 | return NULL; |
4245 | 0 | } |
4246 | | |
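The bms_is_subset() test at the top of reparameterize_path is the key invariant: a path's parameterization can only grow here, never shrink, because dropping a required outer rel would leave join quals unenforced. A toy rendering of that gate, with an unsigned bitmask standing in for the Bitmapset-based Relids type (invented relid numbers, purely illustrative):

    #include <stdio.h>

    /* Toy model: bit N set means "requires outer rel N". The path's existing
     * requirement must be covered by the target parameterization. */
    static int
    can_grow_param(unsigned req_outer, unsigned target)
    {
        return (req_outer & ~target) == 0;  /* req_outer is a subset of target */
    }

    int
    main(void)
    {
        unsigned    path_req = 1u << 1;     /* path already requires rel 1 */

        printf("%d\n", can_grow_param(path_req, (1u << 1) | (1u << 2)));  /* 1 */
        printf("%d\n", can_grow_param(path_req, 1u << 2));  /* 0: drops rel 1 */
        return 0;
    }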
4247 | | /* |
4248 | | * reparameterize_path_by_child |
4249 | | * Given a path parameterized by the parent of the given child relation, |
4250 | | * translate the path to be parameterized by the given child relation. |
4251 | | * |
4252 | | * Most fields in the path are not changed, but any expressions must be |
4253 | | * adjusted to refer to the correct varnos, and any subpaths must be |
4254 | | * recursively reparameterized. Other fields that refer to specific relids |
4255 | | * also need adjustment. |
4256 | | * |
4257 | | * The cost, number of rows, width and parallel path properties depend upon |
4258 | | * path->parent, which does not change during the translation. So we need |
4259 | | * not change those. |
4260 | | * |
4261 | | * Currently, only a few path types are supported here, though more could be |
4262 | | * added at need. We return NULL if we can't reparameterize the given path. |
4263 | | * |
4264 | | * Note that this function can change referenced RangeTblEntries, RelOptInfos |
4265 | | * and IndexOptInfos as well as the Path structures. Therefore, it's only safe |
4266 | | * to call during create_plan(), when we have made a final choice of which Path |
4267 | | * to use for each RangeTblEntry/RelOptInfo/IndexOptInfo. |
4268 | | * |
4269 | | * Keep this code in sync with path_is_reparameterizable_by_child()! |
4270 | | */ |
4271 | | Path * |
4272 | | reparameterize_path_by_child(PlannerInfo *root, Path *path, |
4273 | | RelOptInfo *child_rel) |
4274 | 0 | { |
4275 | 0 | Path *new_path; |
4276 | 0 | ParamPathInfo *new_ppi; |
4277 | 0 | ParamPathInfo *old_ppi; |
4278 | 0 | Relids required_outer; |
4279 | |
4280 | 0 | #define ADJUST_CHILD_ATTRS(node) \ |
4281 | 0 | ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \ |
4282 | 0 | (Node *) (node), \ |
4283 | 0 | child_rel, \ |
4284 | 0 | child_rel->top_parent)) |
4285 | |
4286 | 0 | #define REPARAMETERIZE_CHILD_PATH(path) \ |
4287 | 0 | do { \ |
4288 | 0 | (path) = reparameterize_path_by_child(root, (path), child_rel); \ |
4289 | 0 | if ((path) == NULL) \ |
4290 | 0 | return NULL; \ |
4291 | 0 | } while(0) |
4292 | |
4293 | 0 | #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \ |
4294 | 0 | do { \ |
4295 | 0 | if ((pathlist) != NIL) \ |
4296 | 0 | { \ |
4297 | 0 | (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \ |
4298 | 0 | child_rel); \ |
4299 | 0 | if ((pathlist) == NIL) \ |
4300 | 0 | return NULL; \ |
4301 | 0 | } \ |
4302 | 0 | } while(0) |
4303 | | |
4304 | | /* |
4305 | | * If the path is not parameterized by the parent of the given relation, |
4306 | | * it doesn't need reparameterization. |
4307 | | */ |
4308 | 0 | if (!path->param_info || |
4309 | 0 | !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids)) |
4310 | 0 | return path; |
4311 | | |
4312 | | /* |
4313 | | * If possible, reparameterize the given path. |
4314 | | * |
4315 | | * This function is currently only applied to the inner side of a nestloop |
4316 | | * join that is being partitioned by the partitionwise-join code. Hence, |
4317 | | * we need only support path types that plausibly arise in that context. |
4318 | | * (In particular, supporting sorted path types would be a waste of code |
4319 | | * and cycles: even if we translated them here, they'd just lose in |
4320 | | * subsequent cost comparisons.) If we do see an unsupported path type, |
4321 | | * that just means we won't be able to generate a partitionwise-join plan |
4322 | | * using that path type. |
4323 | | */ |
4324 | 0 | switch (nodeTag(path)) |
4325 | 0 | { |
4326 | 0 | case T_Path: |
4327 | 0 | new_path = path; |
4328 | 0 | ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo); |
4329 | 0 | if (path->pathtype == T_SampleScan) |
4330 | 0 | { |
4331 | 0 | Index scan_relid = path->parent->relid; |
4332 | 0 | RangeTblEntry *rte; |
4333 | | |
4334 | | /* it should be a base rel with a tablesample clause... */ |
4335 | 0 | Assert(scan_relid > 0); |
4336 | 0 | rte = planner_rt_fetch(scan_relid, root); |
4337 | 0 | Assert(rte->rtekind == RTE_RELATION); |
4338 | 0 | Assert(rte->tablesample != NULL); |
4339 | |
4340 | 0 | ADJUST_CHILD_ATTRS(rte->tablesample); |
4341 | 0 | } |
4342 | 0 | break; |
4343 | | |
4344 | 0 | case T_IndexPath: |
4345 | 0 | { |
4346 | 0 | IndexPath *ipath = (IndexPath *) path; |
4347 | |
4348 | 0 | ADJUST_CHILD_ATTRS(ipath->indexinfo->indrestrictinfo); |
4349 | 0 | ADJUST_CHILD_ATTRS(ipath->indexclauses); |
4350 | 0 | new_path = (Path *) ipath; |
4351 | 0 | } |
4352 | 0 | break; |
4353 | | |
4354 | 0 | case T_BitmapHeapPath: |
4355 | 0 | { |
4356 | 0 | BitmapHeapPath *bhpath = (BitmapHeapPath *) path; |
4357 | |
4358 | 0 | ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo); |
4359 | 0 | REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual); |
4360 | 0 | new_path = (Path *) bhpath; |
4361 | 0 | } |
4362 | 0 | break; |
4363 | | |
4364 | 0 | case T_BitmapAndPath: |
4365 | 0 | { |
4366 | 0 | BitmapAndPath *bapath = (BitmapAndPath *) path; |
4367 | |
4368 | 0 | REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals); |
4369 | 0 | new_path = (Path *) bapath; |
4370 | 0 | } |
4371 | 0 | break; |
4372 | | |
4373 | 0 | case T_BitmapOrPath: |
4374 | 0 | { |
4375 | 0 | BitmapOrPath *bopath = (BitmapOrPath *) path; |
4376 | |
4377 | 0 | REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals); |
4378 | 0 | new_path = (Path *) bopath; |
4379 | 0 | } |
4380 | 0 | break; |
4381 | | |
4382 | 0 | case T_ForeignPath: |
4383 | 0 | { |
4384 | 0 | ForeignPath *fpath = (ForeignPath *) path; |
4385 | 0 | ReparameterizeForeignPathByChild_function rfpc_func; |
4386 | |
4387 | 0 | ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo); |
4388 | 0 | if (fpath->fdw_outerpath) |
4389 | 0 | REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath); |
4390 | 0 | if (fpath->fdw_restrictinfo) |
4391 | 0 | ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo); |
4392 | | |
4393 | | /* Hand over to FDW if needed. */ |
4394 | 0 | rfpc_func = |
4395 | 0 | path->parent->fdwroutine->ReparameterizeForeignPathByChild; |
4396 | 0 | if (rfpc_func) |
4397 | 0 | fpath->fdw_private = rfpc_func(root, fpath->fdw_private, |
4398 | 0 | child_rel); |
4399 | 0 | new_path = (Path *) fpath; |
4400 | 0 | } |
4401 | 0 | break; |
4402 | | |
4403 | 0 | case T_CustomPath: |
4404 | 0 | { |
4405 | 0 | CustomPath *cpath = (CustomPath *) path; |
4406 | |
4407 | 0 | ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo); |
4408 | 0 | REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths); |
4409 | 0 | if (cpath->custom_restrictinfo) |
4410 | 0 | ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo); |
4411 | 0 | if (cpath->methods && |
4412 | 0 | cpath->methods->ReparameterizeCustomPathByChild) |
4413 | 0 | cpath->custom_private = |
4414 | 0 | cpath->methods->ReparameterizeCustomPathByChild(root, |
4415 | 0 | cpath->custom_private, |
4416 | 0 | child_rel); |
4417 | 0 | new_path = (Path *) cpath; |
4418 | 0 | } |
4419 | 0 | break; |
4420 | | |
4421 | 0 | case T_NestPath: |
4422 | 0 | { |
4423 | 0 | NestPath *npath = (NestPath *) path; |
4424 | 0 | JoinPath *jpath = (JoinPath *) npath; |
4425 | |
4426 | 0 | REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath); |
4427 | 0 | REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath); |
4428 | 0 | ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo); |
4429 | 0 | new_path = (Path *) npath; |
4430 | 0 | } |
4431 | 0 | break; |
4432 | | |
4433 | 0 | case T_MergePath: |
4434 | 0 | { |
4435 | 0 | MergePath *mpath = (MergePath *) path; |
4436 | 0 | JoinPath *jpath = (JoinPath *) mpath; |
4437 | |
4438 | 0 | REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath); |
4439 | 0 | REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath); |
4440 | 0 | ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo); |
4441 | 0 | ADJUST_CHILD_ATTRS(mpath->path_mergeclauses); |
4442 | 0 | new_path = (Path *) mpath; |
4443 | 0 | } |
4444 | 0 | break; |
4445 | | |
4446 | 0 | case T_HashPath: |
4447 | 0 | { |
4448 | 0 | HashPath *hpath = (HashPath *) path; |
4449 | 0 | JoinPath *jpath = (JoinPath *) hpath; |
4450 | |
4451 | 0 | REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath); |
4452 | 0 | REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath); |
4453 | 0 | ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo); |
4454 | 0 | ADJUST_CHILD_ATTRS(hpath->path_hashclauses); |
4455 | 0 | new_path = (Path *) hpath; |
4456 | 0 | } |
4457 | 0 | break; |
4458 | | |
4459 | 0 | case T_AppendPath: |
4460 | 0 | { |
4461 | 0 | AppendPath *apath = (AppendPath *) path; |
4462 | |
4463 | 0 | REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths); |
4464 | 0 | new_path = (Path *) apath; |
4465 | 0 | } |
4466 | 0 | break; |
4467 | | |
4468 | 0 | case T_MaterialPath: |
4469 | 0 | { |
4470 | 0 | MaterialPath *mpath = (MaterialPath *) path; |
4471 | |
4472 | 0 | REPARAMETERIZE_CHILD_PATH(mpath->subpath); |
4473 | 0 | new_path = (Path *) mpath; |
4474 | 0 | } |
4475 | 0 | break; |
4476 | | |
4477 | 0 | case T_MemoizePath: |
4478 | 0 | { |
4479 | 0 | MemoizePath *mpath = (MemoizePath *) path; |
4480 | |
4481 | 0 | REPARAMETERIZE_CHILD_PATH(mpath->subpath); |
4482 | 0 | ADJUST_CHILD_ATTRS(mpath->param_exprs); |
4483 | 0 | new_path = (Path *) mpath; |
4484 | 0 | } |
4485 | 0 | break; |
4486 | | |
4487 | 0 | case T_GatherPath: |
4488 | 0 | { |
4489 | 0 | GatherPath *gpath = (GatherPath *) path; |
4490 | |
4491 | 0 | REPARAMETERIZE_CHILD_PATH(gpath->subpath); |
4492 | 0 | new_path = (Path *) gpath; |
4493 | 0 | } |
4494 | 0 | break; |
4495 | | |
4496 | 0 | default: |
4497 | | /* We don't know how to reparameterize this path. */ |
4498 | 0 | return NULL; |
4499 | 0 | } |
4500 | | |
4501 | | /* |
4502 | | * Adjust the parameterization information, which refers to the topmost |
4503 | | * parent. The topmost parent can be multiple levels away from the given |
4504 | | * child, hence use multi-level expression adjustment routines. |
4505 | | */ |
4506 | 0 | old_ppi = new_path->param_info; |
4507 | 0 | required_outer = |
4508 | 0 | adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer, |
4509 | 0 | child_rel, |
4510 | 0 | child_rel->top_parent); |
4511 | | |
4512 | | /* If we already have a PPI for this parameterization, just return it */ |
4513 | 0 | new_ppi = find_param_path_info(new_path->parent, required_outer); |
4514 | | |
4515 | | /* |
4516 | | * If not, build a new one and link it to the list of PPIs. For the same |
4517 | | * reason as explained in mark_dummy_rel(), allocate the new PPI in the same |
4518 | | * context the given RelOptInfo is in. |
4519 | | */ |
4520 | 0 | if (new_ppi == NULL) |
4521 | 0 | { |
4522 | 0 | MemoryContext oldcontext; |
4523 | 0 | RelOptInfo *rel = path->parent; |
4524 | |
4525 | 0 | oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel)); |
4526 | |
4527 | 0 | new_ppi = makeNode(ParamPathInfo); |
4528 | 0 | new_ppi->ppi_req_outer = bms_copy(required_outer); |
4529 | 0 | new_ppi->ppi_rows = old_ppi->ppi_rows; |
4530 | 0 | new_ppi->ppi_clauses = old_ppi->ppi_clauses; |
4531 | 0 | ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses); |
4532 | 0 | new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials); |
4533 | 0 | rel->ppilist = lappend(rel->ppilist, new_ppi); |
4534 | |
4535 | 0 | MemoryContextSwitchTo(oldcontext); |
4536 | 0 | } |
4537 | 0 | bms_free(required_outer); |
4538 | |
4539 | 0 | new_path->param_info = new_ppi; |
4540 | | |
4541 | | /* |
4542 | | * Adjust the path target if the parent of the outer relation is |
4543 | | * referenced in the targetlist. This can happen when only the parent of |
4544 | | * the outer relation is laterally referenced in this relation. |
4545 | | */ |
4546 | 0 | if (bms_overlap(path->parent->lateral_relids, |
4547 | 0 | child_rel->top_parent_relids)) |
4548 | 0 | { |
4549 | 0 | new_path->pathtarget = copy_pathtarget(new_path->pathtarget); |
4550 | 0 | ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs); |
4551 | 0 | } |
4552 | |
4553 | 0 | return new_path; |
4554 | 0 | } |
4555 | | |
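All of the relid surgery in reparameterize_path_by_child boils down to substituting child relids for their parent relids, possibly across several inheritance levels, both in expressions and in the required-outer set. A toy model of the single-level set translation, again with a bitmask standing in for Relids and invented relid numbers (the real adjust_child_relids_multilevel() walks the AppendRelInfo structures instead):

    #include <stdio.h>

    /* Toy: replace the parent's bit with the child's bit, leaving
     * unrelated rels untouched. */
    static unsigned
    translate_req_outer(unsigned req_outer, int parent_relid, int child_relid)
    {
        if (req_outer & (1u << parent_relid))
        {
            req_outer &= ~(1u << parent_relid);
            req_outer |= (1u << child_relid);
        }
        return req_outer;
    }

    int
    main(void)
    {
        unsigned    req = (1u << 2) | (1u << 7);    /* {2, 7}; rel 2 is a parent */

        /* {2, 7} becomes {5, 7} when child rel 5 replaces parent rel 2 */
        printf("0x%x\n", translate_req_outer(req, 2, 5));   /* prints 0xa0 */
        return 0;
    }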
4556 | | /* |
4557 | | * path_is_reparameterizable_by_child |
4558 | | * Given a path parameterized by the parent of the given child relation, |
4559 | | * see if it can be translated to be parameterized by the child relation. |
4560 | | * |
4561 | | * This must return true if and only if reparameterize_path_by_child() |
4562 | | * would succeed on this path. Currently it's sufficient to verify that |
4563 | | * the path and all of its subpaths (if any) are of the types handled by |
4564 | | * that function. However, subpaths that are not parameterized can be |
4565 | | * disregarded since they won't require translation. |
4566 | | */ |
4567 | | bool |
4568 | | path_is_reparameterizable_by_child(Path *path, RelOptInfo *child_rel) |
4569 | 0 | { |
4570 | 0 | #define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \ |
4571 | 0 | do { \ |
4572 | 0 | if (!path_is_reparameterizable_by_child(path, child_rel)) \ |
4573 | 0 | return false; \ |
4574 | 0 | } while(0) |
4575 | |
4576 | 0 | #define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \ |
4577 | 0 | do { \ |
4578 | 0 | if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \ |
4579 | 0 | return false; \ |
4580 | 0 | } while(0) |
4581 | | |
4582 | | /* |
4583 | | * If the path is not parameterized by the parent of the given relation, |
4584 | | * it doesn't need reparameterization. |
4585 | | */ |
4586 | 0 | if (!path->param_info || |
4587 | 0 | !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids)) |
4588 | 0 | return true; |
4589 | | |
4590 | | /* |
4591 | | * Check that the path type is one that reparameterize_path_by_child() can |
4592 | | * handle, and recursively check subpaths. |
4593 | | */ |
4594 | 0 | switch (nodeTag(path)) |
4595 | 0 | { |
4596 | 0 | case T_Path: |
4597 | 0 | case T_IndexPath: |
4598 | 0 | break; |
4599 | | |
4600 | 0 | case T_BitmapHeapPath: |
4601 | 0 | { |
4602 | 0 | BitmapHeapPath *bhpath = (BitmapHeapPath *) path; |
4603 | |
4604 | 0 | REJECT_IF_PATH_NOT_REPARAMETERIZABLE(bhpath->bitmapqual); |
4605 | 0 | } |
4606 | 0 | break; |
4607 | | |
4608 | 0 | case T_BitmapAndPath: |
4609 | 0 | { |
4610 | 0 | BitmapAndPath *bapath = (BitmapAndPath *) path; |
4611 | |
|
4612 | 0 | REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bapath->bitmapquals); |
4613 | 0 | } |
4614 | 0 | break; |
4615 | | |
4616 | 0 | case T_BitmapOrPath: |
4617 | 0 | { |
4618 | 0 | BitmapOrPath *bopath = (BitmapOrPath *) path; |
4619 | |
4620 | 0 | REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bopath->bitmapquals); |
4621 | 0 | } |
4622 | 0 | break; |
4623 | | |
4624 | 0 | case T_ForeignPath: |
4625 | 0 | { |
4626 | 0 | ForeignPath *fpath = (ForeignPath *) path; |
4627 | |
|
4628 | 0 | if (fpath->fdw_outerpath) |
4629 | 0 | REJECT_IF_PATH_NOT_REPARAMETERIZABLE(fpath->fdw_outerpath); |
4630 | 0 | } |
4631 | 0 | break; |
4632 | | |
4633 | 0 | case T_CustomPath: |
4634 | 0 | { |
4635 | 0 | CustomPath *cpath = (CustomPath *) path; |
4636 | |
4637 | 0 | REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(cpath->custom_paths); |
4638 | 0 | } |
4639 | 0 | break; |
4640 | | |
4641 | 0 | case T_NestPath: |
4642 | 0 | case T_MergePath: |
4643 | 0 | case T_HashPath: |
4644 | 0 | { |
4645 | 0 | JoinPath *jpath = (JoinPath *) path; |
4646 | |
4647 | 0 | REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->outerjoinpath); |
4648 | 0 | REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->innerjoinpath); |
4649 | 0 | } |
4650 | 0 | break; |
4651 | | |
4652 | 0 | case T_AppendPath: |
4653 | 0 | { |
4654 | 0 | AppendPath *apath = (AppendPath *) path; |
4655 | |
4656 | 0 | REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(apath->subpaths); |
4657 | 0 | } |
4658 | 0 | break; |
4659 | | |
4660 | 0 | case T_MaterialPath: |
4661 | 0 | { |
4662 | 0 | MaterialPath *mpath = (MaterialPath *) path; |
4663 | |
4664 | 0 | REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath); |
4665 | 0 | } |
4666 | 0 | break; |
4667 | | |
4668 | 0 | case T_MemoizePath: |
4669 | 0 | { |
4670 | 0 | MemoizePath *mpath = (MemoizePath *) path; |
4671 | |
4672 | 0 | REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath); |
4673 | 0 | } |
4674 | 0 | break; |
4675 | | |
4676 | 0 | case T_GatherPath: |
4677 | 0 | { |
4678 | 0 | GatherPath *gpath = (GatherPath *) path; |
4679 | |
4680 | 0 | REJECT_IF_PATH_NOT_REPARAMETERIZABLE(gpath->subpath); |
4681 | 0 | } |
4682 | 0 | break; |
4683 | | |
4684 | 0 | default: |
4685 | | /* We don't know how to reparameterize this path. */ |
4686 | 0 | return false; |
4687 | 0 | } |
4688 | | |
4689 | 0 | return true; |
4690 | 0 | } |
4691 | | |
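The "if and only if" contract in the header comment is worth stating as code: a caller may probe with this cheap predicate and rely on the expensive translation succeeding later. Expressed as a hypothetical assertion (not present in the source, and assuming both functions are handed the same path and child_rel):

    Assert(path_is_reparameterizable_by_child(path, child_rel) ==
           (reparameterize_path_by_child(root, path, child_rel) != NULL));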
4692 | | /* |
4693 | | * reparameterize_pathlist_by_child |
4694 | | * Helper function to reparameterize a list of paths by given child rel. |
4695 | | * |
4696 | | * Returns NIL to indicate failure, so pathlist had better not be NIL. |
4697 | | */ |
4698 | | static List * |
4699 | | reparameterize_pathlist_by_child(PlannerInfo *root, |
4700 | | List *pathlist, |
4701 | | RelOptInfo *child_rel) |
4702 | 0 | { |
4703 | 0 | ListCell *lc; |
4704 | 0 | List *result = NIL; |
4705 | |
4706 | 0 | foreach(lc, pathlist) |
4707 | 0 | { |
4708 | 0 | Path *path = reparameterize_path_by_child(root, lfirst(lc), |
4709 | 0 | child_rel); |
4710 | |
4711 | 0 | if (path == NULL) |
4712 | 0 | { |
4713 | 0 | list_free(result); |
4714 | 0 | return NIL; |
4715 | 0 | } |
4716 | | |
4717 | 0 | result = lappend(result, path); |
4718 | 0 | } |
4719 | | |
4720 | 0 | return result; |
4721 | 0 | } |
4722 | | |
4723 | | /* |
4724 | | * pathlist_is_reparameterizable_by_child |
4725 | | * Helper function to check if a list of paths can be reparameterized. |
4726 | | */ |
4727 | | static bool |
4728 | | pathlist_is_reparameterizable_by_child(List *pathlist, RelOptInfo *child_rel) |
4729 | 0 | { |
4730 | 0 | ListCell *lc; |
4731 | |
4732 | 0 | foreach(lc, pathlist) |
4733 | 0 | { |
4734 | 0 | Path *path = (Path *) lfirst(lc); |
4735 | |
4736 | 0 | if (!path_is_reparameterizable_by_child(path, child_rel)) |
4737 | 0 | return false; |
4738 | 0 | } |
4739 | | |
4740 | 0 | return true; |
4741 | 0 | } |