Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * costsize.c
4 : : * Routines to compute (and set) relation sizes and path costs
5 : : *
6 : : * Path costs are measured in arbitrary units established by these basic
7 : : * parameters:
8 : : *
9 : : * seq_page_cost Cost of a sequential page fetch
10 : : * random_page_cost Cost of a non-sequential page fetch
11 : : * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 : : * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 : : * cpu_operator_cost Cost of CPU time to execute an operator or function
14 : : * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
15 : : * parallel_setup_cost Cost of setting up shared memory for parallelism
16 : : *
17 : : * We expect that the kernel will typically do some amount of read-ahead
18 : : * optimization; this in conjunction with seek costs means that seq_page_cost
19 : : * is normally considerably less than random_page_cost. (However, if the
20 : : * database is fully cached in RAM, it is reasonable to set them equal.)
21 : : *
22 : : * We also use a rough estimate "effective_cache_size" of the number of
23 : : * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 : : * NBuffers for this purpose because that would ignore the effects of
25 : : * the kernel's disk cache.)
26 : : *
27 : : * Obviously, taking constants for these values is an oversimplification,
28 : : * but it's tough enough to get any useful estimates even at this level of
29 : : * detail. Note that all of these parameters are user-settable, in case
30 : : * the default values are drastically off for a particular platform.
31 : : *
32 : : * seq_page_cost and random_page_cost can also be overridden for an individual
33 : : * tablespace, in case some data is on a fast disk and other data is on a slow
34 : : * disk. Per-tablespace overrides never apply to temporary work files such as
35 : : * an external sort or a materialize node that overflows work_mem.
36 : : *
37 : : * We compute two separate costs for each path:
38 : : * total_cost: total estimated cost to fetch all tuples
39 : : * startup_cost: cost that is expended before first tuple is fetched
40 : : * In some scenarios, such as when there is a LIMIT or we are implementing
41 : : * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 : : * path's result. A caller can estimate the cost of fetching a partial
43 : : * result by interpolating between startup_cost and total_cost. In detail:
44 : : * actual_cost = startup_cost +
45 : : * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 : : * Note that a base relation's row count (and, by extension, plan_rows for
47 : : * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
48 : : * that this equation works properly. (Note: while path->rows is never zero
49 : : * for ordinary relations, it is zero for paths for provably-empty relations,
50 : : * so beware of division-by-zero.) The LIMIT is applied as a top-level
51 : : * plan node.
52 : : *
53 : : * Each path stores the total number of disabled nodes that exist at or
54 : : * below that point in the plan tree. This is regarded as a component of
55 : : * the cost, and paths with fewer disabled nodes should be regarded as
56 : : * cheaper than those with more. Disabled nodes occur when the user sets
57 : : * a GUC like enable_seqscan=false. We can't necessarily respect such a
58 : : * setting in every part of the plan tree, but we want to respect it in as many
59 : : * parts of the plan tree as possible. Simpler schemes like storing a Boolean
60 : : * here rather than a count fail to do that. We used to disable nodes by
61 : : * adding a large constant to the startup cost, but that distorted planning
62 : : * in other ways.
63 : : *
64 : : * For largely historical reasons, most of the routines in this module use
65 : : * the passed result Path only to store their results (rows, startup_cost and
66 : : * total_cost) into. All the input data they need is passed as separate
67 : : * parameters, even though much of it could be extracted from the Path.
68 : : * An exception is made for the cost_XXXjoin() routines, which expect all
69 : : * the other fields of the passed XXXPath to be filled in, and similarly
70 : : * cost_index() assumes the passed IndexPath is valid except for its output
71 : : * values.
72 : : *
73 : : *
74 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
75 : : * Portions Copyright (c) 1994, Regents of the University of California
76 : : *
77 : : * IDENTIFICATION
78 : : * src/backend/optimizer/path/costsize.c
79 : : *
80 : : *-------------------------------------------------------------------------
81 : : */
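/*
 * Worked example of the interpolation above: with startup_cost = 10,
 * total_cost = 110 and path->rows = 1000, fetching only the first 100
 * tuples is estimated to cost
 *		10 + (110 - 10) * 100 / 1000 = 20
 * cost units.
 */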
82 : :
83 : : #include "postgres.h"
84 : :
85 : : #include <limits.h>
86 : : #include <math.h>
87 : :
88 : : #include "access/amapi.h"
89 : : #include "access/htup_details.h"
90 : : #include "access/tsmapi.h"
91 : : #include "executor/executor.h"
92 : : #include "executor/nodeAgg.h"
93 : : #include "executor/nodeHash.h"
94 : : #include "executor/nodeMemoize.h"
95 : : #include "miscadmin.h"
96 : : #include "nodes/makefuncs.h"
97 : : #include "nodes/nodeFuncs.h"
98 : : #include "nodes/tidbitmap.h"
99 : : #include "optimizer/clauses.h"
100 : : #include "optimizer/cost.h"
101 : : #include "optimizer/optimizer.h"
102 : : #include "optimizer/pathnode.h"
103 : : #include "optimizer/paths.h"
104 : : #include "optimizer/placeholder.h"
105 : : #include "optimizer/plancat.h"
106 : : #include "optimizer/restrictinfo.h"
107 : : #include "parser/parsetree.h"
108 : : #include "utils/lsyscache.h"
109 : : #include "utils/selfuncs.h"
110 : : #include "utils/spccache.h"
111 : : #include "utils/tuplesort.h"
112 : :
113 : :
114 : : #define LOG2(x) (log(x) / 0.693147180559945)
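/*
 * log() is the natural logarithm, so dividing by ln(2) = 0.693147...
 * converts it to a base-2 logarithm; e.g. LOG2(8.0) evaluates to 3.0.
 */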
115 : :
116 : : /*
117 : : * Append and MergeAppend nodes are less expensive than some other operations
118 : : * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
119 : : * per-tuple cost as cpu_tuple_cost multiplied by this value.
120 : : */
121 : : #define APPEND_CPU_COST_MULTIPLIER 0.5
122 : :
123 : : /*
124 : : * Maximum value for row estimates. We cap row estimates to this to help
125 : : * ensure that costs based on these estimates remain within the range of what
126 : : * double can represent. add_path() wouldn't act sanely given infinite or NaN
127 : : * cost values.
128 : : */
129 : : #define MAXIMUM_ROWCOUNT 1e100
130 : :
131 : : double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
132 : : double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
133 : : double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
134 : : double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
135 : : double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
136 : : double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
137 : : double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
138 : : double recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;
139 : :
140 : : int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
141 : :
142 : : Cost disable_cost = 1.0e10;
143 : :
144 : : int max_parallel_workers_per_gather = 2;
145 : :
146 : : bool enable_seqscan = true;
147 : : bool enable_indexscan = true;
148 : : bool enable_indexonlyscan = true;
149 : : bool enable_bitmapscan = true;
150 : : bool enable_tidscan = true;
151 : : bool enable_sort = true;
152 : : bool enable_incremental_sort = true;
153 : : bool enable_hashagg = true;
154 : : bool enable_nestloop = true;
155 : : bool enable_material = true;
156 : : bool enable_memoize = true;
157 : : bool enable_mergejoin = true;
158 : : bool enable_hashjoin = true;
159 : : bool enable_gathermerge = true;
160 : : bool enable_partitionwise_join = false;
161 : : bool enable_partitionwise_aggregate = false;
162 : : bool enable_parallel_append = true;
163 : : bool enable_parallel_hash = true;
164 : : bool enable_partition_pruning = true;
165 : : bool enable_presorted_aggregate = true;
166 : : bool enable_async_append = true;
167 : :
168 : : typedef struct
169 : : {
170 : : PlannerInfo *root;
171 : : QualCost total;
172 : : } cost_qual_eval_context;
173 : :
174 : : static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
175 : : static MergeScanSelCache *cached_scansel(PlannerInfo *root,
176 : : RestrictInfo *rinfo,
177 : : PathKey *pathkey);
178 : : static void cost_rescan(PlannerInfo *root, Path *path,
179 : : Cost *rescan_startup_cost, Cost *rescan_total_cost);
180 : : static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
181 : : static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
182 : : ParamPathInfo *param_info,
183 : : QualCost *qpqual_cost);
184 : : static bool has_indexed_join_quals(NestPath *path);
185 : : static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
186 : : List *quals);
187 : : static double calc_joinrel_size_estimate(PlannerInfo *root,
188 : : RelOptInfo *joinrel,
189 : : RelOptInfo *outer_rel,
190 : : RelOptInfo *inner_rel,
191 : : double outer_rows,
192 : : double inner_rows,
193 : : SpecialJoinInfo *sjinfo,
194 : : List *restrictlist);
195 : : static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
196 : : Relids outer_relids,
197 : : Relids inner_relids,
198 : : SpecialJoinInfo *sjinfo,
199 : : List **restrictlist);
200 : : static Cost append_nonpartial_cost(List *subpaths, int numpaths,
201 : : int parallel_workers);
202 : : static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
203 : : static int32 get_expr_width(PlannerInfo *root, const Node *expr);
204 : : static double relation_byte_size(double tuples, int width);
205 : : static double page_size(double tuples, int width);
206 : : static double get_parallel_divisor(Path *path);
207 : :
208 : :
209 : : /*
210 : : * clamp_row_est
211 : : * Force a row-count estimate to a sane value.
212 : : */
213 : : double
8156 tgl@sss.pgh.pa.us 214 :CBC 8092310 : clamp_row_est(double nrows)
215 : : {
216 : : /*
217 : : * Avoid infinite and NaN row estimates. Costs derived from such values
218 : : * are going to be useless. Also force the estimate to be at least one
219 : : * row, to make explain output look better and to avoid possible
220 : : * divide-by-zero when interpolating costs. Make it an integer, too.
221 : : */
2024 drowley@postgresql.o 222 [ + - - + ]: 8092310 : if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
2024 drowley@postgresql.o 223 :UBC 0 : nrows = MAXIMUM_ROWCOUNT;
2024 drowley@postgresql.o 224 [ + + ]:CBC 8092310 : else if (nrows <= 1.0)
8156 tgl@sss.pgh.pa.us 225 : 2547515 : nrows = 1.0;
226 : : else
7684 227 : 5544795 : nrows = rint(nrows);
228 : :
8156 229 : 8092310 : return nrows;
230 : : }
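/*
 * Illustrative sketch, not planner code: the contract of clamp_row_est()
 * expressed as assertions.  The helper name is hypothetical; INFINITY
 * comes from <math.h>, included above.
 */
static inline void
example_clamp_row_est(void)
{
	Assert(clamp_row_est(0.3) == 1.0);	/* forced up to at least one row */
	Assert(clamp_row_est(123.6) == 124.0);	/* rounded to an integer */
	Assert(clamp_row_est(INFINITY) == MAXIMUM_ROWCOUNT);	/* capped */
}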
231 : :
232 : : /*
233 : : * clamp_width_est
234 : : * Force a tuple-width estimate to a sane value.
235 : : *
236 : : * The planner represents datatype width and tuple width estimates as int32.
237 : : * When summing column width estimates to create a tuple width estimate,
238 : : * it's possible to reach integer overflow in edge cases. To ensure sane
239 : : * behavior, we form such sums in int64 arithmetic and then apply this routine
240 : : * to clamp to int32 range.
241 : : */
242 : : int32
868 243 : 1543889 : clamp_width_est(int64 tuple_width)
244 : : {
245 : : /*
246 : : * Anything more than MaxAllocSize is clearly bogus, since we could not
247 : : * create a tuple that large.
248 : : */
249 [ - + ]: 1543889 : if (tuple_width > MaxAllocSize)
868 tgl@sss.pgh.pa.us 250 :UBC 0 : return (int32) MaxAllocSize;
251 : :
252 : : /*
253 : : * Unlike clamp_row_est, we just Assert that the value isn't negative,
254 : : * rather than masking such errors.
255 : : */
868 tgl@sss.pgh.pa.us 256 [ - + ]:CBC 1543889 : Assert(tuple_width >= 0);
257 : :
258 : 1543889 : return (int32) tuple_width;
259 : : }
260 : :
261 : :
262 : : /*
263 : : * cost_seqscan
264 : : * Determines and returns the cost of scanning a relation sequentially.
265 : : *
266 : : * 'baserel' is the relation to be scanned
267 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
268 : : */
269 : : void
7639 270 : 348234 : cost_seqscan(Path *path, PlannerInfo *root,
271 : : RelOptInfo *baserel, ParamPathInfo *param_info)
272 : : {
9576 273 : 348234 : Cost startup_cost = 0;
274 : : Cost cpu_run_cost;
275 : : Cost disk_run_cost;
276 : : double spc_seq_page_cost;
277 : : QualCost qpqual_cost;
278 : : Cost cpu_per_tuple;
97 rhaas@postgresql.org 279 :GNC 348234 : uint64 enable_mask = PGS_SEQSCAN;
280 : :
281 : : /* Should only be applied to base relations */
8487 tgl@sss.pgh.pa.us 282 [ - + ]:CBC 348234 : Assert(baserel->relid > 0);
8759 283 [ - + ]: 348234 : Assert(baserel->rtekind == RTE_RELATION);
284 : :
285 : : /* Mark the path with the correct row estimate */
5129 286 [ + + ]: 348234 : if (param_info)
287 : 1210 : path->rows = param_info->ppi_rows;
288 : : else
289 : 347024 : path->rows = baserel->rows;
290 : :
291 : : /* fetch estimated page cost for tablespace containing table */
5964 rhaas@postgresql.org 292 : 348234 : get_tablespace_page_costs(baserel->reltablespace,
293 : : NULL,
294 : : &spc_seq_page_cost);
295 : :
296 : : /*
297 : : * disk costs
298 : : */
3758 299 : 348234 : disk_run_cost = spc_seq_page_cost * baserel->pages;
300 : :
301 : : /* CPU costs */
5129 tgl@sss.pgh.pa.us 302 : 348234 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
303 : :
304 : 348234 : startup_cost += qpqual_cost.startup;
305 : 348234 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
3758 rhaas@postgresql.org 306 : 348234 : cpu_run_cost = cpu_per_tuple * baserel->tuples;
307 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 tgl@sss.pgh.pa.us 308 : 348234 : startup_cost += path->pathtarget->cost.startup;
309 : 348234 : cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
310 : :
311 : : /* Adjust costing for parallelism, if used. */
3617 rhaas@postgresql.org 312 [ + + ]: 348234 : if (path->parallel_workers > 0)
313 : : {
3399 314 : 24248 : double parallel_divisor = get_parallel_divisor(path);
315 : :
316 : : /* The CPU cost is divided among all the workers. */
3758 317 : 24248 : cpu_run_cost /= parallel_divisor;
318 : :
319 : : /*
320 : : * It may be possible to amortize some of the I/O cost, but probably
321 : : * not very much, because most operating systems already do aggressive
322 : : * prefetching. For now, we assume that the disk run cost can't be
323 : : * amortized at all.
324 : : */
325 : :
326 : : /*
327 : : * In the case of a parallel plan, the row count needs to represent
328 : : * the number of tuples processed per worker.
329 : : */
3399 330 : 24248 : path->rows = clamp_row_est(path->rows / parallel_divisor);
331 : : }
332 : : else
97 rhaas@postgresql.org 333 :GNC 323986 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
334 : :
335 : 348234 : path->disabled_nodes =
336 : 348234 : (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
4008 simon@2ndQuadrant.co 337 :CBC 348234 : path->startup_cost = startup_cost;
3758 rhaas@postgresql.org 338 : 348234 : path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
4008 simon@2ndQuadrant.co 339 : 348234 : }
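/*
 * Illustrative sketch, not planner code: for a non-parallel,
 * non-parameterized path in the default tablespace, cost_seqscan()
 * reduces to the formula below.  The helper name is hypothetical, and
 * pathtarget evaluation costs are omitted for brevity.
 */
static inline Cost
example_seqscan_total_cost(double pages, double tuples, QualCost qual_cost)
{
	Cost		disk_run_cost = seq_page_cost * pages;
	Cost		cpu_run_cost = (cpu_tuple_cost + qual_cost.per_tuple) * tuples;

	return qual_cost.startup + disk_run_cost + cpu_run_cost;
}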
340 : :
341 : : /*
342 : : * cost_samplescan
343 : : * Determines and returns the cost of scanning a relation using sampling.
344 : : *
345 : : * 'baserel' is the relation to be scanned
346 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
347 : : */
348 : : void
3937 tgl@sss.pgh.pa.us 349 : 245 : cost_samplescan(Path *path, PlannerInfo *root,
350 : : RelOptInfo *baserel, ParamPathInfo *param_info)
351 : : {
4008 simon@2ndQuadrant.co 352 : 245 : Cost startup_cost = 0;
353 : 245 : Cost run_cost = 0;
354 : : RangeTblEntry *rte;
355 : : TableSampleClause *tsc;
356 : : TsmRoutine *tsm;
357 : : double spc_seq_page_cost,
358 : : spc_random_page_cost,
359 : : spc_page_cost;
360 : : QualCost qpqual_cost;
361 : : Cost cpu_per_tuple;
97 rhaas@postgresql.org 362 :GNC 245 : uint64 enable_mask = 0;
363 : :
364 : : /* Should only be applied to base relations with tablesample clauses */
4008 simon@2ndQuadrant.co 365 [ - + ]:CBC 245 : Assert(baserel->relid > 0);
3937 tgl@sss.pgh.pa.us 366 [ + - ]: 245 : rte = planner_rt_fetch(baserel->relid, root);
367 [ - + ]: 245 : Assert(rte->rtekind == RTE_RELATION);
368 : 245 : tsc = rte->tablesample;
369 [ - + ]: 245 : Assert(tsc != NULL);
370 : 245 : tsm = GetTsmRoutine(tsc->tsmhandler);
371 : :
372 : : /* Mark the path with the correct row estimate */
373 [ + + ]: 245 : if (param_info)
374 : 60 : path->rows = param_info->ppi_rows;
375 : : else
4008 simon@2ndQuadrant.co 376 : 185 : path->rows = baserel->rows;
377 : :
378 : : /* fetch estimated page cost for tablespace containing table */
379 : 245 : get_tablespace_page_costs(baserel->reltablespace,
380 : : &spc_random_page_cost,
381 : : &spc_seq_page_cost);
382 : :
383 : : /* if NextSampleBlock is used, assume random access, else sequential */
3937 tgl@sss.pgh.pa.us 384 : 490 : spc_page_cost = (tsm->NextSampleBlock != NULL) ?
385 [ + + ]: 245 : spc_random_page_cost : spc_seq_page_cost;
386 : :
387 : : /*
388 : : * disk costs (recall that baserel->pages has already been set to the
389 : : * number of pages the sampling method will visit)
390 : : */
391 : 245 : run_cost += spc_page_cost * baserel->pages;
392 : :
393 : : /*
394 : : * CPU costs (recall that baserel->tuples has already been set to the
395 : : * number of tuples the sampling method will select). Note that we ignore
396 : : * execution cost of the TABLESAMPLE parameter expressions; they will be
397 : : * evaluated only once per scan, and in most usages they'll likely be
398 : : * simple constants anyway. We also don't charge anything for the
399 : : * calculations the sampling method might do internally.
400 : : */
401 : 245 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
402 : :
4008 simon@2ndQuadrant.co 403 : 245 : startup_cost += qpqual_cost.startup;
404 : 245 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
3937 tgl@sss.pgh.pa.us 405 : 245 : run_cost += cpu_per_tuple * baserel->tuples;
406 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 407 : 245 : startup_cost += path->pathtarget->cost.startup;
408 : 245 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
409 : :
97 rhaas@postgresql.org 410 [ + - ]:GNC 245 : if (path->parallel_workers == 0)
411 : 245 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
412 : :
413 : 245 : path->disabled_nodes =
414 : 245 : (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
9576 tgl@sss.pgh.pa.us 415 :CBC 245 : path->startup_cost = startup_cost;
416 : 245 : path->total_cost = startup_cost + run_cost;
10892 scrappy@hub.org 417 : 245 : }
418 : :
419 : : /*
420 : : * cost_gather
421 : : * Determines and returns the cost of gather path.
422 : : *
423 : : * 'rel' is the relation to be operated upon
424 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
425 : : * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
426 : : * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
427 : : * correspond to any particular RelOptInfo.
428 : : */
429 : : void
3870 rhaas@postgresql.org 430 : 21942 : cost_gather(GatherPath *path, PlannerInfo *root,
431 : : RelOptInfo *rel, ParamPathInfo *param_info,
432 : : double *rows)
433 : : {
434 : 21942 : Cost startup_cost = 0;
435 : 21942 : Cost run_cost = 0;
436 : :
437 : : /* Mark the path with the correct row estimate */
3697 438 [ + + ]: 21942 : if (rows)
439 : 5815 : path->path.rows = *rows;
440 [ - + ]: 16127 : else if (param_info)
3870 rhaas@postgresql.org 441 :UBC 0 : path->path.rows = param_info->ppi_rows;
442 : : else
3870 rhaas@postgresql.org 443 :CBC 16127 : path->path.rows = rel->rows;
444 : :
445 : 21942 : startup_cost = path->subpath->startup_cost;
446 : :
447 : 21942 : run_cost = path->subpath->total_cost - path->subpath->startup_cost;
448 : :
449 : : /* Parallel setup and communication cost. */
450 : 21942 : startup_cost += parallel_setup_cost;
3828 451 : 21942 : run_cost += parallel_tuple_cost * path->path.rows;
452 : :
97 rhaas@postgresql.org 453 :GNC 21942 : path->path.disabled_nodes = path->subpath->disabled_nodes
454 : 21942 : + ((rel->pgs_mask & PGS_GATHER) != 0 ? 0 : 1);
3870 rhaas@postgresql.org 455 :CBC 21942 : path->path.startup_cost = startup_cost;
456 : 21942 : path->path.total_cost = (startup_cost + run_cost);
457 : 21942 : }
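/*
 * Illustrative sketch, not planner code: the cost Gather adds on top of
 * its subpath is a one-time parallel setup charge plus a per-tuple IPC
 * charge, as computed in cost_gather() above.  The helper name is
 * hypothetical.
 */
static inline Cost
example_gather_total_cost(Path *subpath, double output_rows)
{
	return subpath->total_cost +
		parallel_setup_cost +
		parallel_tuple_cost * output_rows;
}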
458 : :
459 : : /*
460 : : * cost_gather_merge
461 : : * Determines and returns the cost of gather merge path.
462 : : *
463 : : * GatherMerge merges several pre-sorted input streams, using a heap that at
464 : : * any given instant holds the next tuple from each stream. If there are N
465 : : * streams, we need about N*log2(N) tuple comparisons to construct the heap at
466 : : * startup, and then for each output tuple, about log2(N) comparisons to
467 : : * replace the top heap entry with the next tuple from the same stream.
468 : : */
469 : : void
3344 470 : 15861 : cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
471 : : RelOptInfo *rel, ParamPathInfo *param_info,
472 : : int input_disabled_nodes,
473 : : Cost input_startup_cost, Cost input_total_cost,
474 : : double *rows)
475 : : {
476 : 15861 : Cost startup_cost = 0;
477 : 15861 : Cost run_cost = 0;
478 : : Cost comparison_cost;
479 : : double N;
480 : : double logN;
481 : :
482 : : /* Mark the path with the correct row estimate */
483 [ + + ]: 15861 : if (rows)
484 : 9483 : path->path.rows = *rows;
485 [ - + ]: 6378 : else if (param_info)
3344 rhaas@postgresql.org 486 :UBC 0 : path->path.rows = param_info->ppi_rows;
487 : : else
3344 rhaas@postgresql.org 488 :CBC 6378 : path->path.rows = rel->rows;
489 : :
490 : : /*
491 : : * Add one to the number of workers to account for the leader. This might
492 : : * be overgenerous since the leader will do less work than other workers
493 : : * in typical cases, but we'll go with it for now.
494 : : */
495 [ - + ]: 15861 : Assert(path->num_workers > 0);
496 : 15861 : N = (double) path->num_workers + 1;
497 : 15861 : logN = LOG2(N);
498 : :
499 : : /* Assumed cost per tuple comparison */
500 : 15861 : comparison_cost = 2.0 * cpu_operator_cost;
501 : :
502 : : /* Heap creation cost */
503 : 15861 : startup_cost += comparison_cost * N * logN;
504 : :
505 : : /* Per-tuple heap maintenance cost */
506 : 15861 : run_cost += path->path.rows * comparison_cost * logN;
507 : :
508 : : /* small cost for heap management, like cost_merge_append */
509 : 15861 : run_cost += cpu_operator_cost * path->path.rows;
510 : :
511 : : /*
512 : : * Parallel setup and communication cost. Since Gather Merge, unlike
513 : : * Gather, requires us to block until a tuple is available from every
514 : : * worker, we bump the IPC cost up a little bit as compared with Gather.
515 : : * For lack of a better idea, charge an extra 5%.
516 : : */
517 : 15861 : startup_cost += parallel_setup_cost;
518 : 15861 : run_cost += parallel_tuple_cost * path->path.rows * 1.05;
519 : :
97 rhaas@postgresql.org 520 :GNC 15861 : path->path.disabled_nodes = path->subpath->disabled_nodes
521 : 15861 : + ((rel->pgs_mask & PGS_GATHER_MERGE) != 0 ? 0 : 1);
3344 rhaas@postgresql.org 522 :CBC 15861 : path->path.startup_cost = startup_cost + input_startup_cost;
523 : 15861 : path->path.total_cost = (startup_cost + run_cost + input_total_cost);
524 : 15861 : }
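/*
 * Worked example of the costs above: with the default of two workers plus
 * the leader, N = 3 and log2(3) is about 1.585, so heap creation costs
 * about 2.0 * cpu_operator_cost * 3 * 1.585, i.e. roughly
 * 9.5 * cpu_operator_cost, and each output tuple pays about
 * (2.0 * 1.585 + 1.0) * cpu_operator_cost for heap maintenance and
 * management plus 1.05 * parallel_tuple_cost for IPC.
 */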
525 : :
526 : : /*
527 : : * cost_index
528 : : * Determines and returns the cost of scanning a relation using an index.
529 : : *
530 : : * 'path' describes the indexscan under consideration, and is complete
531 : : * except for the fields to be set by this routine
532 : : * 'loop_count' is the number of repetitions of the indexscan to factor into
533 : : * estimates of caching behavior
534 : : *
535 : : * In addition to rows, startup_cost and total_cost, cost_index() sets the
536 : : * path's indextotalcost and indexselectivity fields. These values will be
537 : : * needed if the IndexPath is used in a BitmapIndexScan.
538 : : *
539 : : * NOTE: path->indexquals must contain only clauses usable as index
540 : : * restrictions. Any additional quals evaluated as qpquals may reduce the
541 : : * number of returned tuples, but they won't reduce the number of tuples
542 : : * we have to fetch from the table, so they don't reduce the scan cost.
543 : : */
544 : : void
3366 545 : 668468 : cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
546 : : bool partial_path)
547 : : {
5246 tgl@sss.pgh.pa.us 548 : 668468 : IndexOptInfo *index = path->indexinfo;
7709 549 : 668468 : RelOptInfo *baserel = index->rel;
5246 550 : 668468 : bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
551 : : amcostestimate_function amcostestimate;
552 : : List *qpquals;
9576 553 : 668468 : Cost startup_cost = 0;
554 : 668468 : Cost run_cost = 0;
3366 rhaas@postgresql.org 555 : 668468 : Cost cpu_run_cost = 0;
556 : : Cost indexStartupCost;
557 : : Cost indexTotalCost;
558 : : Selectivity indexSelectivity;
559 : : double indexCorrelation,
560 : : csquared;
561 : : double spc_seq_page_cost,
562 : : spc_random_page_cost;
563 : : Cost min_IO_cost,
564 : : max_IO_cost;
565 : : QualCost qpqual_cost;
566 : : Cost cpu_per_tuple;
567 : : double tuples_fetched;
568 : : double pages_fetched;
569 : : double rand_heap_pages;
570 : : double index_pages;
571 : : uint64 enable_mask;
572 : :
573 : : /* Should only be applied to base relations */
8759 tgl@sss.pgh.pa.us 574 [ + - - + ]: 668468 : Assert(IsA(baserel, RelOptInfo) &&
575 : : IsA(index, IndexOptInfo));
8487 576 [ - + ]: 668468 : Assert(baserel->relid > 0);
8759 577 [ - + ]: 668468 : Assert(baserel->rtekind == RTE_RELATION);
578 : :
579 : : /*
580 : : * Mark the path with the correct row estimate, and identify which quals
581 : : * will need to be enforced as qpquals. We need not check any quals that
582 : : * are implied by the index's predicate, so we can use indrestrictinfo not
583 : : * baserestrictinfo as the list of relevant restriction clauses for the
584 : : * rel.
585 : : */
5129 586 [ + + ]: 668468 : if (path->path.param_info)
587 : : {
588 : 135933 : path->path.rows = path->path.param_info->ppi_rows;
589 : : /* qpquals come from the rel's restriction clauses and ppi_clauses */
2642 590 : 135933 : qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
591 : : path->indexclauses),
3240 592 : 135933 : extract_nonindex_conditions(path->path.param_info->ppi_clauses,
593 : : path->indexclauses));
594 : : }
595 : : else
596 : : {
5129 597 : 532535 : path->path.rows = baserel->rows;
598 : : /* qpquals come from just the rel's restriction clauses */
3687 599 : 532535 : qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
600 : : path->indexclauses);
601 : : }
602 : :
603 : : /* is this scan type disabled? */
97 rhaas@postgresql.org 604 [ + + ]:GNC 668468 : enable_mask = (indexonly ? PGS_INDEXONLYSCAN : PGS_INDEXSCAN)
605 [ + + ]: 668468 : | (partial_path ? 0 : PGS_CONSIDER_NONPARTIAL);
606 : 668468 : path->path.disabled_nodes =
607 : 668468 : (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
608 : :
609 : : /*
610 : : * Call index-access-method-specific code to estimate the processing cost
611 : : * for scanning the index, as well as the selectivity of the index (ie,
612 : : * the fraction of main-table tuples we will have to retrieve) and its
613 : : * correlation to the main-table tuple order. We need a cast here because
614 : : * pathnodes.h uses a weak function type to avoid including amapi.h.
615 : : */
3761 tgl@sss.pgh.pa.us 616 :CBC 668468 : amcostestimate = (amcostestimate_function) index->amcostestimate;
617 : 668468 : amcostestimate(root, path, loop_count,
618 : : &indexStartupCost, &indexTotalCost,
619 : : &indexSelectivity, &indexCorrelation,
620 : : &index_pages);
621 : :
622 : : /*
623 : : * Save amcostestimate's results for possible use in bitmap scan planning.
624 : : * We don't bother to save indexStartupCost or indexCorrelation, because a
625 : : * bitmap scan doesn't care about either.
626 : : */
7684 627 : 668468 : path->indextotalcost = indexTotalCost;
628 : 668468 : path->indexselectivity = indexSelectivity;
629 : :
630 : : /* all costs for touching index itself included here */
9576 631 : 668468 : startup_cost += indexStartupCost;
632 : 668468 : run_cost += indexTotalCost - indexStartupCost;
633 : :
634 : : /* estimate number of main-table tuples fetched */
7273 635 : 668468 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
636 : :
637 : : /* fetch estimated page costs for tablespace containing table */
5964 rhaas@postgresql.org 638 : 668468 : get_tablespace_page_costs(baserel->reltablespace,
639 : : &spc_random_page_cost,
640 : : &spc_seq_page_cost);
641 : :
642 : : /*----------
643 : : * Estimate number of main-table pages fetched, and compute I/O cost.
644 : : *
645 : : * When the index ordering is uncorrelated with the table ordering,
646 : : * we use an approximation proposed by Mackert and Lohman (see
647 : : * index_pages_fetched() for details) to compute the number of pages
648 : : * fetched, and then charge spc_random_page_cost per page fetched.
649 : : *
650 : : * When the index ordering is exactly correlated with the table ordering
651 : : * (just after a CLUSTER, for example), the number of pages fetched should
652 : : * be exactly selectivity * table_size. What's more, all but the first
653 : : * will be sequential fetches, not the random fetches that occur in the
654 : : * uncorrelated case. So if the number of pages is more than 1, we
655 : : * ought to charge
656 : : * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
657 : : * For partially-correlated indexes, we ought to charge somewhere between
658 : : * these two estimates. We currently interpolate linearly between the
659 : : * estimates based on the correlation squared (XXX is that appropriate?).
660 : : *
661 : : * If it's an index-only scan, then we will not need to fetch any heap
662 : : * pages for which the visibility map shows all tuples are visible.
663 : : * Hence, reduce the estimated number of heap fetches accordingly.
664 : : * We use the measured fraction of the entire heap that is all-visible,
665 : : * which might not be particularly relevant to the subset of the heap
666 : : * that this query will fetch; but it's not clear how to do better.
667 : : *----------
668 : : */
5212 tgl@sss.pgh.pa.us 669 [ + + ]: 668468 : if (loop_count > 1)
670 : : {
671 : : /*
672 : : * For repeated indexscans, the appropriate estimate for the
673 : : * uncorrelated case is to scale up the number of tuples fetched in
674 : : * the Mackert and Lohman formula by the number of scans, so that we
675 : : * estimate the number of pages fetched by all the scans; then
676 : : * pro-rate the costs for one scan. In this case we assume all the
677 : : * fetches are random accesses.
678 : : */
679 : 75964 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
680 : : baserel->pages,
7168 681 : 75964 : (double) index->pages,
682 : : root);
683 : :
5323 684 [ + + ]: 75964 : if (indexonly)
5317 685 : 9534 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
686 : :
3366 rhaas@postgresql.org 687 : 75964 : rand_heap_pages = pages_fetched;
688 : :
5212 tgl@sss.pgh.pa.us 689 : 75964 : max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
690 : :
691 : : /*
692 : : * In the perfectly correlated case, the number of pages touched by
693 : : * each scan is selectivity * table_size, and we can use the Mackert
694 : : * and Lohman formula at the page level to estimate how much work is
695 : : * saved by caching across scans. We still assume all the fetches are
696 : : * random, though, which is an overestimate that's hard to correct for
697 : : * without double-counting the cache effects. (But in most cases
698 : : * where such a plan is actually interesting, only one page would get
699 : : * fetched per scan anyway, so it shouldn't matter much.)
700 : : */
7081 701 : 75964 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
702 : :
5212 703 : 75964 : pages_fetched = index_pages_fetched(pages_fetched * loop_count,
704 : : baserel->pages,
7081 705 : 75964 : (double) index->pages,
706 : : root);
707 : :
5323 708 [ + + ]: 75964 : if (indexonly)
5317 709 : 9534 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
710 : :
5212 711 : 75964 : min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
712 : : }
713 : : else
714 : : {
715 : : /*
716 : : * Normal case: apply the Mackert and Lohman formula, and then
717 : : * interpolate between that and the correlation-derived result.
718 : : */
7273 719 : 592504 : pages_fetched = index_pages_fetched(tuples_fetched,
720 : : baserel->pages,
7168 721 : 592504 : (double) index->pages,
722 : : root);
723 : :
5323 724 [ + + ]: 592504 : if (indexonly)
5317 725 : 53388 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
726 : :
3366 rhaas@postgresql.org 727 : 592504 : rand_heap_pages = pages_fetched;
728 : :
729 : : /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
5964 730 : 592504 : max_IO_cost = pages_fetched * spc_random_page_cost;
731 : :
732 : : /* min_IO_cost is for the perfectly correlated case (csquared=1) */
7273 tgl@sss.pgh.pa.us 733 : 592504 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
734 : :
5323 735 [ + + ]: 592504 : if (indexonly)
5317 736 : 53388 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
737 : :
5315 738 [ + + ]: 592504 : if (pages_fetched > 0)
739 : : {
740 : 510637 : min_IO_cost = spc_random_page_cost;
741 [ + + ]: 510637 : if (pages_fetched > 1)
742 : 154052 : min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
743 : : }
744 : : else
745 : 81867 : min_IO_cost = 0;
746 : : }
747 : :
3366 rhaas@postgresql.org 748 [ + + ]: 668468 : if (partial_path)
749 : : {
750 : : /*
751 : : * For index only scans compute workers based on number of index pages
752 : : * fetched; the number of heap pages we fetch might be so small as to
753 : : * effectively rule out parallelism, which we don't want to do.
754 : : */
3339 755 [ + + ]: 229022 : if (indexonly)
756 : 19521 : rand_heap_pages = -1;
757 : :
758 : : /*
759 : : * Estimate the number of parallel workers required to scan index. Use
760 : : * the number of heap pages computed considering heap fetches won't be
761 : : * sequential as for parallel scans the pages are accessed in random
762 : : * order.
763 : : */
3366 764 : 229022 : path->path.parallel_workers = compute_parallel_worker(baserel,
765 : : rand_heap_pages,
766 : : index_pages,
767 : : max_parallel_workers_per_gather);
768 : :
769 : : /*
770 : : * Fall out if workers can't be assigned for parallel scan, because in
771 : : * such a case this path will be rejected. So there is no benefit in
772 : : * doing extra computation.
773 : : */
774 [ + + ]: 229022 : if (path->path.parallel_workers <= 0)
775 : 221358 : return;
776 : :
777 : 7664 : path->path.parallel_aware = true;
778 : : }
779 : :
780 : : /*
781 : : * Now interpolate based on estimated index order correlation to get total
782 : : * disk I/O cost for main table accesses.
783 : : */
7081 tgl@sss.pgh.pa.us 784 : 447110 : csquared = indexCorrelation * indexCorrelation;
785 : :
786 : 447110 : run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
787 : :
788 : : /*
789 : : * Estimate CPU costs per tuple.
790 : : *
791 : : * What we want here is cpu_tuple_cost plus the evaluation costs of any
792 : : * qual clauses that we have to evaluate as qpquals.
793 : : */
4081 794 : 447110 : cost_qual_eval(&qpqual_cost, qpquals, root);
795 : :
5137 796 : 447110 : startup_cost += qpqual_cost.startup;
797 : 447110 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
798 : :
3366 rhaas@postgresql.org 799 : 447110 : cpu_run_cost += cpu_per_tuple * tuples_fetched;
800 : :
801 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 tgl@sss.pgh.pa.us 802 : 447110 : startup_cost += path->path.pathtarget->cost.startup;
3366 rhaas@postgresql.org 803 : 447110 : cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
804 : :
805 : : /* Adjust costing for parallelism, if used. */
806 [ + + ]: 447110 : if (path->path.parallel_workers > 0)
807 : : {
808 : 7664 : double parallel_divisor = get_parallel_divisor(&path->path);
809 : :
810 : 7664 : path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
811 : :
812 : : /* The CPU cost is divided among all the workers. */
813 : 7664 : cpu_run_cost /= parallel_divisor;
814 : : }
815 : :
816 : 447110 : run_cost += cpu_run_cost;
817 : :
7684 tgl@sss.pgh.pa.us 818 : 447110 : path->path.startup_cost = startup_cost;
819 : 447110 : path->path.total_cost = startup_cost + run_cost;
820 : : }
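/*
 * Illustrative sketch, not planner code: the main-table I/O charge in
 * cost_index() interpolates between the perfectly correlated (min) and
 * uncorrelated (max) estimates using the squared index-order correlation.
 * The helper name is hypothetical.
 */
static inline Cost
example_index_io_cost(Cost min_IO_cost, Cost max_IO_cost,
					  double indexCorrelation)
{
	double		csquared = indexCorrelation * indexCorrelation;

	return max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
}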
821 : :
822 : : /*
823 : : * extract_nonindex_conditions
824 : : *
825 : : * Given a list of quals to be enforced in an indexscan, extract the ones that
826 : : * will have to be applied as qpquals (ie, the index machinery won't handle
827 : : * them). Here we detect only whether a qual clause is directly redundant
828 : : * with some indexclause. If the index path is chosen for use, createplan.c
829 : : * will try a bit harder to get rid of redundant qual conditions; specifically
830 : : * it will see if quals can be proven to be implied by the indexquals. But
831 : : * it does not seem worth the cycles to try to factor that in at this stage,
832 : : * since we're only trying to estimate qual eval costs. Otherwise this must
833 : : * match the logic in create_indexscan_plan().
834 : : *
835 : : * qual_clauses, and the result, are lists of RestrictInfos.
836 : : * indexclauses is a list of IndexClauses.
837 : : */
838 : : static List *
2642 839 : 804401 : extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
840 : : {
4081 841 : 804401 : List *result = NIL;
842 : : ListCell *lc;
843 : :
844 [ + + + + : 1657887 : foreach(lc, qual_clauses)
+ + ]
845 : : {
3312 846 : 853486 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
847 : :
4081 848 [ + + ]: 853486 : if (rinfo->pseudoconstant)
849 : 3414 : continue; /* we may drop pseudoconstants here */
2642 850 [ + + ]: 850072 : if (is_redundant_with_indexclauses(rinfo, indexclauses))
851 : 485068 : continue; /* dup or derived from same EquivalenceClass */
852 : : /* ... skip the predicate proof attempt createplan.c will try ... */
4081 853 : 365004 : result = lappend(result, rinfo);
854 : : }
855 : 804401 : return result;
856 : : }
857 : :
858 : : /*
859 : : * index_pages_fetched
860 : : * Estimate the number of pages actually fetched after accounting for
861 : : * cache effects.
862 : : *
863 : : * We use an approximation proposed by Mackert and Lohman, "Index Scans
864 : : * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
865 : : * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
866 : : * The Mackert and Lohman approximation is that the number of pages
867 : : * fetched is
868 : : * PF =
869 : : * min(2TNs/(2T+Ns), T) when T <= b
870 : : * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
871 : : * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
872 : : * where
873 : : * T = # pages in table
874 : : * N = # tuples in table
875 : : * s = selectivity = fraction of table to be scanned
876 : : * b = # buffer pages available (we include kernel space here)
877 : : *
878 : : * We assume that effective_cache_size is the total number of buffer pages
879 : : * available for the whole query, and pro-rate that space across all the
880 : : * tables in the query and the index currently under consideration. (This
881 : : * ignores space needed for other indexes used by the query, but since we
882 : : * don't know which indexes will get used, we can't estimate that very well;
883 : : * and in any case counting all the tables may well be an overestimate, since
884 : : * depending on the join plan not all the tables may be scanned concurrently.)
885 : : *
886 : : * The product Ns is the number of tuples fetched; we pass in that
887 : : * product rather than calculating it here. "pages" is the number of pages
888 : : * in the object under consideration (either an index or a table).
889 : : * "index_pages" is the amount to add to the total table space, which was
890 : : * computed for us by make_one_rel.
891 : : *
892 : : * Caller is expected to have ensured that tuples_fetched is greater than zero
893 : : * and rounded to integer (see clamp_row_est). The result will likewise be
894 : : * greater than zero and integral.
895 : : */
896 : : double
7273 897 : 951919 : index_pages_fetched(double tuples_fetched, BlockNumber pages,
898 : : double index_pages, PlannerInfo *root)
899 : : {
900 : : double pages_fetched;
901 : : double total_pages;
902 : : double T,
903 : : b;
904 : :
905 : : /* T is # pages in table, but don't allow it to be zero */
906 [ + + ]: 951919 : T = (pages > 1) ? (double) pages : 1.0;
907 : :
908 : : /* Compute number of pages assumed to be competing for cache space */
7168 909 : 951919 : total_pages = root->total_table_pages + index_pages;
910 [ + + ]: 951919 : total_pages = Max(total_pages, 1.0);
911 [ - + ]: 951919 : Assert(T <= total_pages);
912 : :
913 : : /* b is pro-rated share of effective_cache_size */
3240 914 : 951919 : b = (double) effective_cache_size * T / total_pages;
915 : :
916 : : /* force it positive and integral */
7273 917 [ - + ]: 951919 : if (b <= 1.0)
7273 tgl@sss.pgh.pa.us 918 :UBC 0 : b = 1.0;
919 : : else
7273 tgl@sss.pgh.pa.us 920 :CBC 951919 : b = ceil(b);
921 : :
922 : : /* This part is the Mackert and Lohman formula */
923 [ + - ]: 951919 : if (T <= b)
924 : : {
925 : 951919 : pages_fetched =
926 : 951919 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
927 [ + + ]: 951919 : if (pages_fetched >= T)
928 : 573860 : pages_fetched = T;
929 : : else
930 : 378059 : pages_fetched = ceil(pages_fetched);
931 : : }
932 : : else
933 : : {
934 : : double lim;
935 : :
7273 tgl@sss.pgh.pa.us 936 :UBC 0 : lim = (2.0 * T * b) / (2.0 * T - b);
937 [ # # ]: 0 : if (tuples_fetched <= lim)
938 : : {
939 : 0 : pages_fetched =
940 : 0 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
941 : : }
942 : : else
943 : : {
944 : 0 : pages_fetched =
945 : 0 : b + (tuples_fetched - lim) * (T - b) / T;
946 : : }
947 : 0 : pages_fetched = ceil(pages_fetched);
948 : : }
7273 tgl@sss.pgh.pa.us 949 :CBC 951919 : return pages_fetched;
950 : : }
951 : :
952 : : /*
953 : : * get_indexpath_pages
954 : : * Determine the total size of the indexes used in a bitmap index path.
955 : : *
956 : : * Note: if the same index is used more than once in a bitmap tree, we will
957 : : * count it multiple times, which perhaps is the wrong thing ... but it's
958 : : * not completely clear, and detecting duplicates is difficult, so ignore it
959 : : * for now.
960 : : */
961 : : static double
7168 962 : 170435 : get_indexpath_pages(Path *bitmapqual)
963 : : {
964 : 170435 : double result = 0;
965 : : ListCell *l;
966 : :
967 [ + + ]: 170435 : if (IsA(bitmapqual, BitmapAndPath))
968 : : {
969 : 21299 : BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
970 : :
971 [ + - + + : 63897 : foreach(l, apath->bitmapquals)
+ + ]
972 : : {
973 : 42598 : result += get_indexpath_pages((Path *) lfirst(l));
974 : : }
975 : : }
976 [ + + ]: 149136 : else if (IsA(bitmapqual, BitmapOrPath))
977 : : {
978 : 97 : BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
979 : :
980 [ + - + + : 301 : foreach(l, opath->bitmapquals)
+ + ]
981 : : {
982 : 204 : result += get_indexpath_pages((Path *) lfirst(l));
983 : : }
984 : : }
985 [ + - ]: 149039 : else if (IsA(bitmapqual, IndexPath))
986 : : {
987 : 149039 : IndexPath *ipath = (IndexPath *) bitmapqual;
988 : :
989 : 149039 : result = (double) ipath->indexinfo->pages;
990 : : }
991 : : else
7168 tgl@sss.pgh.pa.us 992 [ # # ]:UBC 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
993 : :
7168 tgl@sss.pgh.pa.us 994 :CBC 170435 : return result;
995 : : }
996 : :
997 : : /*
998 : : * cost_bitmap_heap_scan
999 : : * Determines and returns the cost of scanning a relation using a bitmap
1000 : : * index-then-heap plan.
1001 : : *
1002 : : * 'baserel' is the relation to be scanned
1003 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1004 : : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
1005 : : * 'loop_count' is the number of repetitions of the indexscan to factor into
1006 : : * estimates of caching behavior
1007 : : *
1008 : : * Note: the component IndexPaths in bitmapqual should have been costed
1009 : : * using the same loop_count.
1010 : : */
1011 : : void
7639 1012 : 449641 : cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
1013 : : ParamPathInfo *param_info,
1014 : : Path *bitmapqual, double loop_count)
1015 : : {
7686 1016 : 449641 : Cost startup_cost = 0;
1017 : 449641 : Cost run_cost = 0;
1018 : : Cost indexTotalCost;
1019 : : QualCost qpqual_cost;
1020 : : Cost cpu_per_tuple;
1021 : : Cost cost_per_page;
1022 : : Cost cpu_run_cost;
1023 : : double tuples_fetched;
1024 : : double pages_fetched;
1025 : : double spc_seq_page_cost,
1026 : : spc_random_page_cost;
1027 : : double T;
97 rhaas@postgresql.org 1028 :GNC 449641 : uint64 enable_mask = PGS_BITMAPSCAN;
1029 : :
1030 : : /* Should only be applied to base relations */
7686 tgl@sss.pgh.pa.us 1031 [ - + ]:CBC 449641 : Assert(IsA(baserel, RelOptInfo));
1032 [ - + ]: 449641 : Assert(baserel->relid > 0);
1033 [ - + ]: 449641 : Assert(baserel->rtekind == RTE_RELATION);
1034 : :
1035 : : /* Mark the path with the correct row estimate */
5129 1036 [ + + ]: 449641 : if (param_info)
1037 : 207028 : path->rows = param_info->ppi_rows;
1038 : : else
5212 1039 : 242613 : path->rows = baserel->rows;
1040 : :
3385 rhaas@postgresql.org 1041 : 449641 : pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
1042 : : loop_count, &indexTotalCost,
1043 : : &tuples_fetched);
1044 : :
7684 tgl@sss.pgh.pa.us 1045 : 449641 : startup_cost += indexTotalCost;
3385 rhaas@postgresql.org 1046 [ + + ]: 449641 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
1047 : :
1048 : : /* Fetch estimated page costs for tablespace containing table. */
5964 1049 : 449641 : get_tablespace_page_costs(baserel->reltablespace,
1050 : : &spc_random_page_cost,
1051 : : &spc_seq_page_cost);
1052 : :
1053 : : /*
1054 : : * For small numbers of pages we should charge spc_random_page_cost
1055 : : * apiece, while if nearly all the table's pages are being read, it's more
1056 : : * appropriate to charge spc_seq_page_cost apiece. The effect is
1057 : : * nonlinear, too. For lack of a better idea, interpolate like this to
1058 : : * determine the cost per page.
1059 : : */
7683 tgl@sss.pgh.pa.us 1060 [ + + ]: 449641 : if (pages_fetched >= 2.0)
5964 rhaas@postgresql.org 1061 : 89118 : cost_per_page = spc_random_page_cost -
1062 : 89118 : (spc_random_page_cost - spc_seq_page_cost)
1063 : 89118 : * sqrt(pages_fetched / T);
1064 : : else
1065 : 360523 : cost_per_page = spc_random_page_cost;
1066 : :
7684 tgl@sss.pgh.pa.us 1067 : 449641 : run_cost += pages_fetched * cost_per_page;
1068 : :
1069 : : /*
1070 : : * Estimate CPU costs per tuple.
1071 : : *
1072 : : * Often the indexquals don't need to be rechecked at each tuple ... but
1073 : : * not always, especially not if there are enough tuples involved that the
1074 : : * bitmaps become lossy. For the moment, just assume they will be
1075 : : * rechecked always. This means we charge the full freight for all the
1076 : : * scan clauses.
1077 : : */
5129 1078 : 449641 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1079 : :
1080 : 449641 : startup_cost += qpqual_cost.startup;
1081 : 449641 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
3345 rhaas@postgresql.org 1082 : 449641 : cpu_run_cost = cpu_per_tuple * tuples_fetched;
1083 : :
1084 : : /* Adjust costing for parallelism, if used. */
1085 [ + + ]: 449641 : if (path->parallel_workers > 0)
1086 : : {
1087 : 3244 : double parallel_divisor = get_parallel_divisor(path);
1088 : :
1089 : : /* The CPU cost is divided among all the workers. */
1090 : 3244 : cpu_run_cost /= parallel_divisor;
1091 : :
1092 : 3244 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1093 : : }
1094 : : else
97 rhaas@postgresql.org 1095 :GNC 446397 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1096 : :
1097 : :
3345 rhaas@postgresql.org 1098 :CBC 449641 : run_cost += cpu_run_cost;
1099 : :
1100 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 tgl@sss.pgh.pa.us 1101 : 449641 : startup_cost += path->pathtarget->cost.startup;
1102 : 449641 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1103 : :
97 rhaas@postgresql.org 1104 :GNC 449641 : path->disabled_nodes =
1105 : 449641 : (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
7686 tgl@sss.pgh.pa.us 1106 :CBC 449641 : path->startup_cost = startup_cost;
1107 : 449641 : path->total_cost = startup_cost + run_cost;
1108 : 449641 : }
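/*
 * Illustrative sketch, not planner code: with default tablespace costs,
 * the per-page charge used by cost_bitmap_heap_scan() slides from
 * random_page_cost toward seq_page_cost as the fetched fraction of the
 * table grows.  The helper name is hypothetical.
 */
static inline Cost
example_bitmap_cost_per_page(double pages_fetched, double T)
{
	if (pages_fetched >= 2.0)
		return random_page_cost -
			(random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
	else
		return random_page_cost;
}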
1109 : :
1110 : : /*
1111 : : * cost_bitmap_tree_node
1112 : : * Extract cost and selectivity from a bitmap tree node (index/and/or)
1113 : : */
1114 : : void
7684 1115 : 842561 : cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
1116 : : {
1117 [ + + ]: 842561 : if (IsA(path, IndexPath))
1118 : : {
1119 : 796340 : *cost = ((IndexPath *) path)->indextotalcost;
1120 : 796340 : *selec = ((IndexPath *) path)->indexselectivity;
1121 : :
1122 : : /*
1123 : : * Charge a small amount per retrieved tuple to reflect the costs of
1124 : : * manipulating the bitmap. This is mostly to make sure that a bitmap
1125 : : * scan doesn't look to be the same cost as an indexscan to retrieve a
1126 : : * single tuple.
1127 : : */
5212 1128 : 796340 : *cost += 0.1 * cpu_operator_cost * path->rows;
1129 : : }
7684 1130 [ + + ]: 46221 : else if (IsA(path, BitmapAndPath))
1131 : : {
1132 : 41867 : *cost = path->total_cost;
1133 : 41867 : *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1134 : : }
1135 [ + - ]: 4354 : else if (IsA(path, BitmapOrPath))
1136 : : {
1137 : 4354 : *cost = path->total_cost;
1138 : 4354 : *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1139 : : }
1140 : : else
1141 : : {
7684 tgl@sss.pgh.pa.us 1142 [ # # ]:UBC 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1143 : : *cost = *selec = 0; /* keep compiler quiet */
1144 : : }
7684 tgl@sss.pgh.pa.us 1145 :CBC 842561 : }
1146 : :
1147 : : /*
1148 : : * cost_bitmap_and_node
1149 : : * Estimate the cost of a BitmapAnd node
1150 : : *
1151 : : * Note that this considers only the costs of index scanning and bitmap
1152 : : * creation, not the eventual heap access. In that sense the object isn't
1153 : : * truly a Path, but it has enough path-like properties (costs in particular)
1154 : : * to warrant treating it as one. We don't bother to set the path rows field,
1155 : : * however.
1156 : : */
1157 : : void
7639 1158 : 41734 : cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
1159 : : {
1160 : : Cost totalCost;
1161 : : Selectivity selec;
1162 : : ListCell *l;
1163 : :
1164 : : /*
1165 : : * We estimate AND selectivity on the assumption that the inputs are
1166 : : * independent. This is probably often wrong, but we don't have the info
1167 : : * to do better.
1168 : : *
1169 : : * The runtime cost of the BitmapAnd itself is estimated at 100x
1170 : : * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1171 : : * definitely too simplistic?
1172 : : */
7684 1173 : 41734 : totalCost = 0.0;
1174 : 41734 : selec = 1.0;
1175 [ + - + + : 125202 : foreach(l, path->bitmapquals)
+ + ]
1176 : : {
7507 bruce@momjian.us 1177 : 83468 : Path *subpath = (Path *) lfirst(l);
1178 : : Cost subCost;
1179 : : Selectivity subselec;
1180 : :
7684 tgl@sss.pgh.pa.us 1181 : 83468 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1182 : :
1183 : 83468 : selec *= subselec;
1184 : :
1185 : 83468 : totalCost += subCost;
1186 [ + + ]: 83468 : if (l != list_head(path->bitmapquals))
1187 : 41734 : totalCost += 100.0 * cpu_operator_cost;
1188 : : }
1189 : 41734 : path->bitmapselectivity = selec;
5212 1190 : 41734 : path->path.rows = 0; /* per above, not used */
622 rhaas@postgresql.org 1191 : 41734 : path->path.disabled_nodes = 0;
7684 tgl@sss.pgh.pa.us 1192 : 41734 : path->path.startup_cost = totalCost;
1193 : 41734 : path->path.total_cost = totalCost;
1194 : 41734 : }
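/*
 * Worked example of the estimate above: ANDing two inputs with
 * selectivities 0.1 and 0.2 under the independence assumption yields
 * 0.1 * 0.2 = 0.02, and the single tbm_intersect needed adds
 * 100.0 * cpu_operator_cost to the total cost.
 */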
1195 : :
1196 : : /*
1197 : : * cost_bitmap_or_node
1198 : : * Estimate the cost of a BitmapOr node
1199 : : *
1200 : : * See comments for cost_bitmap_and_node.
1201 : : */
1202 : : void
7639 1203 : 1752 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1204 : : {
1205 : : Cost totalCost;
1206 : : Selectivity selec;
1207 : : ListCell *l;
1208 : :
1209 : : /*
1210 : : * We estimate OR selectivity on the assumption that the inputs are
1211 : : * non-overlapping, since that's often the case in "x IN (list)" type
1212 : : * situations. Of course, we clamp to 1.0 at the end.
1213 : : *
1214 : : * The runtime cost of the BitmapOr itself is estimated at 100x
1215 : : * cpu_operator_cost for each tbm_union needed. Probably too small,
1216 : : * definitely too simplistic? We are aware that the tbm_unions are
1217 : : * optimized out when the inputs are BitmapIndexScans.
1218 : : */
7684 1219 : 1752 : totalCost = 0.0;
1220 : 1752 : selec = 0.0;
1221 [ + - + + : 4097 : foreach(l, path->bitmapquals)
+ + ]
1222 : : {
7507 bruce@momjian.us 1223 : 2345 : Path *subpath = (Path *) lfirst(l);
1224 : : Cost subCost;
1225 : : Selectivity subselec;
1226 : :
7684 tgl@sss.pgh.pa.us 1227 : 2345 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1228 : :
1229 : 2345 : selec += subselec;
1230 : :
1231 : 2345 : totalCost += subCost;
1232 [ + + ]: 2345 : if (l != list_head(path->bitmapquals) &&
1233 [ - + ]: 593 : !IsA(subpath, IndexPath))
7684 tgl@sss.pgh.pa.us 1234 :LBC (3) : totalCost += 100.0 * cpu_operator_cost;
1235 : : }
7684 tgl@sss.pgh.pa.us 1236 [ + - ]:CBC 1752 : path->bitmapselectivity = Min(selec, 1.0);
5212 1237 : 1752 : path->path.rows = 0; /* per above, not used */
7684 1238 : 1752 : path->path.startup_cost = totalCost;
1239 : 1752 : path->path.total_cost = totalCost;
1240 : 1752 : }
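/*
 * Worked example of the estimate above: ORing inputs with selectivities
 * 0.7 and 0.6 under the non-overlap assumption sums to 1.3, which is
 * clamped to 1.0.
 */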
1241 : :
1242 : : /*
1243 : : * cost_tidscan
1244 : : * Determines and returns the cost of scanning a relation using TIDs.
1245 : : *
1246 : : * 'baserel' is the relation to be scanned
1247 : : * 'tidquals' is the list of TID-checkable quals
1248 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1249 : : */
1250 : : void
7639 1251 : 643 : cost_tidscan(Path *path, PlannerInfo *root,
1252 : : RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1253 : : {
9576 1254 : 643 : Cost startup_cost = 0;
1255 : 643 : Cost run_cost = 0;
1256 : : QualCost qpqual_cost;
1257 : : Cost cpu_per_tuple;
1258 : : QualCost tid_qual_cost;
1259 : : double ntuples;
1260 : : ListCell *l;
1261 : : double spc_random_page_cost;
97 rhaas@postgresql.org 1262 :GNC 643 : uint64 enable_mask = 0;
1263 : :
1264 : : /* Should only be applied to base relations */
8487 tgl@sss.pgh.pa.us 1265 [ - + ]:CBC 643 : Assert(baserel->relid > 0);
8759 1266 [ - + ]: 643 : Assert(baserel->rtekind == RTE_RELATION);
622 rhaas@postgresql.org 1267 [ - + ]: 643 : Assert(tidquals != NIL);
1268 : :
1269 : : /* Mark the path with the correct row estimate */
5000 tgl@sss.pgh.pa.us 1270 [ + + ]: 643 : if (param_info)
1271 : 97 : path->rows = param_info->ppi_rows;
1272 : : else
1273 : 546 : path->rows = baserel->rows;
1274 : :
1275 : : /* Count how many tuples we expect to retrieve */
7465 1276 : 643 : ntuples = 0;
1277 [ + - + + : 1307 : foreach(l, tidquals)
+ + ]
1278 : : {
2683 1279 : 664 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
1280 : 664 : Expr *qual = rinfo->clause;
1281 : :
1282 : : /*
1283 : : * We must use a TID scan for CurrentOfExpr; in any other case, we
1284 : : * should be generating a TID scan only if TID scans are allowed.
1285 : : * Also, if CurrentOfExpr is the qual, there should be only one.
1286 : : */
97 rhaas@postgresql.org 1287 [ - + - - ]:GNC 664 : Assert((baserel->pgs_mask & PGS_TIDSCAN) != 0 || IsA(qual, CurrentOfExpr));
622 rhaas@postgresql.org 1288 [ + + - + ]:CBC 664 : Assert(list_length(tidquals) == 1 || !IsA(qual, CurrentOfExpr));
1289 : :
2683 tgl@sss.pgh.pa.us 1290 [ + + ]: 664 : if (IsA(qual, ScalarArrayOpExpr))
1291 : : {
1292 : : /* Each element of the array yields 1 tuple */
1293 : 41 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
7153 bruce@momjian.us 1294 : 41 : Node *arraynode = (Node *) lsecond(saop->args);
1295 : :
852 tgl@sss.pgh.pa.us 1296 : 41 : ntuples += estimate_array_length(root, arraynode);
1297 : : }
2683 1298 [ + + ]: 623 : else if (IsA(qual, CurrentOfExpr))
1299 : : {
1300 : : /* CURRENT OF yields 1 tuple */
6768 1301 : 344 : ntuples++;
1302 : : }
1303 : : else
1304 : : {
1305 : : /* It's just CTID = something, count 1 tuple */
7465 1306 : 279 : ntuples++;
1307 : : }
1308 : : }
1309 : :
1310 : : /*
1311 : : * The TID qual expressions will be computed once, any other baserestrict
1312 : : * quals once per retrieved tuple.
1313 : : */
6903 1314 : 643 : cost_qual_eval(&tid_qual_cost, tidquals, root);
1315 : :
1316 : : /* fetch estimated page cost for tablespace containing table */
5964 rhaas@postgresql.org 1317 : 643 : get_tablespace_page_costs(baserel->reltablespace,
1318 : : &spc_random_page_cost,
1319 : : NULL);
1320 : :
1321 : : /* disk costs --- assume each tuple on a different page */
1322 : 643 : run_cost += spc_random_page_cost * ntuples;
1323 : :
1324 : : /* Add scanning CPU costs */
5000 tgl@sss.pgh.pa.us 1325 : 643 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1326 : :
1327 : : /* XXX currently we assume TID quals are a subset of qpquals */
1328 : 643 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1329 : 643 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
6903 1330 : 643 : tid_qual_cost.per_tuple;
9576 1331 : 643 : run_cost += cpu_per_tuple * ntuples;
1332 : :
1333 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 1334 : 643 : startup_cost += path->pathtarget->cost.startup;
1335 : 643 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1336 : :
1337 : : /*
1338 : : * There are assertions above verifying that we only reach this function
1339 : : * either when baserel->pgs_mask includes PGS_TIDSCAN or when the TID scan
1340 : : * is the only legal path, so we only need to consider the effects of
1341 : : * PGS_CONSIDER_NONPARTIAL here.
1342 : : */
97 rhaas@postgresql.org 1343 [ + - ]:GNC 643 : if (path->parallel_workers == 0)
1344 : 643 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1345 : 643 : path->disabled_nodes =
1346 : 643 : (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
9576 tgl@sss.pgh.pa.us 1347 :CBC 643 : path->startup_cost = startup_cost;
1348 : 643 : path->total_cost = startup_cost + run_cost;
9660 bruce@momjian.us 1349 : 643 : }
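 : : /*
 : :  * Worked example (hypothetical numbers, assuming the default settings
 : :  * random_page_cost = 4.0 and cpu_tuple_cost = 0.01): for a qual such as
 : :  * "ctid = ANY ('{...}')" with a 4-element array and no other restriction
 : :  * clauses,
 : :  *
 : :  *		ntuples  = 4				(one tuple per array element)
 : :  *		run_cost = 4 * 4.0 + 4 * 0.01 = 16.04
 : :  *
 : :  * i.e. one random page fetch plus one tuple's worth of CPU per TID, while
 : :  * the one-time cost of evaluating the TID qual itself is charged to
 : :  * startup_cost.
 : :  */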
1350 : :
1351 : : /*
1352 : : * cost_tidrangescan
1353 : : * Determines and sets the costs of scanning a relation using a range of
1354 : : * TIDs for 'path'
1355 : : *
1356 : : * 'baserel' is the relation to be scanned
1357 : : * 'tidrangequals' is the list of TID-checkable range quals
1358 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1359 : : */
1360 : : void
1893 drowley@postgresql.o 1361 : 1704 : cost_tidrangescan(Path *path, PlannerInfo *root,
1362 : : RelOptInfo *baserel, List *tidrangequals,
1363 : : ParamPathInfo *param_info)
1364 : : {
1365 : : Selectivity selectivity;
1366 : : double pages;
1367 : : Cost startup_cost;
1368 : : Cost cpu_run_cost;
1369 : : Cost disk_run_cost;
1370 : : QualCost qpqual_cost;
1371 : : Cost cpu_per_tuple;
1372 : : QualCost tid_qual_cost;
1373 : : double ntuples;
1374 : : double nseqpages;
1375 : : double spc_random_page_cost;
1376 : : double spc_seq_page_cost;
97 rhaas@postgresql.org 1377 :GNC 1704 : uint64 enable_mask = PGS_TIDSCAN;
1378 : :
1379 : : /* Should only be applied to base relations */
1893 drowley@postgresql.o 1380 [ - + ]:CBC 1704 : Assert(baserel->relid > 0);
1381 [ - + ]: 1704 : Assert(baserel->rtekind == RTE_RELATION);
1382 : :
1383 : : /* Mark the path with the correct row estimate */
1384 [ - + ]: 1704 : if (param_info)
1893 drowley@postgresql.o 1385 :UBC 0 : path->rows = param_info->ppi_rows;
1386 : : else
1893 drowley@postgresql.o 1387 :CBC 1704 : path->rows = baserel->rows;
1388 : :
1389 : : /* Count how many tuples and pages we expect to scan */
1390 : 1704 : selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1391 : : JOIN_INNER, NULL);
1392 : 1704 : pages = ceil(selectivity * baserel->pages);
1393 : :
1394 [ + + ]: 1704 : if (pages <= 0.0)
1395 : 35 : pages = 1.0;
1396 : :
1397 : : /*
1398 : : * The first page in a range requires a random seek, but each subsequent
1399 : : * page is just a normal sequential page read. NOTE: it's desirable for
1400 : : * TID Range Scans to cost more than the equivalent Sequential Scans,
1401 : : * because Seq Scans have some performance advantages such as scan
1402 : : * synchronization, and we'd prefer one of them to be picked unless a TID
1403 : : * Range Scan really is better.
1404 : : */
1405 : 1704 : ntuples = selectivity * baserel->tuples;
1406 : 1704 : nseqpages = pages - 1.0;
1407 : :
1408 : : /*
1409 : : * The TID qual expressions will be computed once, any other baserestrict
1410 : : * quals once per retrieved tuple.
1411 : : */
1412 : 1704 : cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1413 : :
1414 : : /* fetch estimated page cost for tablespace containing table */
1415 : 1704 : get_tablespace_page_costs(baserel->reltablespace,
1416 : : &spc_random_page_cost,
1417 : : &spc_seq_page_cost);
1418 : :
1419 : : /* disk costs; 1 random page and the remainder as seq pages */
159 drowley@postgresql.o 1420 :GNC 1704 : disk_run_cost = spc_random_page_cost + spc_seq_page_cost * nseqpages;
1421 : :
1422 : : /* Add scanning CPU costs */
1893 drowley@postgresql.o 1423 :CBC 1704 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1424 : :
1425 : : /*
1426 : : * XXX currently we assume TID quals are a subset of qpquals at this
1427 : : * point; they will be removed (if possible) when we create the plan, so
1428 : : * we subtract their cost from the total qpqual cost. (If the TID quals
1429 : : * can't be removed, this is a mistake and we're going to underestimate
1430 : : * the CPU cost a bit.)
1431 : : */
159 drowley@postgresql.o 1432 :GNC 1704 : startup_cost = qpqual_cost.startup + tid_qual_cost.per_tuple;
1893 drowley@postgresql.o 1433 :CBC 1704 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1434 : 1704 : tid_qual_cost.per_tuple;
159 drowley@postgresql.o 1435 :GNC 1704 : cpu_run_cost = cpu_per_tuple * ntuples;
1436 : :
1437 : : /* tlist eval costs are paid per output row, not per tuple scanned */
1893 drowley@postgresql.o 1438 :CBC 1704 : startup_cost += path->pathtarget->cost.startup;
159 drowley@postgresql.o 1439 :GNC 1704 : cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
1440 : :
1441 : : /* Adjust costing for parallelism, if used. */
1442 [ + + ]: 1704 : if (path->parallel_workers > 0)
1443 : : {
1444 : 40 : double parallel_divisor = get_parallel_divisor(path);
1445 : :
1446 : : /* The CPU cost is divided among all the workers. */
1447 : 40 : cpu_run_cost /= parallel_divisor;
1448 : :
1449 : : /*
1450 : : * In the case of a parallel plan, the row count needs to represent
1451 : : * the number of tuples processed per worker.
1452 : : */
1453 : 40 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1454 : : }
1455 : :
1456 : : /*
1457 : : * We should not generate this path type when PGS_TIDSCAN is unset, but we
1458 : : * might need to disable this path due to PGS_CONSIDER_NONPARTIAL.
1459 : : */
97 rhaas@postgresql.org 1460 [ - + ]: 1704 : Assert((baserel->pgs_mask & PGS_TIDSCAN) != 0);
1461 [ + + ]: 1704 : if (path->parallel_workers == 0)
1462 : 1664 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1463 : 1704 : path->disabled_nodes =
1464 : 1704 : (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1893 drowley@postgresql.o 1465 :CBC 1704 : path->startup_cost = startup_cost;
159 drowley@postgresql.o 1466 :GNC 1704 : path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
1893 drowley@postgresql.o 1467 :CBC 1704 : }
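 : : /*
 : :  * Worked example (hypothetical numbers, assuming the default settings
 : :  * random_page_cost = 4.0 and seq_page_cost = 1.0): a range qual with
 : :  * selectivity 0.25 against a 100-page relation gives
 : :  *
 : :  *		pages         = ceil(0.25 * 100) = 25
 : :  *		disk_run_cost = 4.0 + 1.0 * (25 - 1) = 28.0
 : :  *
 : :  * i.e. one random fetch to reach the start of the range, then sequential
 : :  * reads for the remaining pages.
 : :  */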
1468 : :
1469 : : /*
1470 : : * cost_subqueryscan
1471 : : * Determines and returns the cost of scanning a subquery RTE.
1472 : : *
1473 : : * 'baserel' is the relation to be scanned
1474 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1475 : : * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1476 : : */
1477 : : void
3711 tgl@sss.pgh.pa.us 1478 : 49514 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1479 : : RelOptInfo *baserel, ParamPathInfo *param_info,
1480 : : bool trivial_pathtarget)
1481 : : {
1482 : : Cost startup_cost;
1483 : : Cost run_cost;
1484 : : List *qpquals;
1485 : : QualCost qpqual_cost;
1486 : : Cost cpu_per_tuple;
97 rhaas@postgresql.org 1487 :GNC 49514 : uint64 enable_mask = 0;
1488 : :
1489 : : /* Should only be applied to base relations that are subqueries */
8331 tgl@sss.pgh.pa.us 1490 [ - + ]:CBC 49514 : Assert(baserel->relid > 0);
1491 [ - + ]: 49514 : Assert(baserel->rtekind == RTE_SUBQUERY);
1492 : :
1493 : : /*
1494 : : * We compute the rowcount estimate as the subplan's estimate times the
1495 : : * selectivity of relevant restriction clauses. In simple cases this will
1496 : : * come out the same as baserel->rows; but when dealing with parallelized
1497 : : * paths we must do it like this to get the right answer.
1498 : : */
5129 1499 [ + + ]: 49514 : if (param_info)
1462 1500 : 922 : qpquals = list_concat_copy(param_info->ppi_clauses,
1501 : 922 : baserel->baserestrictinfo);
1502 : : else
1503 : 48592 : qpquals = baserel->baserestrictinfo;
1504 : :
1505 : 49514 : path->path.rows = clamp_row_est(path->subpath->rows *
1506 : 49514 : clauselist_selectivity(root,
1507 : : qpquals,
1508 : : 0,
1509 : : JOIN_INNER,
1510 : : NULL));
1511 : :
1512 : : /*
1513 : : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1514 : : * any restriction clauses and tlist that will be attached to the
1515 : : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1516 : : * projection overhead.
1517 : : */
97 rhaas@postgresql.org 1518 [ + + ]:GNC 49514 : if (path->path.parallel_workers == 0)
1519 : 49454 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1520 : 49514 : path->path.disabled_nodes = path->subpath->disabled_nodes
1521 : 49514 : + (((baserel->pgs_mask & enable_mask) != enable_mask) ? 1 : 0);
3711 tgl@sss.pgh.pa.us 1522 :CBC 49514 : path->path.startup_cost = path->subpath->startup_cost;
1523 : 49514 : path->path.total_cost = path->subpath->total_cost;
1524 : :
1525 : : /*
1526 : : * However, if there are no relevant restriction clauses and the
1527 : : * pathtarget is trivial, then we expect that setrefs.c will optimize away
1528 : : * the SubqueryScan plan node altogether, so we should just make its cost
1529 : : * and rowcount equal to the input path's.
1530 : : *
1531 : : * Note: there are some edge cases where createplan.c will apply a
1532 : : * different targetlist to the SubqueryScan node, thus falsifying our
1533 : : * current estimate of whether the target is trivial, and making the cost
1534 : : * estimate (though not the rowcount) wrong. It does not seem worth the
1535 : : * extra complication to try to account for that exactly, especially since
1536 : : * that behavior falsifies other cost estimates as well.
1537 : : */
1386 1538 [ + + + + ]: 49514 : if (qpquals == NIL && trivial_pathtarget)
1539 : 21871 : return;
1540 : :
5129 1541 : 27643 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1542 : :
1543 : 27643 : startup_cost = qpqual_cost.startup;
1544 : 27643 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1462 1545 : 27643 : run_cost = cpu_per_tuple * path->subpath->rows;
1546 : :
1547 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3711 1548 : 27643 : startup_cost += path->path.pathtarget->cost.startup;
1549 : 27643 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1550 : :
1551 : 27643 : path->path.startup_cost += startup_cost;
1552 : 27643 : path->path.total_cost += startup_cost + run_cost;
1553 : : }
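 : : /*
 : :  * For example (hypothetical numbers): if the subpath is expected to
 : :  * return 1000 rows and the attached quals have selectivity 0.1, we set
 : :  * path->path.rows = clamp_row_est(1000 * 0.1) = 100, but charge
 : :  *
 : :  *		run_cost = (cpu_tuple_cost + qpqual_cost.per_tuple) * 1000
 : :  *
 : :  * because every subplan row must be fetched and tested, not only the
 : :  * 100 rows expected to survive the quals.
 : :  */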
1554 : :
1555 : : /*
1556 : : * cost_functionscan
1557 : : * Determines and returns the cost of scanning a function RTE.
1558 : : *
1559 : : * 'baserel' is the relation to be scanned
1560 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1561 : : */
1562 : : void
5019 1563 : 35764 : cost_functionscan(Path *path, PlannerInfo *root,
1564 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1565 : : {
8759 1566 : 35764 : Cost startup_cost = 0;
1567 : 35764 : Cost run_cost = 0;
1568 : : QualCost qpqual_cost;
1569 : : Cost cpu_per_tuple;
1570 : : RangeTblEntry *rte;
1571 : : QualCost exprcost;
97 rhaas@postgresql.org 1572 :GNC 35764 : uint64 enable_mask = 0;
1573 : :
1574 : : /* Should only be applied to base relations that are functions */
8487 tgl@sss.pgh.pa.us 1575 [ - + ]:CBC 35764 : Assert(baserel->relid > 0);
6954 1576 [ + - ]: 35764 : rte = planner_rt_fetch(baserel->relid, root);
7043 1577 [ - + ]: 35764 : Assert(rte->rtekind == RTE_FUNCTION);
1578 : :
1579 : : /* Mark the path with the correct row estimate */
5019 1580 [ + + ]: 35764 : if (param_info)
1581 : 4356 : path->rows = param_info->ppi_rows;
1582 : : else
1583 : 31408 : path->rows = baserel->rows;
1584 : :
1585 : : /*
1586 : : * Estimate costs of executing the function expression(s).
1587 : : *
1588 : : * Currently, nodeFunctionscan.c always executes the functions to
1589 : : * completion before returning any rows, and caches the results in a
1590 : : * tuplestore. So the function eval cost is all startup cost, and per-row
1591 : : * costs are minimal.
1592 : : *
1593 : : * XXX in principle we ought to charge tuplestore spill costs if the
1594 : : * number of rows is large. However, given how phony our rowcount
1595 : : * estimates for functions tend to be, there's not a lot of point in that
1596 : : * refinement right now.
1597 : : */
4548 1598 : 35764 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1599 : :
6079 1600 : 35764 : startup_cost += exprcost.startup + exprcost.per_tuple;
1601 : :
1602 : : /* Add scanning CPU costs */
5019 1603 : 35764 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1604 : :
1605 : 35764 : startup_cost += qpqual_cost.startup;
1606 : 35764 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
8759 1607 : 35764 : run_cost += cpu_per_tuple * baserel->tuples;
1608 : :
1609 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 1610 : 35764 : startup_cost += path->pathtarget->cost.startup;
1611 : 35764 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1612 : :
97 rhaas@postgresql.org 1613 [ + - ]:GNC 35764 : if (path->parallel_workers == 0)
1614 : 35764 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1615 : 35764 : path->disabled_nodes =
1616 : 35764 : (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
8759 tgl@sss.pgh.pa.us 1617 :CBC 35764 : path->startup_cost = startup_cost;
1618 : 35764 : path->total_cost = startup_cost + run_cost;
1619 : 35764 : }
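 : : /*
 : :  * The resulting model, condensed (nothing beyond the charges above):
 : :  *
 : :  *		startup_cost = exprcost.startup + exprcost.per_tuple
 : :  *					 + qpqual_cost.startup + pathtarget startup cost
 : :  *		run_cost	 = (cpu_tuple_cost + qpqual_cost.per_tuple) * tuples
 : :  *					 + pathtarget per-tuple cost * rows
 : :  *
 : :  * The whole function evaluation cost lands in startup_cost because the
 : :  * tuplestore must be filled before the first row can be returned.
 : :  */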
1620 : :
1621 : : /*
1622 : : * cost_tablefuncscan
1623 : : * Determines and returns the cost of scanning a table function.
1624 : : *
1625 : : * 'baserel' is the relation to be scanned
1626 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1627 : : */
1628 : : void
3345 alvherre@alvh.no-ip. 1629 : 517 : cost_tablefuncscan(Path *path, PlannerInfo *root,
1630 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1631 : : {
1632 : 517 : Cost startup_cost = 0;
1633 : 517 : Cost run_cost = 0;
1634 : : QualCost qpqual_cost;
1635 : : Cost cpu_per_tuple;
1636 : : RangeTblEntry *rte;
1637 : : QualCost exprcost;
97 rhaas@postgresql.org 1638 :GNC 517 : uint64 enable_mask = 0;
1639 : :
1640 : : /* Should only be applied to base relations that are functions */
3345 alvherre@alvh.no-ip. 1641 [ - + ]:CBC 517 : Assert(baserel->relid > 0);
1642 [ + - ]: 517 : rte = planner_rt_fetch(baserel->relid, root);
1643 [ - + ]: 517 : Assert(rte->rtekind == RTE_TABLEFUNC);
1644 : :
1645 : : /* Mark the path with the correct row estimate */
1646 [ + + ]: 517 : if (param_info)
1647 : 195 : path->rows = param_info->ppi_rows;
1648 : : else
1649 : 322 : path->rows = baserel->rows;
1650 : :
1651 : : /*
1652 : : * Estimate costs of executing the table func expression(s).
1653 : : *
1654 : : * XXX in principle we ought to charge tuplestore spill costs if the
1655 : : * number of rows is large. However, given how phony our rowcount
1656 : : * estimates for tablefuncs tend to be, there's not a lot of point in that
1657 : : * refinement right now.
1658 : : */
1659 : 517 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1660 : :
1661 : 517 : startup_cost += exprcost.startup + exprcost.per_tuple;
1662 : :
1663 : : /* Add scanning CPU costs */
1664 : 517 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1665 : :
1666 : 517 : startup_cost += qpqual_cost.startup;
1667 : 517 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1668 : 517 : run_cost += cpu_per_tuple * baserel->tuples;
1669 : :
1670 : : /* tlist eval costs are paid per output row, not per tuple scanned */
1671 : 517 : startup_cost += path->pathtarget->cost.startup;
1672 : 517 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1673 : :
97 rhaas@postgresql.org 1674 [ + - ]:GNC 517 : if (path->parallel_workers == 0)
1675 : 517 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1676 : 517 : path->disabled_nodes =
1677 : 517 : (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
3345 alvherre@alvh.no-ip. 1678 :CBC 517 : path->startup_cost = startup_cost;
1679 : 517 : path->total_cost = startup_cost + run_cost;
1680 : 517 : }
1681 : :
1682 : : /*
1683 : : * cost_valuesscan
1684 : : * Determines and returns the cost of scanning a VALUES RTE.
1685 : : *
1686 : : * 'baserel' is the relation to be scanned
1687 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1688 : : */
1689 : : void
5014 tgl@sss.pgh.pa.us 1690 : 6858 : cost_valuesscan(Path *path, PlannerInfo *root,
1691 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1692 : : {
7216 mail@joeconway.com 1693 : 6858 : Cost startup_cost = 0;
1694 : 6858 : Cost run_cost = 0;
1695 : : QualCost qpqual_cost;
1696 : : Cost cpu_per_tuple;
97 rhaas@postgresql.org 1697 :GNC 6858 : uint64 enable_mask = 0;
1698 : :
1699 : : /* Should only be applied to base relations that are values lists */
7216 mail@joeconway.com 1700 [ - + ]:CBC 6858 : Assert(baserel->relid > 0);
1701 [ - + ]: 6858 : Assert(baserel->rtekind == RTE_VALUES);
1702 : :
1703 : : /* Mark the path with the correct row estimate */
5014 tgl@sss.pgh.pa.us 1704 [ + + ]: 6858 : if (param_info)
1705 : 55 : path->rows = param_info->ppi_rows;
1706 : : else
1707 : 6803 : path->rows = baserel->rows;
1708 : :
1709 : : /*
1710 : : * For now, estimate list evaluation cost at one operator eval per list
1711 : : * (probably pretty bogus, but is it worth being smarter?)
1712 : : */
7216 mail@joeconway.com 1713 : 6858 : cpu_per_tuple = cpu_operator_cost;
1714 : :
1715 : : /* Add scanning CPU costs */
5014 tgl@sss.pgh.pa.us 1716 : 6858 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1717 : :
1718 : 6858 : startup_cost += qpqual_cost.startup;
1719 : 6858 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
7216 mail@joeconway.com 1720 : 6858 : run_cost += cpu_per_tuple * baserel->tuples;
1721 : :
1722 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 tgl@sss.pgh.pa.us 1723 : 6858 : startup_cost += path->pathtarget->cost.startup;
1724 : 6858 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1725 : :
97 rhaas@postgresql.org 1726 [ + - ]:GNC 6858 : if (path->parallel_workers == 0)
1727 : 6858 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1728 : 6858 : path->disabled_nodes =
1729 : 6858 : (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
7216 mail@joeconway.com 1730 :CBC 6858 : path->startup_cost = startup_cost;
1731 : 6858 : path->total_cost = startup_cost + run_cost;
1732 : 6858 : }
1733 : :
1734 : : /*
1735 : : * cost_ctescan
1736 : : * Determines and returns the cost of scanning a CTE RTE.
1737 : : *
1738 : : * Note: this is used for both self-reference and regular CTEs; the
1739 : : * possible cost differences are below the threshold of what we could
1740 : : * estimate accurately anyway. Note that the costs of evaluating the
1741 : : * referenced CTE query are added into the final plan as initplan costs,
1742 : : * and should NOT be counted here.
1743 : : */
1744 : : void
5000 tgl@sss.pgh.pa.us 1745 : 3607 : cost_ctescan(Path *path, PlannerInfo *root,
1746 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1747 : : {
6422 1748 : 3607 : Cost startup_cost = 0;
1749 : 3607 : Cost run_cost = 0;
1750 : : QualCost qpqual_cost;
1751 : : Cost cpu_per_tuple;
97 rhaas@postgresql.org 1752 :GNC 3607 : uint64 enable_mask = 0;
1753 : :
1754 : : /* Should only be applied to base relations that are CTEs */
6422 tgl@sss.pgh.pa.us 1755 [ - + ]:CBC 3607 : Assert(baserel->relid > 0);
1756 [ - + ]: 3607 : Assert(baserel->rtekind == RTE_CTE);
1757 : :
1758 : : /* Mark the path with the correct row estimate */
5000 1759 [ - + ]: 3607 : if (param_info)
5000 tgl@sss.pgh.pa.us 1760 :UBC 0 : path->rows = param_info->ppi_rows;
1761 : : else
5000 tgl@sss.pgh.pa.us 1762 :CBC 3607 : path->rows = baserel->rows;
1763 : :
1764 : : /* Charge one CPU tuple cost per row for tuplestore manipulation */
6422 1765 : 3607 : cpu_per_tuple = cpu_tuple_cost;
1766 : :
1767 : : /* Add scanning CPU costs */
5000 1768 : 3607 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1769 : :
1770 : 3607 : startup_cost += qpqual_cost.startup;
1771 : 3607 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
6422 1772 : 3607 : run_cost += cpu_per_tuple * baserel->tuples;
1773 : :
1774 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 1775 : 3607 : startup_cost += path->pathtarget->cost.startup;
1776 : 3607 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1777 : :
97 rhaas@postgresql.org 1778 [ + - ]:GNC 3607 : if (path->parallel_workers == 0)
1779 : 3607 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1780 : 3607 : path->disabled_nodes =
1781 : 3607 : (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
6422 tgl@sss.pgh.pa.us 1782 :CBC 3607 : path->startup_cost = startup_cost;
1783 : 3607 : path->total_cost = startup_cost + run_cost;
1784 : 3607 : }
1785 : :
1786 : : /*
1787 : : * cost_namedtuplestorescan
1788 : : * Determines and returns the cost of scanning a named tuplestore.
1789 : : */
1790 : : void
3322 kgrittn@postgresql.o 1791 : 438 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1792 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1793 : : {
1794 : 438 : Cost startup_cost = 0;
1795 : 438 : Cost run_cost = 0;
1796 : : QualCost qpqual_cost;
1797 : : Cost cpu_per_tuple;
97 rhaas@postgresql.org 1798 :GNC 438 : uint64 enable_mask = 0;
1799 : :
1800 : : /* Should only be applied to base relations that are Tuplestores */
3322 kgrittn@postgresql.o 1801 [ - + ]:CBC 438 : Assert(baserel->relid > 0);
1802 [ - + ]: 438 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1803 : :
1804 : : /* Mark the path with the correct row estimate */
1805 [ - + ]: 438 : if (param_info)
3322 kgrittn@postgresql.o 1806 :UBC 0 : path->rows = param_info->ppi_rows;
1807 : : else
3322 kgrittn@postgresql.o 1808 :CBC 438 : path->rows = baserel->rows;
1809 : :
1810 : : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1811 : 438 : cpu_per_tuple = cpu_tuple_cost;
1812 : :
1813 : : /* Add scanning CPU costs */
1814 : 438 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1815 : :
1816 : 438 : startup_cost += qpqual_cost.startup;
1817 : 438 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1818 : 438 : run_cost += cpu_per_tuple * baserel->tuples;
1819 : :
97 rhaas@postgresql.org 1820 [ + - ]:GNC 438 : if (path->parallel_workers == 0)
1821 : 438 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1822 : 438 : path->disabled_nodes =
1823 : 438 : (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
3322 kgrittn@postgresql.o 1824 :CBC 438 : path->startup_cost = startup_cost;
1825 : 438 : path->total_cost = startup_cost + run_cost;
1826 : 438 : }
1827 : :
1828 : : /*
1829 : : * cost_resultscan
1830 : : * Determines and returns the cost of scanning an RTE_RESULT relation.
1831 : : */
1832 : : void
2654 tgl@sss.pgh.pa.us 1833 : 3681 : cost_resultscan(Path *path, PlannerInfo *root,
1834 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1835 : : {
1836 : 3681 : Cost startup_cost = 0;
1837 : 3681 : Cost run_cost = 0;
1838 : : QualCost qpqual_cost;
1839 : : Cost cpu_per_tuple;
97 rhaas@postgresql.org 1840 :GNC 3681 : uint64 enable_mask = 0;
1841 : :
1842 : : /* Should only be applied to RTE_RESULT base relations */
2654 tgl@sss.pgh.pa.us 1843 [ - + ]:CBC 3681 : Assert(baserel->relid > 0);
1844 [ - + ]: 3681 : Assert(baserel->rtekind == RTE_RESULT);
1845 : :
1846 : : /* Mark the path with the correct row estimate */
1847 [ + + ]: 3681 : if (param_info)
1848 : 165 : path->rows = param_info->ppi_rows;
1849 : : else
1850 : 3516 : path->rows = baserel->rows;
1851 : :
1852 : : /* We charge qual cost plus cpu_tuple_cost */
1853 : 3681 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1854 : :
1855 : 3681 : startup_cost += qpqual_cost.startup;
1856 : 3681 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1857 : 3681 : run_cost += cpu_per_tuple * baserel->tuples;
1858 : :
97 rhaas@postgresql.org 1859 [ + - ]:GNC 3681 : if (path->parallel_workers == 0)
1860 : 3681 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1861 : 3681 : path->disabled_nodes =
1862 : 3681 : (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
2654 tgl@sss.pgh.pa.us 1863 :CBC 3681 : path->startup_cost = startup_cost;
1864 : 3681 : path->total_cost = startup_cost + run_cost;
1865 : 3681 : }
1866 : :
1867 : : /*
1868 : : * cost_recursive_union
1869 : : * Determines and returns the cost of performing a recursive union,
1870 : : * and also the estimated output size.
1871 : : *
1872 : : * We are given Paths for the nonrecursive and recursive terms.
1873 : : */
1874 : : void
3711 1875 : 690 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1876 : : {
1877 : : Cost startup_cost;
1878 : : Cost total_cost;
1879 : : double total_rows;
97 rhaas@postgresql.org 1880 :GNC 690 : uint64 enable_mask = 0;
1881 : :
1882 : : /* We probably have decent estimates for the non-recursive term */
6422 tgl@sss.pgh.pa.us 1883 :CBC 690 : startup_cost = nrterm->startup_cost;
1884 : 690 : total_cost = nrterm->total_cost;
3711 1885 : 690 : total_rows = nrterm->rows;
1886 : :
1887 : : /*
1888 : : * We arbitrarily assume that about 10 recursive iterations will be
1889 : : * needed, and that we've managed to get a good fix on the cost and output
1890 : : * size of each one of them. These are mighty shaky assumptions but it's
1891 : : * hard to see how to do better.
1892 : : */
6422 1893 : 690 : total_cost += 10 * rterm->total_cost;
3711 1894 : 690 : total_rows += 10 * rterm->rows;
1895 : :
1896 : : /*
1897 : : * Also charge cpu_tuple_cost per row to account for the costs of
1898 : : * manipulating the tuplestores. (We don't worry about possible
1899 : : * spill-to-disk costs.)
1900 : : */
6422 1901 : 690 : total_cost += cpu_tuple_cost * total_rows;
1902 : :
97 rhaas@postgresql.org 1903 [ + - ]:GNC 690 : if (runion->parallel_workers == 0)
1904 : 690 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
1905 : 690 : runion->disabled_nodes =
1906 : 690 : (runion->parent->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
6422 tgl@sss.pgh.pa.us 1907 :CBC 690 : runion->startup_cost = startup_cost;
1908 : 690 : runion->total_cost = total_cost;
3711 1909 : 690 : runion->rows = total_rows;
1910 : 690 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1911 : : rterm->pathtarget->width);
6422 1912 : 690 : }
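 : : /*
 : :  * Worked example (hypothetical numbers, default cpu_tuple_cost = 0.01):
 : :  * with a non-recursive term of cost 100 producing 1000 rows and a
 : :  * recursive term of cost 50 producing 100 rows per iteration,
 : :  *
 : :  *		total_rows = 1000 + 10 * 100 = 2000
 : :  *		total_cost = 100 + 10 * 50 + 0.01 * 2000 = 620
 : :  *
 : :  * The 10x multiplier is the arbitrary iteration-count assumption
 : :  * described above.
 : :  */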
1913 : :
1914 : : /*
1915 : : * cost_tuplesort
1916 : : * Determines and returns the cost of sorting a relation using tuplesort,
1917 : : * not including the cost of reading the input data.
1918 : : *
1919 : : * If the total volume of data to sort is less than sort_mem, we will do
1920 : : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1921 : : * comparisons for t tuples.
1922 : : *
1923 : : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1924 : : * algorithm. There will still be about t*log2(t) tuple comparisons in
1925 : : * total, but we will also need to write and read each tuple once per
1926 : : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1927 : : * number of initial runs formed and M is the merge order used by tuplesort.c.
1928 : : * Since the average initial run should be about sort_mem, we have
1929 : : * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1930 : : * cpu = comparison_cost * t * log2(t)
1931 : : *
1932 : : * If the sort is bounded (i.e., only the first k result tuples are needed)
1933 : : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1934 : : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1935 : : *
1936 : : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1937 : : * accesses (XXX can't we refine that guess?)
1938 : : *
1939 : : * By default, we charge two operator evals per tuple comparison, which should
1940 : : * be in the right ballpark in most cases. The caller can tweak this by
1941 : : * specifying nonzero comparison_cost; typically that's used for any extra
1942 : : * work that has to be done to prepare the inputs to the comparison operators.
1943 : : *
1944 : : * 'tuples' is the number of tuples in the relation
1945 : : * 'width' is the average tuple width in bytes
1946 : : * 'comparison_cost' is the extra cost per comparison, if any
1947 : : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1948 : : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1949 : : */
1950 : : static void
1310 1951 : 1592809 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1952 : : double tuples, int width,
1953 : : Cost comparison_cost, int sort_mem,
1954 : : double limit_tuples)
1955 : : {
6941 1956 : 1592809 : double input_bytes = relation_byte_size(tuples, width);
1957 : : double output_bytes;
1958 : : double output_tuples;
459 1959 : 1592809 : int64 sort_mem_bytes = sort_mem * (int64) 1024;
1960 : :
1961 : : /*
1962 : : * We want to be sure the cost of a sort is never estimated as zero, even
1963 : : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1964 : : */
9613 1965 [ + + ]: 1592809 : if (tuples < 2.0)
1966 : 427441 : tuples = 2.0;
1967 : :
1968 : : /* Include the default cost-per-comparison */
1310 1969 : 1592809 : comparison_cost += 2.0 * cpu_operator_cost;
1970 : :
1971 : : /* Do we have a useful LIMIT? */
6941 1972 [ + + + + ]: 1592809 : if (limit_tuples > 0 && limit_tuples < tuples)
1973 : : {
1974 : 1314 : output_tuples = limit_tuples;
1975 : 1314 : output_bytes = relation_byte_size(output_tuples, width);
1976 : : }
1977 : : else
1978 : : {
1979 : 1591495 : output_tuples = tuples;
1980 : 1591495 : output_bytes = input_bytes;
1981 : : }
1982 : :
5689 1983 [ + + ]: 1592809 : if (output_bytes > sort_mem_bytes)
1984 : : {
1985 : : /*
1986 : : * We'll have to use a disk-based sort of all the tuples
1987 : : */
6941 1988 : 12615 : double npages = ceil(input_bytes / BLCKSZ);
3679 rhaas@postgresql.org 1989 : 12615 : double nruns = input_bytes / sort_mem_bytes;
5689 tgl@sss.pgh.pa.us 1990 : 12615 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1991 : : double log_runs;
1992 : : double npageaccesses;
1993 : :
1994 : : /*
1995 : : * CPU costs
1996 : : *
1997 : : * Assume about N log2 N comparisons
1998 : : */
1310 1999 : 12615 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
2000 : :
2001 : : /* Disk costs */
2002 : :
2003 : : /* Compute logM(r) as log(r) / log(M) */
7380 2004 [ + + ]: 12615 : if (nruns > mergeorder)
2005 : 3435 : log_runs = ceil(log(nruns) / log(mergeorder));
2006 : : else
9613 2007 : 9180 : log_runs = 1.0;
9576 2008 : 12615 : npageaccesses = 2.0 * npages * log_runs;
2009 : : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
2220 tomas.vondra@postgre 2010 : 12615 : *startup_cost += npageaccesses *
7274 tgl@sss.pgh.pa.us 2011 : 12615 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
2012 : : }
5689 2013 [ + + - + ]: 1580194 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
2014 : : {
2015 : : /*
2016 : : * We'll use a bounded heap-sort keeping just K tuples in memory, for
2017 : : * a total number of tuple comparisons of N log2 K; but the constant
2018 : : * factor is a bit higher than for quicksort. Tweak it so that the
2019 : : * cost curve is continuous at the crossover point.
2020 : : */
1310 2021 : 877 : *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
2022 : : }
2023 : : else
2024 : : {
2025 : : /* We'll use plain quicksort on all the input tuples */
2026 : 1579317 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
2027 : : }
2028 : :
2029 : : /*
2030 : : * Also charge a small amount (arbitrarily set equal to operator cost) per
2031 : : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
2032 : : * doesn't do qual-checking or projection, so it has less overhead than
2033 : : * most plan nodes. Note it's correct to use tuples not output_tuples
2034 : : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
2035 : : * counting the LIMIT otherwise.
2036 : : */
2220 tomas.vondra@postgre 2037 : 1592809 : *run_cost = cpu_operator_cost * tuples;
2038 : 1592809 : }
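 : : /*
 : :  * Condensing the external-sort branch above into one formula, with
 : :  * r = input_bytes / sort_mem_bytes and M = tuplesort_merge_order():
 : :  *
 : :  *		startup_cost = comparison_cost * t * log2(t)
 : :  *					 + 2 * ceil(input_bytes / BLCKSZ)
 : :  *					   * Max(1.0, ceil(log(r) / log(M)))
 : :  *					   * (0.75 * seq_page_cost + 0.25 * random_page_cost)
 : :  *		run_cost	 = cpu_operator_cost * t
 : :  *
 : :  * which is just the "disk traffic" and "cpu" estimates from the header
 : :  * comment with the 3/4-sequential page-access assumption plugged in.
 : :  */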
2039 : :
2040 : : /*
2041 : : * cost_incremental_sort
2042 : : * Determines and returns the cost of sorting a relation incrementally, when
2043 : : * the input path is presorted by a prefix of the pathkeys.
2044 : : *
2045 : : * 'presorted_keys' is the number of leading pathkeys by which the input path
2046 : : * is sorted.
2047 : : *
2048 : : * We estimate the number of groups into which the relation is divided by the
2049 : : * leading pathkeys, and then calculate the cost of sorting a single group
2050 : : * with tuplesort using cost_tuplesort().
2051 : : */
2052 : : void
2053 : 10353 : cost_incremental_sort(Path *path,
2054 : : PlannerInfo *root, List *pathkeys, int presorted_keys,
2055 : : int input_disabled_nodes,
2056 : : Cost input_startup_cost, Cost input_total_cost,
2057 : : double input_tuples, int width, Cost comparison_cost, int sort_mem,
2058 : : double limit_tuples)
2059 : : {
2060 : : Cost startup_cost,
2061 : : run_cost,
2062 : 10353 : input_run_cost = input_total_cost - input_startup_cost;
2063 : : double group_tuples,
2064 : : input_groups;
2065 : : Cost group_startup_cost,
2066 : : group_run_cost,
2067 : : group_input_run_cost;
2068 : 10353 : List *presortedExprs = NIL;
2069 : : ListCell *l;
2203 2070 : 10353 : bool unknown_varno = false;
2071 : :
1236 drowley@postgresql.o 2072 [ + - - + ]: 10353 : Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
2073 : :
2074 : : /*
2075 : : * We want to be sure the cost of a sort is never estimated as zero, even
2076 : : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
2077 : : */
2220 tomas.vondra@postgre 2078 [ + + ]: 10353 : if (input_tuples < 2.0)
2079 : 5350 : input_tuples = 2.0;
2080 : :
2081 : : /* Default estimate of number of groups, capped to one group per row. */
2203 2082 [ + + ]: 10353 : input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
2083 : :
2084 : : /*
2085 : : * Extract presorted keys as list of expressions.
2086 : : *
2087 : : * We need to be careful about Vars containing "varno 0" which might have
2088 : : * been introduced by generate_append_tlist, which would confuse
2089 : : * estimate_num_groups (in fact it'd fail for such expressions). See
2090 : : * recurse_set_operations which has to deal with the same issue.
2091 : : *
2092 : : * Unlike recurse_set_operations we can't access the original target list
2093 : : * here, and even if we could it's not very clear how useful that would be
2094 : : * for a set operation combining multiple tables. So we simply detect if
2095 : : * there are any expressions with "varno 0" and use the default
2096 : : * DEFAULT_NUM_DISTINCT in that case.
2097 : : *
2098 : : * We might also use either 1.0 (a single group) or input_tuples (each row
2099 : : * being a separate group), pretty much the worst and best case for
2100 : : * incremental sort. But those are extreme cases and using something in
2101 : : * between seems reasonable. Furthermore, generate_append_tlist is used
2102 : : * for set operations, which are likely to produce mostly unique output
2103 : : * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2104 : : * while maintaining lower startup cost.
2105 : : */
2220 2106 [ + - + - : 10708 : foreach(l, pathkeys)
+ - ]
2107 : : {
2108 : 10708 : PathKey *key = (PathKey *) lfirst(l);
2109 : 10708 : EquivalenceMember *member = (EquivalenceMember *)
1082 tgl@sss.pgh.pa.us 2110 : 10708 : linitial(key->pk_eclass->ec_members);
2111 : :
2112 : : /*
2113 : : * Check if the expression contains Var with "varno 0" so that we
2114 : : * don't call estimate_num_groups in that case.
2115 : : */
1930 2116 [ + + ]: 10708 : if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2117 : : {
2203 tomas.vondra@postgre 2118 : 7 : unknown_varno = true;
2119 : 7 : break;
2120 : : }
2121 : :
2122 : : /* expression not containing any Vars with "varno 0" */
2220 2123 : 10701 : presortedExprs = lappend(presortedExprs, member->em_expr);
2124 : :
1236 drowley@postgresql.o 2125 [ + + ]: 10701 : if (foreach_current_index(l) + 1 >= presorted_keys)
2220 tomas.vondra@postgre 2126 : 10346 : break;
2127 : : }
2128 : :
2129 : : /* Estimate the number of groups with equal presorted keys. */
2203 2130 [ + + ]: 10353 : if (!unknown_varno)
1862 drowley@postgresql.o 2131 : 10346 : input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2132 : : NULL, NULL);
2133 : :
2220 tomas.vondra@postgre 2134 : 10353 : group_tuples = input_tuples / input_groups;
2135 : 10353 : group_input_run_cost = input_run_cost / input_groups;
2136 : :
2137 : : /*
2138 : : * Estimate the average cost of sorting of one group where presorted keys
2139 : : * are equal.
2140 : : */
1310 tgl@sss.pgh.pa.us 2141 : 10353 : cost_tuplesort(&group_startup_cost, &group_run_cost,
2142 : : group_tuples, width, comparison_cost, sort_mem,
2143 : : limit_tuples);
2144 : :
2145 : : /*
2146 : : * Startup cost of incremental sort is the startup cost of its first group
2147 : : * plus the cost of its input.
2148 : : */
1236 drowley@postgresql.o 2149 : 10353 : startup_cost = group_startup_cost + input_startup_cost +
2150 : : group_input_run_cost;
2151 : :
2152 : : /*
2153 : : * After we started producing tuples from the first group, the cost of
2154 : : * producing all the tuples is given by the cost to finish processing this
2155 : : * group, plus the total cost to process the remaining groups, plus the
2156 : : * remaining cost of input.
2157 : : */
2158 : 10353 : run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2159 : 10353 : (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2160 : :
2161 : : /*
2162 : : * Incremental sort adds some overhead by itself. Firstly, it has to
2163 : : * detect the sort groups. This is roughly equal to one extra copy and
2164 : : * comparison per tuple.
2165 : : */
2220 tomas.vondra@postgre 2166 : 10353 : run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2167 : :
2168 : : /*
2169 : : * Additionally, we charge double cpu_tuple_cost for each input group to
2170 : : * account for the tuplesort_reset that's performed after each group.
2171 : : */
2172 : 10353 : run_cost += 2.0 * cpu_tuple_cost * input_groups;
2173 : :
2174 : 10353 : path->rows = input_tuples;
2175 : :
2176 : : /*
2177 : : * We should not generate these paths when enable_incremental_sort=false.
2178 : : * We can ignore PGS_CONSIDER_NONPARTIAL here, because if it's relevant,
2179 : : * it will have already affected the input path.
2180 : : */
622 rhaas@postgresql.org 2181 [ - + ]: 10353 : Assert(enable_incremental_sort);
2182 : 10353 : path->disabled_nodes = input_disabled_nodes;
2183 : :
2220 tomas.vondra@postgre 2184 : 10353 : path->startup_cost = startup_cost;
2185 : 10353 : path->total_cost = startup_cost + run_cost;
2186 : 10353 : }
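 : : /*
 : :  * Putting the pieces together (a condensed restatement, where G is the
 : :  * estimated number of presorted-key groups):
 : :  *
 : :  *		startup_cost = group_startup_cost + input_startup_cost
 : :  *					 + group_input_run_cost
 : :  *		run_cost	 = group_run_cost
 : :  *					 + (G - 1) * (group_startup_cost + group_run_cost
 : :  *								  + group_input_run_cost)
 : :  *					 + (cpu_tuple_cost + comparison_cost) * input_tuples
 : :  *					 + 2.0 * cpu_tuple_cost * G
 : :  *
 : :  * so only the first group's sort must complete before the first row can
 : :  * be returned; the remaining G - 1 groups are charged to run cost.
 : :  */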
2187 : :
2188 : : /*
2189 : : * cost_sort
2190 : : * Determines and returns the cost of sorting a relation, including
2191 : : * the cost of reading the input data.
2192 : : *
2193 : : * NOTE: some callers currently pass NIL for pathkeys because they
2194 : : * can't conveniently supply the sort keys. Since this routine doesn't
2195 : : * currently do anything with pathkeys anyway, that doesn't matter...
2196 : : * but if it ever does, it should react gracefully to lack of key data.
2197 : : * (Actually, the thing we'd most likely be interested in is just the number
2198 : : * of sort keys, which all callers *could* supply.)
2199 : : */
2200 : : void
2201 : 1582456 : cost_sort(Path *path, PlannerInfo *root,
2202 : : List *pathkeys, int input_disabled_nodes,
2203 : : Cost input_cost, double tuples, int width,
2204 : : Cost comparison_cost, int sort_mem,
2205 : : double limit_tuples)
2206 : :
2207 : : {
2208 : : Cost startup_cost;
2209 : : Cost run_cost;
2210 : :
1310 tgl@sss.pgh.pa.us 2211 : 1582456 : cost_tuplesort(&startup_cost, &run_cost,
2212 : : tuples, width,
2213 : : comparison_cost, sort_mem,
2214 : : limit_tuples);
2215 : :
2220 tomas.vondra@postgre 2216 : 1582456 : startup_cost += input_cost;
2217 : :
2218 : : /*
2219 : : * We can ignore PGS_CONSIDER_NONPARTIAL here, because if it's relevant,
2220 : : * it will have already affected the input path.
2221 : : */
2222 : 1582456 : path->rows = tuples;
622 rhaas@postgresql.org 2223 : 1582456 : path->disabled_nodes = input_disabled_nodes + (enable_sort ? 0 : 1);
9576 tgl@sss.pgh.pa.us 2224 : 1582456 : path->startup_cost = startup_cost;
2225 : 1582456 : path->total_cost = startup_cost + run_cost;
10892 scrappy@hub.org 2226 : 1582456 : }
2227 : :
2228 : : /*
2229 : : * append_nonpartial_cost
2230 : : * Estimate the cost of the non-partial paths in a Parallel Append.
2231 : : * The non-partial paths are assumed to be the first "numpaths" paths
2232 : : * from the subpaths list, and to be in order of decreasing cost.
2233 : : */
2234 : : static Cost
3073 rhaas@postgresql.org 2235 : 21741 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2236 : : {
2237 : : Cost *costarr;
2238 : : int arrlen;
2239 : : ListCell *l;
2240 : : ListCell *cell;
2241 : : int path_index;
2242 : : int min_index;
2243 : : int max_index;
2244 : :
2245 [ + + ]: 21741 : if (numpaths == 0)
2246 : 17482 : return 0;
2247 : :
2248 : : /*
2249 : : * Array length is number of workers or number of relevant paths,
2250 : : * whichever is less.
2251 : : */
2252 : 4259 : arrlen = Min(parallel_workers, numpaths);
146 michael@paquier.xyz 2253 :GNC 4259 : costarr = palloc_array(Cost, arrlen);
2254 : :
2255 : : /* The first few paths will each be claimed by a different worker. */
3073 rhaas@postgresql.org 2256 :CBC 4259 : path_index = 0;
2257 [ + - + + : 12378 : foreach(cell, subpaths)
+ + ]
2258 : : {
2259 : 9294 : Path *subpath = (Path *) lfirst(cell);
2260 : :
2261 [ + + ]: 9294 : if (path_index == arrlen)
2262 : 1175 : break;
2263 : 8119 : costarr[path_index++] = subpath->total_cost;
2264 : : }
2265 : :
2266 : : /*
2267 : : * Since subpaths are sorted by decreasing cost, the last one will have
2268 : : * the minimum cost.
2269 : : */
2270 : 4259 : min_index = arrlen - 1;
2271 : :
2272 : : /*
2273 : : * For each of the remaining subpaths, add its cost to the array element
2274 : : * with minimum cost.
2275 : : */
2486 tgl@sss.pgh.pa.us 2276 [ + - + + : 8035 : for_each_cell(l, subpaths, cell)
+ + ]
2277 : : {
3073 rhaas@postgresql.org 2278 : 4231 : Path *subpath = (Path *) lfirst(l);
2279 : :
2280 : : /* Consider only the non-partial paths */
2281 [ + + ]: 4231 : if (path_index++ == numpaths)
2282 : 455 : break;
2283 : :
2284 : 3776 : costarr[min_index] += subpath->total_cost;
2285 : :
2286 : : /* Update the new min cost array index */
1350 drowley@postgresql.o 2287 : 3776 : min_index = 0;
2288 [ + + ]: 11358 : for (int i = 0; i < arrlen; i++)
2289 : : {
3073 rhaas@postgresql.org 2290 [ + + ]: 7582 : if (costarr[i] < costarr[min_index])
2291 : 1057 : min_index = i;
2292 : : }
2293 : : }
2294 : :
2295 : : /* Return the highest cost from the array */
1350 drowley@postgresql.o 2296 : 4259 : max_index = 0;
2297 [ + + ]: 12378 : for (int i = 0; i < arrlen; i++)
2298 : : {
3073 rhaas@postgresql.org 2299 [ + + ]: 8119 : if (costarr[i] > costarr[max_index])
2300 : 518 : max_index = i;
2301 : : }
2302 : :
2303 : 4259 : return costarr[max_index];
2304 : : }
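 : : /*
 : :  * Worked example (hypothetical numbers): three non-partial subpaths with
 : :  * total costs {20, 15, 10} (sorted by decreasing cost) and
 : :  * parallel_workers = 2:
 : :  *
 : :  *		costarr = {20, 15}		first two paths claim a worker each
 : :  *		costarr = {20, 25}		third path goes to the cheapest slot
 : :  *
 : :  * and the function returns 25, the estimated completion time of the
 : :  * busiest worker.
 : :  */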
2305 : :
2306 : : /*
2307 : : * cost_append
2308 : : * Determines and returns the cost of an Append node.
2309 : : */
2310 : : void
301 rguo@postgresql.org 2311 :GNC 58867 : cost_append(AppendPath *apath, PlannerInfo *root)
2312 : : {
97 rhaas@postgresql.org 2313 : 58867 : RelOptInfo *rel = apath->path.parent;
2314 : : ListCell *l;
2315 : 58867 : uint64 enable_mask = PGS_APPEND;
2316 : :
2317 [ + + ]: 58867 : if (apath->path.parallel_workers == 0)
2318 : 37086 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
2319 : :
2320 : 58867 : apath->path.disabled_nodes =
2321 : 58867 : (rel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
3073 rhaas@postgresql.org 2322 :CBC 58867 : apath->path.startup_cost = 0;
2323 : 58867 : apath->path.total_cost = 0;
2587 tgl@sss.pgh.pa.us 2324 : 58867 : apath->path.rows = 0;
2325 : :
3073 rhaas@postgresql.org 2326 [ + + ]: 58867 : if (apath->subpaths == NIL)
2327 : 1793 : return;
2328 : :
2329 [ + + ]: 57074 : if (!apath->path.parallel_aware)
2330 : : {
2587 tgl@sss.pgh.pa.us 2331 : 35333 : List *pathkeys = apath->path.pathkeys;
2332 : :
2333 [ + + ]: 35333 : if (pathkeys == NIL)
2334 : : {
1308 drowley@postgresql.o 2335 : 33524 : Path *firstsubpath = (Path *) linitial(apath->subpaths);
2336 : :
2337 : : /*
2338 : : * For an unordered, non-parallel-aware Append we take the startup
2339 : : * cost as the startup cost of the first subpath.
2340 : : */
2341 : 33524 : apath->path.startup_cost = firstsubpath->startup_cost;
2342 : :
2343 : : /*
2344 : : * Compute rows, number of disabled nodes, and total cost as sums
2345 : : * of underlying subplan values.
2346 : : */
2587 tgl@sss.pgh.pa.us 2347 [ + - + + : 132523 : foreach(l, apath->subpaths)
+ + ]
2348 : : {
2349 : 98999 : Path *subpath = (Path *) lfirst(l);
2350 : :
2351 : 98999 : apath->path.rows += subpath->rows;
622 rhaas@postgresql.org 2352 : 98999 : apath->path.disabled_nodes += subpath->disabled_nodes;
2587 tgl@sss.pgh.pa.us 2353 : 98999 : apath->path.total_cost += subpath->total_cost;
2354 : : }
2355 : : }
2356 : : else
2357 : : {
2358 : : /*
2359 : : * For an ordered, non-parallel-aware Append we take the startup
2360 : : * cost as the sum of the subpath startup costs. This ensures
2361 : : * that we don't underestimate the startup cost when a query's
2362 : : * LIMIT is such that several of the children have to be run to
2363 : : * satisfy it. This might be overkill --- another plausible hack
2364 : : * would be to take the Append's startup cost as the maximum of
2365 : : * the child startup costs. But we don't want to risk believing
2366 : : * that an ORDER BY LIMIT query can be satisfied at small cost
2367 : : * when the first child has small startup cost but later ones
2368 : : * don't. (If we had the ability to deal with nonlinear cost
2369 : : * interpolation for partial retrievals, we would not need to be
2370 : : * so conservative about this.)
2371 : : *
2372 : : * This case is also different from the above in that we have to
2373 : : * account for possibly injecting sorts into subpaths that aren't
2374 : : * natively ordered.
2375 : : */
2376 [ + - + + : 7041 : foreach(l, apath->subpaths)
+ + ]
2377 : : {
2378 : 5232 : Path *subpath = (Path *) lfirst(l);
2379 : : int presorted_keys;
2380 : : Path sort_path; /* dummy for result of
2381 : : * cost_sort/cost_incremental_sort */
2382 : :
301 rguo@postgresql.org 2383 [ + + ]:GNC 5232 : if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
2384 : : &presorted_keys))
2385 : : {
2386 : : /*
2387 : : * We'll need to insert a Sort node, so include costs for
2388 : : * that. We choose to use incremental sort if it is
2389 : : * enabled and there are presorted keys; otherwise we use
2390 : : * full sort.
2391 : : *
2392 : : * We can use the parent's LIMIT if any, since we
2393 : : * certainly won't pull more than that many tuples from
2394 : : * any child.
2395 : : */
2396 [ + - + + ]: 30 : if (enable_incremental_sort && presorted_keys > 0)
2397 : : {
2398 : 10 : cost_incremental_sort(&sort_path,
2399 : : root,
2400 : : pathkeys,
2401 : : presorted_keys,
2402 : : subpath->disabled_nodes,
2403 : : subpath->startup_cost,
2404 : : subpath->total_cost,
2405 : : subpath->rows,
2406 : 10 : subpath->pathtarget->width,
2407 : : 0.0,
2408 : : work_mem,
2409 : : apath->limit_tuples);
2410 : : }
2411 : : else
2412 : : {
2413 : 20 : cost_sort(&sort_path,
2414 : : root,
2415 : : pathkeys,
2416 : : subpath->disabled_nodes,
2417 : : subpath->total_cost,
2418 : : subpath->rows,
2419 : 20 : subpath->pathtarget->width,
2420 : : 0.0,
2421 : : work_mem,
2422 : : apath->limit_tuples);
2423 : : }
2424 : :
2587 tgl@sss.pgh.pa.us 2425 :CBC 30 : subpath = &sort_path;
2426 : : }
2427 : :
2428 : 5232 : apath->path.rows += subpath->rows;
622 rhaas@postgresql.org 2429 : 5232 : apath->path.disabled_nodes += subpath->disabled_nodes;
2587 tgl@sss.pgh.pa.us 2430 : 5232 : apath->path.startup_cost += subpath->startup_cost;
2431 : 5232 : apath->path.total_cost += subpath->total_cost;
2432 : : }
2433 : : }
2434 : : }
2435 : : else /* parallel-aware */
2436 : : {
3073 rhaas@postgresql.org 2437 : 21741 : int i = 0;
2438 : 21741 : double parallel_divisor = get_parallel_divisor(&apath->path);
2439 : :
2440 : : /* Parallel-aware Append never produces ordered output. */
2587 tgl@sss.pgh.pa.us 2441 [ - + ]: 21741 : Assert(apath->path.pathkeys == NIL);
2442 : :
2443 : : /* Calculate startup cost. */
3073 rhaas@postgresql.org 2444 [ + - + + : 87421 : foreach(l, apath->subpaths)
+ + ]
2445 : : {
2446 : 65680 : Path *subpath = (Path *) lfirst(l);
2447 : :
2448 : : /*
2449 : : * Append will start returning tuples when the child node having
2450 : : * lowest startup cost is done setting up. We consider only the
2451 : : * first few subplans that immediately get a worker assigned.
2452 : : */
2453 [ + + ]: 65680 : if (i == 0)
2454 : 21741 : apath->path.startup_cost = subpath->startup_cost;
2455 [ + + ]: 43939 : else if (i < apath->path.parallel_workers)
2456 [ + + ]: 21276 : apath->path.startup_cost = Min(apath->path.startup_cost,
2457 : : subpath->startup_cost);
2458 : :
2459 : : /*
2460 : : * Apply parallel divisor to subpaths. Scale the number of rows
2461 : : * for each partial subpath based on the ratio of the parallel
2462 : : * divisor originally used for the subpath to the one we adopted.
2463 : : * Also add the cost of partial paths to the total cost, but
2464 : : * ignore non-partial paths for now.
2465 : : */
2466 [ + + ]: 65680 : if (i < apath->first_partial_path)
2467 : 11895 : apath->path.rows += subpath->rows / parallel_divisor;
2468 : : else
2469 : : {
2470 : : double subpath_parallel_divisor;
2471 : :
3043 2472 : 53785 : subpath_parallel_divisor = get_parallel_divisor(subpath);
2473 : 53785 : apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2474 : : parallel_divisor);
3073 2475 : 53785 : apath->path.total_cost += subpath->total_cost;
2476 : : }
2477 : :
622 2478 : 65680 : apath->path.disabled_nodes += subpath->disabled_nodes;
3043 2479 : 65680 : apath->path.rows = clamp_row_est(apath->path.rows);
2480 : :
3073 2481 : 65680 : i++;
2482 : : }
2483 : :
2484 : : /* Add cost for non-partial subpaths. */
2485 : 21741 : apath->path.total_cost +=
2486 : 21741 : append_nonpartial_cost(apath->subpaths,
2487 : : apath->first_partial_path,
2488 : : apath->path.parallel_workers);
2489 : : }
2490 : :
2491 : : /*
2492 : : * Although Append does not do any selection or projection, it's not free;
2493 : : * add a small per-tuple overhead.
2494 : : */
2995 2495 : 57074 : apath->path.total_cost +=
2496 : 57074 : cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
2497 : : }
2498 : :
2499 : : /*
2500 : : * cost_merge_append
2501 : : * Determines and returns the cost of a MergeAppend node.
2502 : : *
2503 : : * MergeAppend merges several pre-sorted input streams, using a heap that
2504 : : * at any given instant holds the next tuple from each stream. If there
2505 : : * are N streams, we need about N*log2(N) tuple comparisons to construct
2506 : : * the heap at startup, and then for each output tuple, about log2(N)
2507 : : * comparisons to replace the top entry.
2508 : : *
2509 : : * (The effective value of N will drop once some of the input streams are
2510 : : * exhausted, but it seems unlikely to be worth trying to account for that.)
2511 : : *
2512 : : * The heap is never spilled to disk, since we assume N is not very large.
2513 : : * So this is much simpler than cost_sort.
2514 : : *
2515 : : * As in cost_sort, we charge two operator evals per tuple comparison.
2516 : : *
2517 : : * 'pathkeys' is a list of sort keys
2518 : : * 'n_streams' is the number of input streams
2519 : : * 'input_disabled_nodes' is the sum of the input streams' disabled node counts
2520 : : * 'input_startup_cost' is the sum of the input streams' startup costs
2521 : : * 'input_total_cost' is the sum of the input streams' total costs
2522 : : * 'tuples' is the number of tuples in all the streams
2523 : : */
2524 : : void
5682 tgl@sss.pgh.pa.us 2525 : 7365 : cost_merge_append(Path *path, PlannerInfo *root,
2526 : : List *pathkeys, int n_streams,
2527 : : int input_disabled_nodes,
2528 : : Cost input_startup_cost, Cost input_total_cost,
2529 : : double tuples)
2530 : : {
97 rhaas@postgresql.org 2531 :GNC 7365 : RelOptInfo *rel = path->parent;
5682 tgl@sss.pgh.pa.us 2532 :CBC 7365 : Cost startup_cost = 0;
2533 : 7365 : Cost run_cost = 0;
2534 : : Cost comparison_cost;
2535 : : double N;
2536 : : double logN;
97 rhaas@postgresql.org 2537 :GNC 7365 : uint64 enable_mask = PGS_MERGE_APPEND;
2538 : :
2539 [ + - ]: 7365 : if (path->parallel_workers == 0)
2540 : 7365 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
2541 : :
2542 : : /*
2543 : : * Avoid log(0)...
2544 : : */
5682 tgl@sss.pgh.pa.us 2545 [ + - ]:CBC 7365 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
2546 : 7365 : logN = LOG2(N);
2547 : :
2548 : : /* Assumed cost per tuple comparison */
2549 : 7365 : comparison_cost = 2.0 * cpu_operator_cost;
2550 : :
2551 : : /* Heap creation cost */
2552 : 7365 : startup_cost += comparison_cost * N * logN;
2553 : :
2554 : : /* Per-tuple heap maintenance cost */
3468 2555 : 7365 : run_cost += tuples * comparison_cost * logN;
2556 : :
2557 : : /*
2558 : : * Although MergeAppend does not do any selection or projection, it's not
2559 : : * free; add a small per-tuple overhead.
2560 : : */
2995 rhaas@postgresql.org 2561 : 7365 : run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2562 : :
97 rhaas@postgresql.org 2563 :GNC 7365 : path->disabled_nodes =
2564 : 7365 : (rel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
2565 : 7365 : path->disabled_nodes += input_disabled_nodes;
5682 tgl@sss.pgh.pa.us 2566 :CBC 7365 : path->startup_cost = startup_cost + input_startup_cost;
2567 : 7365 : path->total_cost = startup_cost + run_cost + input_total_cost;
2568 : 7365 : }
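 : : /*
 : :  * For example (hypothetical numbers): merging N = 4 presorted streams
 : :  * costs about
 : :  *
 : :  *		startup += 2.0 * cpu_operator_cost * 4 * log2(4)	(heap build)
 : :  *		run		+= tuples * 2.0 * cpu_operator_cost * log2(4)
 : :  *				 + cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples
 : :  *
 : :  * on top of the input streams' own costs, i.e. two comparisons' worth of
 : :  * work per output tuple once the heap is built.
 : :  */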
2569 : :
2570 : : /*
2571 : : * cost_material
2572 : : * Determines and returns the cost of materializing a relation, including
2573 : : * the cost of reading the input data.
2574 : : *
2575 : : * If the total volume of data to materialize exceeds work_mem, we will need
2576 : : * to write it to disk, so the cost is much higher in that case.
2577 : : *
2578 : : * Note that here we are estimating the costs for the first scan of the
2579 : : * relation, so the materialization is all overhead --- any savings will
2580 : : * occur only on rescan, which is estimated in cost_rescan.
2581 : : */
2582 : : void
8557 2583 : 504029 : cost_material(Path *path,
2584 : : bool enabled, int input_disabled_nodes,
2585 : : Cost input_startup_cost, Cost input_total_cost,
2586 : : double tuples, int width)
2587 : : {
6079 2588 : 504029 : Cost startup_cost = input_startup_cost;
2589 : 504029 : Cost run_cost = input_total_cost - input_startup_cost;
8557 2590 : 504029 : double nbytes = relation_byte_size(tuples, width);
459 2591 : 504029 : double work_mem_bytes = work_mem * (Size) 1024;
2592 : :
5212 2593 : 504029 : path->rows = tuples;
2594 : :
2595 : : /*
2596 : : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2597 : : * reflect bookkeeping overhead. (This rate must be more than what
2598 : : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2599 : : * if it is exactly the same then there will be a cost tie between
2600 : : * nestloop with A outer, materialized B inner and nestloop with B outer,
2601 : : * materialized A inner. The extra cost ensures we'll prefer
2602 : : * materializing the smaller rel.) Note that this is normally a good deal
2603 : : * less than cpu_tuple_cost, which is OK because a Material plan node
2604 : : * doesn't do qual-checking or projection, so it's got less overhead than
2605 : : * most plan nodes.
2606 : : */
5919 2607 : 504029 : run_cost += 2 * cpu_operator_cost * tuples;
2608 : :
2609 : : /*
2610 : : * If we will spill to disk, charge at the rate of seq_page_cost per page.
2611 : : * This cost is assumed to be evenly spread through the plan run phase,
2612 : : * which isn't exactly accurate but our cost model doesn't allow for
2613 : : * nonuniform costs within the run phase.
2614 : : */
8127 2615 [ + + ]: 504029 : if (nbytes > work_mem_bytes)
2616 : : {
8557 2617 : 3687 : double npages = ceil(nbytes / BLCKSZ);
2618 : :
7274 2619 : 3687 : run_cost += seq_page_cost * npages;
2620 : : }
2621 : :
97 rhaas@postgresql.org 2622 :GNC 504029 : path->disabled_nodes = input_disabled_nodes + (enabled ? 0 : 1);
8557 tgl@sss.pgh.pa.us 2623 :CBC 504029 : path->startup_cost = startup_cost;
2624 : 504029 : path->total_cost = startup_cost + run_cost;
2625 : 504029 : }
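
A hedged sketch of the spill charge above, with invented inputs and the stock defaults (work_mem = 4MB, seq_page_cost = 1.0, BLCKSZ = 8192); the real relation_byte_size() adds per-tuple header overhead, approximated below as an assumed 24 bytes per tuple:

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double tuples = 1000000.0, width = 64.0;   /* invented input size */
    double nbytes = tuples * (width + 24.0);   /* rough per-tuple overhead */
    double work_mem_bytes = 4096.0 * 1024.0;   /* default work_mem, 4MB */
    double run_cost = 2 * 0.0025 * tuples;     /* bookkeeping: 5000.0 */

    if (nbytes > work_mem_bytes)               /* ~88MB > 4MB, so we spill */
        run_cost += 1.0 * ceil(nbytes / 8192.0);   /* seq_page_cost x 10743 pages */
    printf("run_cost ~ %.0f\n", run_cost);     /* ~15743 */
    return 0;
}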
2626 : :
2627 : : /*
2628 : : * cost_memoize_rescan
2629 : : * Determines the estimated cost of rescanning a Memoize node.
2630 : : *
2631 : : * In order to estimate this, we must gain knowledge of how often we expect to
2632 : : * be called and how many distinct sets of parameters we are likely to be
2633 : : * called with. If we expect a good cache hit ratio, then we can set our
2634 : : * costs to account for that hit ratio, plus a little bit of cost for the
2635 : : * caching itself. Caching will not work out well if we expect to be called
2636 : : * with too many distinct parameter values. The worst case here is that we
2637 : : * never see any parameter value twice, in which case we'd never get a cache
2638 : : * hit and caching would be a complete waste of effort.
2639 : : */
2640 : : static void
1756 drowley@postgresql.o 2641 : 203115 : cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2642 : : Cost *rescan_startup_cost, Cost *rescan_total_cost)
2643 : : {
2644 : : EstimationInfo estinfo;
2645 : : ListCell *lc;
2646 : 203115 : Cost input_startup_cost = mpath->subpath->startup_cost;
2647 : 203115 : Cost input_total_cost = mpath->subpath->total_cost;
2648 : 203115 : double tuples = mpath->subpath->rows;
280 drowley@postgresql.o 2649 :GNC 203115 : Cardinality est_calls = mpath->est_calls;
1756 drowley@postgresql.o 2650 :CBC 203115 : int width = mpath->subpath->pathtarget->width;
2651 : :
2652 : : double hash_mem_bytes;
2653 : : double est_entry_bytes;
2654 : : Cardinality est_cache_entries;
2655 : : Cardinality ndistinct;
2656 : : double evict_ratio;
2657 : : double hit_ratio;
2658 : : Cost startup_cost;
2659 : : Cost total_cost;
2660 : :
2661 : : /* available cache space */
1745 tgl@sss.pgh.pa.us 2662 : 203115 : hash_mem_bytes = get_hash_memory_limit();
2663 : :
2664 : : /*
2665 : : * Set the number of bytes each cache entry should consume in the cache.
2666 : : * To provide us with better estimations on how many cache entries we can
2667 : : * store at once, we make a call to the executor here to ask it what
2668 : : * memory overheads there are for a single cache entry.
2669 : : */
1859 drowley@postgresql.o 2670 : 203115 : est_entry_bytes = relation_byte_size(tuples, width) +
2671 : 203115 : ExecEstimateCacheEntryOverheadBytes(tuples);
2672 : :
2673 : : /* include the estimated width for the cache keys */
1142 2674 [ + - + + : 432827 : foreach(lc, mpath->param_exprs)
+ + ]
2675 : 229712 : est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2676 : :
2677 : : /* estimate the upper limit of cache entries we can hold at once */
1859 2678 : 203115 : est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2679 : :
2680 : : /* estimate the number of distinct parameter values */
280 drowley@postgresql.o 2681 :GNC 203115 : ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
2682 : : &estinfo);
2683 : :
2684 : : /*
2685 : : * When the estimation fell back on using a default value, it's a bit too
2686 : : * risky to assume that it's ok to use a Memoize node. The use of a
2687 : : * default could cause us to use a Memoize node when it's really
2688 : : * inappropriate to do so. If we see that this has been done, then we'll
2689 : : * assume that every call will have unique parameters, which will almost
2690 : : * certainly mean a MemoizePath will never survive add_path().
2691 : : */
1859 drowley@postgresql.o 2692 [ + + ]:CBC 203115 : if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
280 drowley@postgresql.o 2693 :GNC 15871 : ndistinct = est_calls;
2694 : :
2695 : : /* Remember the ndistinct estimate for EXPLAIN */
2696 : 203115 : mpath->est_unique_keys = ndistinct;
2697 : :
2698 : : /*
2699 : : * Since we've already estimated the maximum number of entries we can
2700 : : * store at once and know the estimated number of distinct values we'll be
2701 : : * called with, we'll take this opportunity to set the path's est_entries.
2702 : : * This will ultimately determine the hash table size that the executor
2703 : : * will use. If we leave this at zero, the executor will just choose the
2704 : : * size itself. Really this is not the right place to do this, but it's
2705 : : * convenient since everything is already calculated.
2706 : : */
1756 drowley@postgresql.o 2707 [ + + + - :CBC 203115 : mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
+ + ]
2708 : : PG_UINT32_MAX);
2709 : :
2710 : : /*
2711 : : * When the number of distinct parameter values is above the amount we can
2712 : : * store in the cache, then we'll have to evict some entries from the
2713 : : * cache. This is not free. Here we estimate how often we'll incur the
2714 : : * cost of that eviction.
2715 : : */
1859 2716 [ + + ]: 203115 : evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2717 : :
2718 : : /*
2719 : : * In order to estimate how costly a single scan will be, we need to
2720 : : * attempt to estimate what the cache hit ratio will be. To do that we
2721 : : * must look at how many scans are estimated in total for this node and
2722 : : * how many of those scans we expect to result in a cache hit.
2723 : : */
280 drowley@postgresql.o 2724 :GNC 406230 : hit_ratio = ((est_calls - ndistinct) / est_calls) *
1140 drowley@postgresql.o 2725 [ + + ]:CBC 203115 : (est_cache_entries / Max(ndistinct, est_cache_entries));
2726 : :
2727 : : /* Remember the hit ratio estimate for EXPLAIN */
280 drowley@postgresql.o 2728 :GNC 203115 : mpath->est_hit_ratio = hit_ratio;
2729 : :
1140 drowley@postgresql.o 2730 [ + - - + ]:CBC 203115 : Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2731 : :
2732 : : /*
2733 : : * Set the total_cost accounting for the expected cache hit ratio. We
2734 : : * also add on a cpu_operator_cost to account for a cache lookup. This
2735 : : * will happen regardless of whether it's a cache hit or not.
2736 : : */
1859 2737 : 203115 : total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2738 : :
2739 : : /* Now adjust the total cost to account for cache evictions */
2740 : :
2741 : : /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2742 : 203115 : total_cost += cpu_tuple_cost * evict_ratio;
2743 : :
2744 : : /*
2745 : : * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2746 : : * The per-tuple eviction is really just a pfree, so charging a whole
2747 : : * cpu_operator_cost seems a little excessive.
2748 : : */
2749 : 203115 : total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2750 : :
2751 : : /*
2752 : : * Now adjust for storing things in the cache, since that's not free
2753 : : * either. Everything must go in the cache. We don't proportion this
2754 : : * over any ratio, just apply it once for the scan. We charge a
2755 : : * cpu_tuple_cost for the creation of the cache entry and also a
2756 : : * cpu_operator_cost for each tuple we expect to cache.
2757 : : */
2758 : 203115 : total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2759 : :
2760 : : /*
2761 : : * Getting the first row must also be proportioned according to the
2762 : : * expected cache hit ratio.
2763 : : */
2764 : 203115 : startup_cost = input_startup_cost * (1.0 - hit_ratio);
2765 : :
2766 : : /*
2767 : : * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2768 : : * which we'll do regardless of whether it was a cache hit or not.
2769 : : */
2770 : 203115 : startup_cost += cpu_tuple_cost;
2771 : :
2772 : 203115 : *rescan_startup_cost = startup_cost;
2773 : 203115 : *rescan_total_cost = total_cost;
2774 : 203115 : }
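
To see how the hit-ratio formula behaves, a standalone sketch with invented cardinalities; if ndistinct exceeded est_cache_entries, the second factor would fall below 1.0 and evict_ratio would rise above zero:

#include <stdio.h>

int
main(void)
{
    double est_calls = 10000.0;          /* expected number of rescans */
    double ndistinct = 100.0;            /* distinct parameter sets */
    double est_cache_entries = 250.0;    /* all entries fit: no evictions */
    double min_nd = ndistinct < est_cache_entries ? ndistinct : est_cache_entries;
    double max_nd = ndistinct > est_cache_entries ? ndistinct : est_cache_entries;
    double evict_ratio = 1.0 - min_nd / ndistinct;
    double hit_ratio = ((est_calls - ndistinct) / est_calls) *
        (est_cache_entries / max_nd);

    printf("evict=%.2f hit=%.2f\n", evict_ratio, hit_ratio);  /* 0.00 0.99 */
    return 0;
}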
2775 : :
2776 : : /*
2777 : : * cost_agg
2778 : : * Determines and returns the cost of performing an Agg plan node,
2779 : : * including the cost of its input.
2780 : : *
2781 : : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2782 : : * we are using a hashed Agg node just to do grouping).
2783 : : *
2784 : : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2785 : : * are for appropriately-sorted input.
2786 : : */
2787 : : void
7639 tgl@sss.pgh.pa.us 2788 : 74481 : cost_agg(Path *path, PlannerInfo *root,
2789 : : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2790 : : int numGroupCols, double numGroups,
2791 : : List *quals,
2792 : : int disabled_nodes,
2793 : : Cost input_startup_cost, Cost input_total_cost,
2794 : : double input_tuples, double input_width)
2795 : : {
2796 : : double output_tuples;
2797 : : Cost startup_cost;
2798 : : Cost total_cost;
411 peter@eisentraut.org 2799 : 74481 : const AggClauseCosts dummy_aggcosts = {0};
2800 : :
2801 : : /* Use all-zero per-aggregate costs if NULL is passed */
5490 tgl@sss.pgh.pa.us 2802 [ + + ]: 74481 : if (aggcosts == NULL)
2803 : : {
2804 [ - + ]: 15272 : Assert(aggstrategy == AGG_HASHED);
2805 : 15272 : aggcosts = &dummy_aggcosts;
2806 : : }
2807 : :
2808 : : /*
2809 : : * The transCost.per_tuple component of aggcosts should be charged once
2810 : : * per input tuple, corresponding to the costs of evaluating the aggregate
2811 : : * transfns and their input expressions. The finalCost.per_tuple component
2812 : : * is charged once per output tuple, corresponding to the costs of
2813 : : * evaluating the finalfns. Startup costs are of course charged but once.
2814 : : *
2815 : : * If we are grouping, we charge an additional cpu_operator_cost per
2816 : : * grouping column per input tuple for grouping comparisons.
2817 : : *
2818 : : * We will produce a single output tuple if not grouping, and a tuple per
2819 : : * group otherwise. We charge cpu_tuple_cost for each output tuple.
2820 : : *
2821 : : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2822 : : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2823 : : * input path is already sorted appropriately, AGG_SORTED should be
2824 : : * preferred (since it has no risk of memory overflow). This will happen
2825 : : * as long as the computed total costs are indeed exactly equal --- but if
2826 : : * there's roundoff error we might do the wrong thing. So be sure that
2827 : : * the computations below form the same intermediate values in the same
2828 : : * order.
2829 : : */
8566 2830 [ + + ]: 74481 : if (aggstrategy == AGG_PLAIN)
2831 : : {
2832 : 33248 : startup_cost = input_total_cost;
5490 2833 : 33248 : startup_cost += aggcosts->transCost.startup;
2834 : 33248 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2642 2835 : 33248 : startup_cost += aggcosts->finalCost.startup;
2836 : 33248 : startup_cost += aggcosts->finalCost.per_tuple;
2837 : : /* we aren't grouping */
7556 2838 : 33248 : total_cost = startup_cost + cpu_tuple_cost;
5212 2839 : 33248 : output_tuples = 1;
2840 : : }
3326 rhodiumtoad@postgres 2841 [ + + + + ]: 41233 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2842 : : {
2843 : : /* Here we are able to deliver output on-the-fly */
8566 tgl@sss.pgh.pa.us 2844 : 15356 : startup_cost = input_startup_cost;
2845 : 15356 : total_cost = input_total_cost;
3326 rhodiumtoad@postgres 2846 [ + + + + ]: 15356 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
622 rhaas@postgresql.org 2847 : 460 : ++disabled_nodes;
2848 : : /* calcs phrased this way to match HASHED case, see note above */
5490 tgl@sss.pgh.pa.us 2849 : 15356 : total_cost += aggcosts->transCost.startup;
2850 : 15356 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
2851 : 15356 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2642 2852 : 15356 : total_cost += aggcosts->finalCost.startup;
2853 : 15356 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
7556 2854 : 15356 : total_cost += cpu_tuple_cost * numGroups;
5212 2855 : 15356 : output_tuples = numGroups;
2856 : : }
2857 : : else
2858 : : {
2859 : : /* must be AGG_HASHED */
8566 2860 : 25877 : startup_cost = input_total_cost;
3697 rhaas@postgresql.org 2861 [ + + ]: 25877 : if (!enable_hashagg)
622 2862 : 1569 : ++disabled_nodes;
5490 tgl@sss.pgh.pa.us 2863 : 25877 : startup_cost += aggcosts->transCost.startup;
2864 : 25877 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2865 : : /* cost of computing hash value */
2866 : 25877 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2642 2867 : 25877 : startup_cost += aggcosts->finalCost.startup;
2868 : :
8566 2869 : 25877 : total_cost = startup_cost;
2642 2870 : 25877 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2871 : : /* cost of retrieving from hash table */
7556 2872 : 25877 : total_cost += cpu_tuple_cost * numGroups;
5212 2873 : 25877 : output_tuples = numGroups;
2874 : : }
2875 : :
2876 : : /*
2877 : : * Add the disk costs of hash aggregation that spills to disk.
2878 : : *
2879 : : * Groups that go into the hash table stay in memory until finalized, so
2880 : : * spilling and reprocessing tuples doesn't incur additional invocations
2881 : : * of transCost or finalCost. Furthermore, the computed hash value is
2882 : : * stored with the spilled tuples, so we don't incur extra invocations of
2883 : : * the hash function.
2884 : : *
2885 : : * Hash Agg begins returning tuples after the first batch is complete.
2886 : : * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2887 : : * accrue reads only to total_cost.
2888 : : */
2239 jdavis@postgresql.or 2889 [ + + + + ]: 74481 : if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2890 : : {
2891 : : double pages;
2182 tgl@sss.pgh.pa.us 2892 : 26843 : double pages_written = 0.0;
2893 : 26843 : double pages_read = 0.0;
2894 : : double spill_cost;
2895 : : double hashentrysize;
2896 : : double nbatches;
2897 : : Size mem_limit;
2898 : : uint64 ngroups_limit;
2899 : : int num_partitions;
2900 : : int depth;
2901 : :
2902 : : /*
2903 : : * Estimate number of batches based on the computed limits. If less
2904 : : * than or equal to one, all groups are expected to fit in memory;
2905 : : * otherwise we expect to spill.
2906 : : */
1988 heikki.linnakangas@i 2907 : 26843 : hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2908 : : input_width,
2182 tgl@sss.pgh.pa.us 2909 : 26843 : aggcosts->transitionSpace);
2239 jdavis@postgresql.or 2910 : 26843 : hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2911 : : &ngroups_limit, &num_partitions);
2912 : :
2182 tgl@sss.pgh.pa.us 2913 [ - + ]: 26843 : nbatches = Max((numGroups * hashentrysize) / mem_limit,
2914 : : numGroups / ngroups_limit);
2915 : :
2229 jdavis@postgresql.or 2916 [ + + ]: 26843 : nbatches = Max(ceil(nbatches), 1.0);
2917 : 26843 : num_partitions = Max(num_partitions, 2);
2918 : :
2919 : : /*
2920 : : * The number of partitions can change at different levels of
2921 : : * recursion; but for the purposes of this calculation assume it stays
2922 : : * constant.
2923 : : */
2182 tgl@sss.pgh.pa.us 2924 : 26843 : depth = ceil(log(nbatches) / log(num_partitions));
2925 : :
2926 : : /*
2927 : : * Estimate number of pages read and written. For each level of
2928 : : * recursion, a tuple must be written and then later read.
2929 : : */
2229 jdavis@postgresql.or 2930 : 26843 : pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2931 : 26843 : pages_written = pages_read = pages * depth;
2932 : :
2933 : : /*
2934 : : * HashAgg has somewhat worse IO behavior than Sort on typical
2935 : : * hardware/OS combinations. Account for this with a generic penalty.
2936 : : */
2066 2937 : 26843 : pages_read *= 2.0;
2938 : 26843 : pages_written *= 2.0;
2939 : :
2239 2940 : 26843 : startup_cost += pages_written * random_page_cost;
2941 : 26843 : total_cost += pages_written * random_page_cost;
2942 : 26843 : total_cost += pages_read * seq_page_cost;
2943 : :
2944 : : /* account for CPU cost of spilling a tuple and reading it back */
2066 2945 : 26843 : spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2946 : 26843 : startup_cost += spill_cost;
2947 : 26843 : total_cost += spill_cost;
2948 : : }
2949 : :
2950 : : /*
2951 : : * If there are quals (HAVING quals), account for their cost and
2952 : : * selectivity.
2953 : : */
3106 tgl@sss.pgh.pa.us 2954 [ + + ]: 74481 : if (quals)
2955 : : {
2956 : : QualCost qual_cost;
2957 : :
2958 : 3897 : cost_qual_eval(&qual_cost, quals, root);
2959 : 3897 : startup_cost += qual_cost.startup;
2960 : 3897 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2961 : :
2962 : 3897 : output_tuples = clamp_row_est(output_tuples *
2963 : 3897 : clauselist_selectivity(root,
2964 : : quals,
2965 : : 0,
2966 : : JOIN_INNER,
2967 : : NULL));
2968 : : }
2969 : :
5212 2970 : 74481 : path->rows = output_tuples;
622 rhaas@postgresql.org 2971 : 74481 : path->disabled_nodes = disabled_nodes;
8566 tgl@sss.pgh.pa.us 2972 : 74481 : path->startup_cost = startup_cost;
2973 : 74481 : path->total_cost = total_cost;
2974 : 74481 : }
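
The spill-depth recursion above can be hard to visualize; the sketch below runs the same arithmetic with invented numbers, assuming a work_mem-sized memory limit and a partition fan-out of 32 in place of what hash_agg_set_limits() would actually compute:

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double numGroups = 1000000.0, hashentrysize = 64.0;   /* invented */
    double mem_limit = 4096.0 * 1024.0;    /* assumed memory limit */
    double nbatches = (numGroups * hashentrysize) / mem_limit;  /* ~15.3 */
    int    num_partitions = 32;            /* assumed fan-out */
    int    depth;

    nbatches = fmax(ceil(nbatches), 1.0);                      /* 16 batches */
    depth = (int) ceil(log(nbatches) / log(num_partitions));   /* 1 level */
    printf("nbatches=%.0f depth=%d\n", nbatches, depth);
    return 0;
}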
2975 : :
2976 : : /*
2977 : : * get_windowclause_startup_tuples
2978 : : * Estimate how many tuples we'll need to fetch from a WindowAgg's
2979 : : * subnode before we can output the first WindowAgg tuple.
2980 : : *
2981 : : * How many tuples need to be read depends on the WindowClause. For example,
2982 : : * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2983 : : * subnode tuples are read and aggregated before the WindowAgg can output
2984 : : * anything. If there's a PARTITION BY, then we only need to look at tuples
2985 : : * in the first partition. Here we attempt to estimate just how many
2986 : : * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2987 : : * before the first tuple can be output.
2988 : : */
2989 : : static double
1005 drowley@postgresql.o 2990 : 2539 : get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2991 : : double input_tuples)
2992 : : {
2993 : 2539 : int frameOptions = wc->frameOptions;
2994 : : double partition_tuples;
2995 : : double return_tuples;
2996 : : double peer_tuples;
2997 : :
2998 : : /*
2999 : : * First, figure out how many partitions there are likely to be and set
3000 : : * partition_tuples according to that estimate.
3001 : : */
3002 [ + + ]: 2539 : if (wc->partitionClause != NIL)
3003 : : {
3004 : : double num_partitions;
3005 : 605 : List *partexprs = get_sortgrouplist_exprs(wc->partitionClause,
3006 : 605 : root->parse->targetList);
3007 : :
3008 : 605 : num_partitions = estimate_num_groups(root, partexprs, input_tuples,
3009 : : NULL, NULL);
3010 : 605 : list_free(partexprs);
3011 : :
3012 : 605 : partition_tuples = input_tuples / num_partitions;
3013 : : }
3014 : : else
3015 : : {
3016 : : /* all tuples belong to the same partition */
3017 : 1934 : partition_tuples = input_tuples;
3018 : : }
3019 : :
3020 : : /* estimate the number of tuples in each peer group */
3021 [ + + ]: 2539 : if (wc->orderClause != NIL)
3022 : : {
3023 : : double num_groups;
3024 : : List *orderexprs;
3025 : :
3026 : 1979 : orderexprs = get_sortgrouplist_exprs(wc->orderClause,
3027 : 1979 : root->parse->targetList);
3028 : :
3029 : : /* estimate how many peer groups there are in the partition */
3030 : 1979 : num_groups = estimate_num_groups(root, orderexprs,
3031 : : partition_tuples, NULL,
3032 : : NULL);
3033 : 1979 : list_free(orderexprs);
3034 : 1979 : peer_tuples = partition_tuples / num_groups;
3035 : : }
3036 : : else
3037 : : {
3038 : : /* no ORDER BY so only 1 tuple belongs in each peer group */
3039 : 560 : peer_tuples = 1.0;
3040 : : }
3041 : :
3042 [ + + ]: 2539 : if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
3043 : : {
3044 : : /* include all partition rows */
3045 : 304 : return_tuples = partition_tuples;
3046 : : }
3047 [ + + ]: 2235 : else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
3048 : : {
3049 [ + + ]: 1352 : if (frameOptions & FRAMEOPTION_ROWS)
3050 : : {
3051 : : /* just count the current row */
3052 : 599 : return_tuples = 1.0;
3053 : : }
3054 [ + - ]: 753 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3055 : : {
3056 : : /*
3057 : : * When in RANGE/GROUPS mode, it's more complex. If there's no
3058 : : * ORDER BY, then all rows in the partition are peers, otherwise
3059 : : * we'll need to read the first group of peers.
3060 : : */
3061 [ + + ]: 753 : if (wc->orderClause == NIL)
3062 : 318 : return_tuples = partition_tuples;
3063 : : else
3064 : 435 : return_tuples = peer_tuples;
3065 : : }
3066 : : else
3067 : : {
3068 : : /*
3069 : : * Something new we don't support yet? This needs attention.
3070 : : * We'll just return 1.0 in the meantime.
3071 : : */
1005 drowley@postgresql.o 3072 :UBC 0 : Assert(false);
3073 : : return_tuples = 1.0;
3074 : : }
3075 : : }
1005 drowley@postgresql.o 3076 [ + + ]:CBC 883 : else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
3077 : : {
3078 : : /*
3079 : : * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
3080 : : * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
3081 : : * so we'll just assume only the current row needs to be read to fetch
3082 : : * the first WindowAgg row.
3083 : : */
3084 : 90 : return_tuples = 1.0;
3085 : : }
3086 [ + - ]: 793 : else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
3087 : : {
3088 : 793 : Const *endOffset = (Const *) wc->endOffset;
3089 : : double end_offset_value;
3090 : :
3091 : : /* try to figure out the value specified in the endOffset. */
3092 [ + - ]: 793 : if (IsA(endOffset, Const))
3093 : : {
3094 [ - + ]: 793 : if (endOffset->constisnull)
3095 : : {
3096 : : /*
3097 : : * NULLs are not allowed, but currently, there's no code to
3098 : : * error out if there's a NULL Const. We'll only discover
3099 : : * this during execution. For now, just pretend everything is
3100 : : * fine and assume that just the first row/range/group will be
3101 : : * needed.
3102 : : */
1005 drowley@postgresql.o 3103 :UBC 0 : end_offset_value = 1.0;
3104 : : }
3105 : : else
3106 : : {
1005 drowley@postgresql.o 3107 [ + + + + ]:CBC 793 : switch (endOffset->consttype)
3108 : : {
3109 : 20 : case INT2OID:
3110 : 20 : end_offset_value =
3111 : 20 : (double) DatumGetInt16(endOffset->constvalue);
3112 : 20 : break;
3113 : 110 : case INT4OID:
3114 : 110 : end_offset_value =
3115 : 110 : (double) DatumGetInt32(endOffset->constvalue);
3116 : 110 : break;
3117 : 378 : case INT8OID:
3118 : 378 : end_offset_value =
3119 : 378 : (double) DatumGetInt64(endOffset->constvalue);
3120 : 378 : break;
3121 : 285 : default:
3122 : 285 : end_offset_value =
3123 : 285 : partition_tuples / peer_tuples *
3124 : : DEFAULT_INEQ_SEL;
3125 : 285 : break;
3126 : : }
3127 : : }
3128 : : }
3129 : : else
3130 : : {
3131 : : /*
3132 : : * When the end bound is not a Const, we'll just need to guess. We
3133 : : * just make use of DEFAULT_INEQ_SEL.
3134 : : */
1005 drowley@postgresql.o 3135 :UBC 0 : end_offset_value =
3136 : 0 : partition_tuples / peer_tuples * DEFAULT_INEQ_SEL;
3137 : : }
3138 : :
1005 drowley@postgresql.o 3139 [ + + ]:CBC 793 : if (frameOptions & FRAMEOPTION_ROWS)
3140 : : {
3141 : : /* include the N FOLLOWING and the current row */
3142 : 238 : return_tuples = end_offset_value + 1.0;
3143 : : }
3144 [ + - ]: 555 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3145 : : {
3146 : : /* include N FOLLOWING ranges/groups and the initial range/group */
3147 : 555 : return_tuples = peer_tuples * (end_offset_value + 1.0);
3148 : : }
3149 : : else
3150 : : {
3151 : : /*
3152 : : * Something new we don't support yet? This needs attention.
3153 : : * We'll just return 1.0 in the meantime.
3154 : : */
1005 drowley@postgresql.o 3155 :UBC 0 : Assert(false);
3156 : : return_tuples = 1.0;
3157 : : }
3158 : : }
3159 : : else
3160 : : {
3161 : : /*
3162 : : * Something new we don't support yet? This needs attention. We'll
3163 : : * just return 1.0 in the meantime.
3164 : : */
3165 : 0 : Assert(false);
3166 : : return_tuples = 1.0;
3167 : : }
3168 : :
1005 drowley@postgresql.o 3169 [ + + + + ]:CBC 2539 : if (wc->partitionClause != NIL || wc->orderClause != NIL)
3170 : : {
3171 : : /*
3172 : : * Cap the return value to the estimated partition tuples and account
3173 : : * for the extra tuple WindowAgg will need to read to confirm the next
3174 : : * tuple does not belong to the same partition or peer group.
3175 : : */
3176 [ + + ]: 2153 : return_tuples = Min(return_tuples + 1.0, partition_tuples);
3177 : : }
3178 : : else
3179 : : {
3180 : : /*
3181 : : * Cap the return value so it's never higher than the expected tuples
3182 : : * in the partition.
3183 : : */
3184 [ + + ]: 386 : return_tuples = Min(return_tuples, partition_tuples);
3185 : : }
3186 : :
3187 : : /*
3188 : : * We needn't worry about any EXCLUDE options as those only exclude rows
3189 : : * from being aggregated, not from being read from the WindowAgg's
3190 : : * subnode.
3191 : : */
3192 : :
3193 : 2539 : return clamp_row_est(return_tuples);
3194 : : }
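
As a worked instance of the logic above (an assumed frame, not from the source): for ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING with an ORDER BY, the END_OFFSET_FOLLOWING branch yields end_offset_value + 1 rows, and the presence of an ORDER BY adds one look-ahead tuple before capping at the partition size:

#include <stdio.h>

int
main(void)
{
    double partition_tuples = 1000.0;    /* invented partition size */
    double end_offset_value = 2.0;       /* "2 FOLLOWING", ROWS mode */
    double return_tuples = end_offset_value + 1.0;

    /* ORDER BY present: one extra tuple to detect the peer-group edge */
    return_tuples += 1.0;
    if (return_tuples > partition_tuples)
        return_tuples = partition_tuples;
    printf("startup tuples = %.0f\n", return_tuples);   /* 4 */
    return 0;
}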
3195 : :
3196 : : /*
3197 : : * cost_windowagg
3198 : : * Determines and returns the cost of performing a WindowAgg plan node,
3199 : : * including the cost of its input.
3200 : : *
3201 : : * Input is assumed already properly sorted.
3202 : : */
3203 : : void
6337 tgl@sss.pgh.pa.us 3204 : 2539 : cost_windowagg(Path *path, PlannerInfo *root,
3205 : : List *windowFuncs, WindowClause *winclause,
3206 : : int input_disabled_nodes,
3207 : : Cost input_startup_cost, Cost input_total_cost,
3208 : : double input_tuples)
3209 : : {
3210 : : Cost startup_cost;
3211 : : Cost total_cost;
3212 : : double startup_tuples;
3213 : : int numPartCols;
3214 : : int numOrderCols;
3215 : : ListCell *lc;
3216 : :
1005 drowley@postgresql.o 3217 : 2539 : numPartCols = list_length(winclause->partitionClause);
3218 : 2539 : numOrderCols = list_length(winclause->orderClause);
3219 : :
6337 tgl@sss.pgh.pa.us 3220 : 2539 : startup_cost = input_startup_cost;
3221 : 2539 : total_cost = input_total_cost;
3222 : :
3223 : : /*
3224 : : * Window functions are assumed to cost their stated execution cost, plus
3225 : : * the cost of evaluating their input expressions, per tuple. Since they
3226 : : * may in fact evaluate their inputs at multiple rows during each cycle,
3227 : : * this could be a drastic underestimate; but without a way to know how
3228 : : * many rows the window function will fetch, it's hard to do better. In
3229 : : * any case, it's a good estimate for all the built-in window functions,
3230 : : * so we'll just do this for now.
3231 : : */
5490 3232 [ + - + + : 5811 : foreach(lc, windowFuncs)
+ + ]
3233 : : {
3312 3234 : 3272 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
3235 : : Cost wfunccost;
3236 : : QualCost argcosts;
3237 : :
2642 3238 : 3272 : argcosts.startup = argcosts.per_tuple = 0;
3239 : 3272 : add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3240 : : &argcosts);
3241 : 3272 : startup_cost += argcosts.startup;
3242 : 3272 : wfunccost = argcosts.per_tuple;
3243 : :
3244 : : /* also add the input expressions' cost to per-input-row costs */
5490 3245 : 3272 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3246 : 3272 : startup_cost += argcosts.startup;
3247 : 3272 : wfunccost += argcosts.per_tuple;
3248 : :
3249 : : /*
3250 : : * Add the filter's cost to per-input-row costs. XXX We should reduce
3251 : : * input expression costs according to filter selectivity.
3252 : : */
4676 noah@leadboat.com 3253 : 3272 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3254 : 3272 : startup_cost += argcosts.startup;
3255 : 3272 : wfunccost += argcosts.per_tuple;
3256 : :
5490 tgl@sss.pgh.pa.us 3257 : 3272 : total_cost += wfunccost * input_tuples;
3258 : : }
3259 : :
3260 : : /*
3261 : : * We also charge cpu_operator_cost per grouping column per tuple for
3262 : : * grouping comparisons, plus cpu_tuple_cost per tuple for general
3263 : : * overhead.
3264 : : *
3265 : : * XXX this neglects costs of spooling the data to disk when it overflows
3266 : : * work_mem. Sooner or later that should get accounted for.
3267 : : */
3268 : 2539 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
6337 3269 : 2539 : total_cost += cpu_tuple_cost * input_tuples;
3270 : :
5212 3271 : 2539 : path->rows = input_tuples;
622 rhaas@postgresql.org 3272 : 2539 : path->disabled_nodes = input_disabled_nodes;
6337 tgl@sss.pgh.pa.us 3273 : 2539 : path->startup_cost = startup_cost;
3274 : 2539 : path->total_cost = total_cost;
3275 : :
3276 : : /*
3277 : : * Also, take into account how many tuples we need to read from the
3278 : : * subnode in order to produce the first tuple from the WindowAgg. To do
3279 : : * this we proportion the run cost (total cost not including startup cost)
3280 : : * over the estimated startup tuples. We already included the startup
3281 : : * cost of the subnode, so we only need to do this when the estimated
3282 : : * startup-tuple count is above 1.0.
3283 : : */
1005 drowley@postgresql.o 3284 : 2539 : startup_tuples = get_windowclause_startup_tuples(root, winclause,
3285 : : input_tuples);
3286 : :
3287 [ + + ]: 2539 : if (startup_tuples > 1.0)
3288 : 2155 : path->startup_cost += (total_cost - startup_cost) / input_tuples *
3289 : 2155 : (startup_tuples - 1.0);
6337 tgl@sss.pgh.pa.us 3290 : 2539 : }
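
A quick sketch of the proportioning step above with invented costs: the run cost per input tuple is scaled by the number of tuples that must be read (beyond the first) before the first output row appears:

#include <stdio.h>

int
main(void)
{
    double startup_cost = 10.0, total_cost = 1010.0;   /* invented */
    double input_tuples = 10000.0, startup_tuples = 501.0;
    double run_per_tuple = (total_cost - startup_cost) / input_tuples;

    startup_cost += run_per_tuple * (startup_tuples - 1.0);   /* +50.0 */
    printf("adjusted startup = %.1f\n", startup_cost);        /* 60.0 */
    return 0;
}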
3291 : :
3292 : : /*
3293 : : * cost_group
3294 : : * Determines and returns the cost of performing a Group plan node,
3295 : : * including the cost of its input.
3296 : : *
3297 : : * Note: caller must ensure that input costs are for appropriately-sorted
3298 : : * input.
3299 : : */
3300 : : void
7639 3301 : 1033 : cost_group(Path *path, PlannerInfo *root,
3302 : : int numGroupCols, double numGroups,
3303 : : List *quals,
3304 : : int input_disabled_nodes,
3305 : : Cost input_startup_cost, Cost input_total_cost,
3306 : : double input_tuples)
3307 : : {
3308 : : double output_tuples;
3309 : : Cost startup_cost;
3310 : : Cost total_cost;
3311 : :
3106 3312 : 1033 : output_tuples = numGroups;
8566 3313 : 1033 : startup_cost = input_startup_cost;
3314 : 1033 : total_cost = input_total_cost;
3315 : :
3316 : : /*
3317 : : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3318 : : * all columns get compared for most of the tuples.
3319 : : */
3320 : 1033 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3321 : :
3322 : : /*
3323 : : * If there are quals (HAVING quals), account for their cost and
3324 : : * selectivity.
3325 : : */
3106 3326 [ - + ]: 1033 : if (quals)
3327 : : {
3328 : : QualCost qual_cost;
3329 : :
3106 tgl@sss.pgh.pa.us 3330 :UBC 0 : cost_qual_eval(&qual_cost, quals, root);
3331 : 0 : startup_cost += qual_cost.startup;
3332 : 0 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3333 : :
3334 : 0 : output_tuples = clamp_row_est(output_tuples *
3335 : 0 : clauselist_selectivity(root,
3336 : : quals,
3337 : : 0,
3338 : : JOIN_INNER,
3339 : : NULL));
3340 : : }
3341 : :
3106 tgl@sss.pgh.pa.us 3342 :CBC 1033 : path->rows = output_tuples;
622 rhaas@postgresql.org 3343 : 1033 : path->disabled_nodes = input_disabled_nodes;
8566 tgl@sss.pgh.pa.us 3344 : 1033 : path->startup_cost = startup_cost;
3345 : 1033 : path->total_cost = total_cost;
3346 : 1033 : }
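
For scale, a one-line sketch of cost_group's comparison charge under the default cpu_operator_cost, with invented inputs:

#include <stdio.h>

int
main(void)
{
    double cpu_operator_cost = 0.0025;   /* default GUC value */
    double input_tuples = 100000.0;      /* invented */
    int    numGroupCols = 3;

    printf("comparison cost = %.1f\n",
           cpu_operator_cost * input_tuples * numGroupCols);   /* 750.0 */
    return 0;
}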
3347 : :
3348 : : /*
3349 : : * initial_cost_nestloop
3350 : : * Preliminary estimate of the cost of a nestloop join path.
3351 : : *
3352 : : * This must quickly produce lower-bound estimates of the path's startup and
3353 : : * total costs. If we are unable to eliminate the proposed path from
3354 : : * consideration using the lower bounds, final_cost_nestloop will be called
3355 : : * to obtain the final estimates.
3356 : : *
3357 : : * The exact division of labor between this function and final_cost_nestloop
3358 : : * is private to them, and represents a tradeoff between speed of the initial
3359 : : * estimate and getting a tight lower bound. We choose to not examine the
3360 : : * join quals here, since that's by far the most expensive part of the
3361 : : * calculations. The end result is that CPU-cost considerations must be
3362 : : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3363 : : * incorporation of the inner path's run cost.
3364 : : *
3365 : : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3366 : : * other data to be used by final_cost_nestloop
3367 : : * 'jointype' is the type of join to be performed
3368 : : * 'outer_path' is the outer input to the join
3369 : : * 'inner_path' is the inner input to the join
3370 : : * 'extra' contains miscellaneous information about the join
3371 : : */
3372 : : void
5212 3373 : 2586039 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3374 : : JoinType jointype, uint64 enable_mask,
3375 : : Path *outer_path, Path *inner_path,
3376 : : JoinPathExtraData *extra)
3377 : : {
3378 : : int disabled_nodes;
9576 3379 : 2586039 : Cost startup_cost = 0;
3380 : 2586039 : Cost run_cost = 0;
5212 3381 : 2586039 : double outer_path_rows = outer_path->rows;
3382 : : Cost inner_rescan_start_cost;
3383 : : Cost inner_rescan_total_cost;
3384 : : Cost inner_run_cost;
3385 : : Cost inner_rescan_run_cost;
3386 : :
3387 : : /* Count up disabled nodes. */
97 rhaas@postgresql.org 3388 :GNC 2586039 : disabled_nodes = (extra->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
622 rhaas@postgresql.org 3389 :CBC 2586039 : disabled_nodes += inner_path->disabled_nodes;
3390 : 2586039 : disabled_nodes += outer_path->disabled_nodes;
3391 : :
3392 : : /* estimate costs to rescan the inner relation */
6079 tgl@sss.pgh.pa.us 3393 : 2586039 : cost_rescan(root, inner_path,
3394 : : &inner_rescan_start_cost,
3395 : : &inner_rescan_total_cost);
3396 : :
3397 : : /* cost of source data */
3398 : :
3399 : : /*
3400 : : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3401 : : * before we can start returning tuples, so the join's startup cost is
3402 : : * their sum. We'll also pay the inner path's rescan startup cost
3403 : : * multiple times.
3404 : : */
9576 3405 : 2586039 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3406 : 2586039 : run_cost += outer_path->total_cost - outer_path->startup_cost;
6079 3407 [ + + ]: 2586039 : if (outer_path_rows > 1)
3408 : 1852797 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3409 : :
6205 3410 : 2586039 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
6079 3411 : 2586039 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3412 : :
3315 3413 [ + + + + ]: 2586039 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3414 [ + + ]: 2524386 : extra->inner_unique)
3415 : : {
3416 : : /*
3417 : : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3418 : : * executor will stop after the first match.
3419 : : *
3420 : : * Getting decent estimates requires inspection of the join quals,
3421 : : * which we choose to postpone to final_cost_nestloop.
3422 : : */
3423 : :
3424 : : /* Save private data for final_cost_nestloop */
3989 3425 : 1063973 : workspace->inner_run_cost = inner_run_cost;
3426 : 1063973 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3427 : : }
3428 : : else
3429 : : {
3430 : : /* Normal case; we'll scan whole input rel for each outer row */
5212 3431 : 1522066 : run_cost += inner_run_cost;
3432 [ + + ]: 1522066 : if (outer_path_rows > 1)
3433 : 1172559 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3434 : : }
3435 : :
3436 : : /* CPU costs left for later */
3437 : :
3438 : : /* Public result fields */
622 rhaas@postgresql.org 3439 : 2586039 : workspace->disabled_nodes = disabled_nodes;
5212 tgl@sss.pgh.pa.us 3440 : 2586039 : workspace->startup_cost = startup_cost;
3441 : 2586039 : workspace->total_cost = startup_cost + run_cost;
3442 : : /* Save private data for final_cost_nestloop */
3443 : 2586039 : workspace->run_cost = run_cost;
3444 : 2586039 : }
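
The rescan bookkeeping above is easiest to see with numbers; this sketch charges the inner side of a plain (non-SEMI) nestloop with invented costs, mirroring the first-scan-plus-rescans structure of the code:

#include <stdio.h>

int
main(void)
{
    double outer_rows = 100.0;          /* invented */
    double inner_run_cost = 50.0;       /* inner total minus startup */
    double inner_rescan_start = 0.0;    /* assume restart is free */
    double inner_rescan_run = 25.0;     /* e.g. a materialized inner */
    double run_cost = 0.0;

    run_cost += inner_run_cost;                        /* first scan */
    run_cost += (outer_rows - 1) * inner_rescan_start;
    run_cost += (outer_rows - 1) * inner_rescan_run;   /* 99 rescans */
    printf("inner-side run cost = %.1f\n", run_cost);  /* 2525.0 */
    return 0;
}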
3445 : :
3446 : : /*
3447 : : * final_cost_nestloop
3448 : : * Final estimate of the cost and result size of a nestloop join path.
3449 : : *
3450 : : * 'path' is already filled in except for the rows and cost fields
3451 : : * 'workspace' is the result from initial_cost_nestloop
3452 : : * 'extra' contains miscellaneous information about the join
3453 : : */
3454 : : void
3455 : 1154562 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
3456 : : JoinCostWorkspace *workspace,
3457 : : JoinPathExtraData *extra)
3458 : : {
1731 peter@eisentraut.org 3459 : 1154562 : Path *outer_path = path->jpath.outerjoinpath;
3460 : 1154562 : Path *inner_path = path->jpath.innerjoinpath;
5212 tgl@sss.pgh.pa.us 3461 : 1154562 : double outer_path_rows = outer_path->rows;
3462 : 1154562 : double inner_path_rows = inner_path->rows;
3463 : 1154562 : Cost startup_cost = workspace->startup_cost;
3464 : 1154562 : Cost run_cost = workspace->run_cost;
3465 : : Cost cpu_per_tuple;
3466 : : QualCost restrict_qual_cost;
3467 : : double ntuples;
3468 : :
3469 : : /* Set the number of disabled nodes. */
622 rhaas@postgresql.org 3470 : 1154562 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
3471 : :
3472 : : /* Protect some assumptions below that rowcounts aren't zero */
2024 drowley@postgresql.o 3473 [ - + ]: 1154562 : if (outer_path_rows <= 0)
3692 tgl@sss.pgh.pa.us 3474 :UBC 0 : outer_path_rows = 1;
2024 drowley@postgresql.o 3475 [ + + ]:CBC 1154562 : if (inner_path_rows <= 0)
3692 tgl@sss.pgh.pa.us 3476 : 538 : inner_path_rows = 1;
3477 : : /* Mark the path with the correct row estimate */
1731 peter@eisentraut.org 3478 [ + + ]: 1154562 : if (path->jpath.path.param_info)
3479 : 27485 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3480 : : else
3481 : 1127077 : path->jpath.path.rows = path->jpath.path.parent->rows;
3482 : :
3483 : : /* For partial paths, scale row estimate. */
3484 [ + + ]: 1154562 : if (path->jpath.path.parallel_workers > 0)
3485 : : {
3486 : 38262 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3487 : :
3488 : 38262 : path->jpath.path.rows =
3489 : 38262 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3490 : : }
3491 : :
3492 : : /* cost of inner-relation source data (we already dealt with outer rel) */
3493 : :
3494 [ + + + + ]: 1154562 : if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
3315 tgl@sss.pgh.pa.us 3495 [ + + ]: 1111760 : extra->inner_unique)
5212 3496 : 728558 : {
3497 : : /*
3498 : : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3499 : : * executor will stop after the first match.
3500 : : */
3989 3501 : 728558 : Cost inner_run_cost = workspace->inner_run_cost;
3502 : 728558 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3503 : : double outer_matched_rows;
3504 : : double outer_unmatched_rows;
3505 : : Selectivity inner_scan_frac;
3506 : :
3507 : : /*
3508 : : * For an outer-rel row that has at least one match, we can expect the
3509 : : * inner scan to stop after a fraction 1/(match_count+1) of the inner
3510 : : * rows, if the matches are evenly distributed. Since they probably
3511 : : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3512 : : * that fraction. (If we used a larger fuzz factor, we'd have to
3513 : : * clamp inner_scan_frac to at most 1.0; but since match_count is at
3514 : : * least 1, no such clamp is needed now.)
3515 : : */
3315 3516 : 728558 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3258 3517 : 728558 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
3315 3518 : 728558 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3519 : :
3520 : : /*
3521 : : * Compute number of tuples processed (not number emitted!). First,
3522 : : * account for successfully-matched outer rows.
3523 : : */
6205 3524 : 728558 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3525 : :
3526 : : /*
3527 : : * Now we need to estimate the actual costs of scanning the inner
3528 : : * relation, which may be quite a bit less than N times inner_run_cost
3529 : : * due to early scan stops. We consider two cases. If the inner path
3530 : : * is an indexscan using all the joinquals as indexquals, then an
3531 : : * unmatched outer row results in an indexscan returning no rows,
3532 : : * which is probably quite cheap. Otherwise, the executor will have
3533 : : * to scan the whole inner rel for an unmatched row; not so cheap.
3534 : : */
5129 3535 [ + + ]: 728558 : if (has_indexed_join_quals(path))
3536 : : {
3537 : : /*
3538 : : * Successfully-matched outer rows will only require scanning
3539 : : * inner_scan_frac of the inner relation. In this case, we don't
3540 : : * need to charge the full inner_run_cost even when that's more
3541 : : * than inner_rescan_run_cost, because we can assume that none of
3542 : : * the inner scans ever scan the whole inner relation. So it's
3543 : : * okay to assume that all the inner scan executions can be
3544 : : * fractions of the full cost, even if materialization is reducing
3545 : : * the rescan cost. At this writing, it's impossible to get here
3546 : : * for a materialized inner scan, so inner_run_cost and
3547 : : * inner_rescan_run_cost will be the same anyway; but just in
3548 : : * case, use inner_run_cost for the first matched tuple and
3549 : : * inner_rescan_run_cost for additional ones.
3550 : : */
3989 3551 : 120621 : run_cost += inner_run_cost * inner_scan_frac;
3552 [ + + ]: 120621 : if (outer_matched_rows > 1)
3553 : 14496 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3554 : :
3555 : : /*
3556 : : * Add the cost of inner-scan executions for unmatched outer rows.
3557 : : * We estimate this as the same cost as returning the first tuple
3558 : : * of a nonempty scan. We consider that these are all rescans,
3559 : : * since we used inner_run_cost once already.
3560 : : */
3258 3561 : 120621 : run_cost += outer_unmatched_rows *
6079 3562 : 120621 : inner_rescan_run_cost / inner_path_rows;
3563 : :
3564 : : /*
3565 : : * We won't be evaluating any quals at all for unmatched rows, so
3566 : : * don't add them to ntuples.
3567 : : */
3568 : : }
3569 : : else
3570 : : {
3571 : : /*
3572 : : * Here, a complicating factor is that rescans may be cheaper than
3573 : : * first scans. If we never scan all the way to the end of the
3574 : : * inner rel, it might be (depending on the plan type) that we'd
3575 : : * never pay the whole inner first-scan run cost. However it is
3576 : : * difficult to estimate whether that will happen (and it could
3577 : : * not happen if there are any unmatched outer rows!), so be
3578 : : * conservative and always charge the whole first-scan cost once.
3579 : : * We consider this charge to correspond to the first unmatched
3580 : : * outer row, unless there isn't one in our estimate, in which
3581 : : * case blame it on the first matched row.
3582 : : */
3583 : :
3584 : : /* First, count all unmatched join tuples as being processed */
3258 3585 : 607937 : ntuples += outer_unmatched_rows * inner_path_rows;
3586 : :
3587 : : /* Now add the forced full scan, and decrement appropriate count */
3989 3588 : 607937 : run_cost += inner_run_cost;
3258 3589 [ + + ]: 607937 : if (outer_unmatched_rows >= 1)
3590 : 582266 : outer_unmatched_rows -= 1;
3591 : : else
3592 : 25671 : outer_matched_rows -= 1;
3593 : :
3594 : : /* Add inner run cost for additional outer tuples having matches */
3595 [ + + ]: 607937 : if (outer_matched_rows > 0)
3596 : 204040 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3597 : :
3598 : : /* Add inner run cost for additional unmatched outer tuples */
3599 [ + + ]: 607937 : if (outer_unmatched_rows > 0)
3600 : 372410 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3601 : : }
3602 : : }
3603 : : else
3604 : : {
3605 : : /* Normal-case source costs were included in preliminary estimate */
3606 : :
3607 : : /* Compute number of tuples processed (not number emitted!) */
6205 3608 : 426004 : ntuples = outer_path_rows * inner_path_rows;
3609 : : }
3610 : :
3611 : : /* CPU costs */
1731 peter@eisentraut.org 3612 : 1154562 : cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
8514 tgl@sss.pgh.pa.us 3613 : 1154562 : startup_cost += restrict_qual_cost.startup;
3614 : 1154562 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
9576 3615 : 1154562 : run_cost += cpu_per_tuple * ntuples;
3616 : :
3617 : : /* tlist eval costs are paid per output row, not per tuple scanned */
1731 peter@eisentraut.org 3618 : 1154562 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3619 : 1154562 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3620 : :
3621 : 1154562 : path->jpath.path.startup_cost = startup_cost;
3622 : 1154562 : path->jpath.path.total_cost = startup_cost + run_cost;
10892 scrappy@hub.org 3623 : 1154562 : }
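
To ground the early-stop math above, a sketch of the SEMI-join tuple count with invented selectivity inputs; with match_count = 4, the fuzzed stop fraction is 2/(4+1) = 0.4 of the inner rows per matched outer row:

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double outer_rows = 1000.0, inner_rows = 500.0;    /* invented */
    double outer_match_frac = 0.8, match_count = 4.0;  /* assumed semifactors */
    double matched = rint(outer_rows * outer_match_frac);   /* 800 */
    double frac = 2.0 / (match_count + 1.0);                /* 0.40 */
    double ntuples = matched * inner_rows * frac;           /* 160000 */

    printf("matched=%.0f frac=%.2f ntuples=%.0f\n", matched, frac, ntuples);
    return 0;
}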
3624 : :
3625 : : /*
3626 : : * initial_cost_mergejoin
3627 : : * Preliminary estimate of the cost of a mergejoin path.
3628 : : *
3629 : : * This must quickly produce lower-bound estimates of the path's startup and
3630 : : * total costs. If we are unable to eliminate the proposed path from
3631 : : * consideration using the lower bounds, final_cost_mergejoin will be called
3632 : : * to obtain the final estimates.
3633 : : *
3634 : : * The exact division of labor between this function and final_cost_mergejoin
3635 : : * is private to them, and represents a tradeoff between speed of the initial
3636 : : * estimate and getting a tight lower bound. We choose to not examine the
3637 : : * join quals here, except for obtaining the scan selectivity estimate which
3638 : : * is really essential (but fortunately, use of caching keeps the cost of
3639 : : * getting that down to something reasonable).
3640 : : * We also assume that cost_sort/cost_incremental_sort is cheap enough to use
3641 : : * here.
3642 : : *
3643 : : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3644 : : * other data to be used by final_cost_mergejoin
3645 : : * 'jointype' is the type of join to be performed
3646 : : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3647 : : * 'outer_path' is the outer input to the join
3648 : : * 'inner_path' is the inner input to the join
3649 : : * 'outersortkeys' is the list of sort keys for the outer path
3650 : : * 'innersortkeys' is the list of sort keys for the inner path
3651 : : * 'outer_presorted_keys' is the number of presorted keys of the outer path
3652 : : * 'extra' contains miscellaneous information about the join
3653 : : *
3654 : : * Note: outersortkeys and innersortkeys should be NIL if no explicit
3655 : : * sort is needed because the respective source path is already ordered.
3656 : : */
3657 : : void
5212 tgl@sss.pgh.pa.us 3658 : 1127106 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3659 : : JoinType jointype,
3660 : : List *mergeclauses,
3661 : : Path *outer_path, Path *inner_path,
3662 : : List *outersortkeys, List *innersortkeys,
3663 : : int outer_presorted_keys,
3664 : : JoinPathExtraData *extra)
3665 : : {
3666 : : int disabled_nodes;
9576 3667 : 1127106 : Cost startup_cost = 0;
3668 : 1127106 : Cost run_cost = 0;
5212 3669 : 1127106 : double outer_path_rows = outer_path->rows;
3670 : 1127106 : double inner_path_rows = inner_path->rows;
3671 : : Cost inner_run_cost;
3672 : : double outer_rows,
3673 : : inner_rows,
3674 : : outer_skip_rows,
3675 : : inner_skip_rows;
3676 : : Selectivity outerstartsel,
3677 : : outerendsel,
3678 : : innerstartsel,
3679 : : innerendsel;
3680 : : Path sort_path; /* dummy for result of
3681 : : * cost_sort/cost_incremental_sort */
3682 : :
3683 : : /* Protect some assumptions below that rowcounts aren't zero */
2024 drowley@postgresql.o 3684 [ + + ]: 1127106 : if (outer_path_rows <= 0)
6616 tgl@sss.pgh.pa.us 3685 : 72 : outer_path_rows = 1;
2024 drowley@postgresql.o 3686 [ + + ]: 1127106 : if (inner_path_rows <= 0)
6616 tgl@sss.pgh.pa.us 3687 : 94 : inner_path_rows = 1;
3688 : :
3689 : : /*
3690 : : * A merge join will stop as soon as it exhausts either input stream
3691 : : * (unless it's an outer join, in which case the outer side has to be
3692 : : * scanned all the way anyway). Estimate fraction of the left and right
3693 : : * inputs that will actually need to be scanned. Likewise, we can
3694 : : * estimate the number of rows that will be skipped before the first join
3695 : : * pair is found, which should be factored into startup cost. We use only
3696 : : * the first (most significant) merge clause for this purpose. Since
3697 : : * mergejoinscansel() is a fairly expensive computation, we cache the
3698 : : * results in the merge clause RestrictInfo.
3699 : : */
5212 3700 [ + + + + ]: 1127106 : if (mergeclauses && jointype != JOIN_FULL)
8831 3701 : 1122141 : {
7045 3702 : 1122141 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3703 : : List *opathkeys;
3704 : : List *ipathkeys;
3705 : : PathKey *opathkey;
3706 : : PathKey *ipathkey;
3707 : : MergeScanSelCache *cache;
3708 : :
3709 : : /* Get the input pathkeys to determine the sort-order details */
3710 [ + + ]: 1122141 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3711 [ + + ]: 1122141 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3712 [ - + ]: 1122141 : Assert(opathkeys);
3713 [ - + ]: 1122141 : Assert(ipathkeys);
3714 : 1122141 : opathkey = (PathKey *) linitial(opathkeys);
3715 : 1122141 : ipathkey = (PathKey *) linitial(ipathkeys);
3716 : : /* debugging check */
3717 [ + - ]: 1122141 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
5526 3718 [ + - ]: 1122141 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
396 peter@eisentraut.org 3719 [ + - ]: 1122141 : opathkey->pk_cmptype != ipathkey->pk_cmptype ||
7045 tgl@sss.pgh.pa.us 3720 [ - + ]: 1122141 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
7045 tgl@sss.pgh.pa.us 3721 [ # # ]:UBC 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
3722 : :
3723 : : /* Get the selectivity with caching */
7043 tgl@sss.pgh.pa.us 3724 :CBC 1122141 : cache = cached_scansel(root, firstclause, opathkey);
3725 : :
7045 3726 [ + + ]: 1122141 : if (bms_is_subset(firstclause->left_relids,
3727 : 1122141 : outer_path->parent->relids))
3728 : : {
3729 : : /* left side of clause is outer */
6723 3730 : 584429 : outerstartsel = cache->leftstartsel;
3731 : 584429 : outerendsel = cache->leftendsel;
3732 : 584429 : innerstartsel = cache->rightstartsel;
3733 : 584429 : innerendsel = cache->rightendsel;
3734 : : }
3735 : : else
3736 : : {
3737 : : /* left side of clause is inner */
3738 : 537712 : outerstartsel = cache->rightstartsel;
3739 : 537712 : outerendsel = cache->rightendsel;
3740 : 537712 : innerstartsel = cache->leftstartsel;
3741 : 537712 : innerendsel = cache->leftendsel;
3742 : : }
5212 3743 [ + + + + ]: 1122141 : if (jointype == JOIN_LEFT ||
3744 : : jointype == JOIN_ANTI)
3745 : : {
6723 3746 : 138908 : outerstartsel = 0.0;
3747 : 138908 : outerendsel = 1.0;
3748 : : }
1126 3749 [ + + + + ]: 983233 : else if (jointype == JOIN_RIGHT ||
3750 : : jointype == JOIN_RIGHT_ANTI)
3751 : : {
6723 3752 : 134482 : innerstartsel = 0.0;
3753 : 134482 : innerendsel = 1.0;
3754 : : }
3755 : : }
3756 : : else
3757 : : {
3758 : : /* cope with clauseless or full mergejoin */
3759 : 4965 : outerstartsel = innerstartsel = 0.0;
3760 : 4965 : outerendsel = innerendsel = 1.0;
3761 : : }
3762 : :
3763 : : /*
3764 : : * Convert selectivities to row counts. We force outer_rows and
3765 : : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3766 : : */
3767 : 1127106 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
3768 : 1127106 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
3769 : 1127106 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3770 : 1127106 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3771 : :
3772 [ - + ]: 1127106 : Assert(outer_skip_rows <= outer_rows);
3773 [ - + ]: 1127106 : Assert(inner_skip_rows <= inner_rows);
3774 : :
3775 : : /*
3776 : : * Readjust scan selectivities to account for above rounding. This is
3777 : : * normally an insignificant effect, but when there are only a few rows in
3778 : : * the inputs, failing to do this makes for a large percentage error.
3779 : : */
3780 : 1127106 : outerstartsel = outer_skip_rows / outer_path_rows;
3781 : 1127106 : innerstartsel = inner_skip_rows / inner_path_rows;
3782 : 1127106 : outerendsel = outer_rows / outer_path_rows;
3783 : 1127106 : innerendsel = inner_rows / inner_path_rows;
3784 : :
5240 3785 [ - + ]: 1127106 : Assert(outerstartsel <= outerendsel);
3786 [ - + ]: 1127106 : Assert(innerstartsel <= innerendsel);
3787 : :
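    /*
     * Worked example with assumed numbers, for illustration only: if
     * outer_path_rows = 1000, outerstartsel = 0.1 and outerendsel = 0.5,
     * then outer_skip_rows = rint(100) = 100 and outer_rows =
     * clamp_row_est(500) = 500; re-deriving the selectivities gives
     * 100/1000 = 0.1 and 500/1000 = 0.5 again, so only the middle 40%
     * of the outer input's run cost is charged to run_cost below.
     */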
3788 : : /*
3789 : : * We don't decide whether to materialize the inner path until we get to
3790 : : * final_cost_mergejoin(), so we don't know whether to check the pgs_mask
3791 : : * against PGS_MERGEJOIN_PLAIN or PGS_MERGEJOIN_MATERIALIZE. Instead, we
3792 : : * just account for any child nodes here and assume that this node is not
3793 : : * itself disabled; we can sort out the details in final_cost_mergejoin().
3794 : : *
3795 : : * (We could be more precise here by setting disabled_nodes to 1 at this
3796 : : * stage if both PGS_MERGEJOIN_PLAIN and PGS_MERGEJOIN_MATERIALIZE are
3797 : : * disabled, but that seems to go against the idea of making this function
3798 : : * produce a quick, optimistic approximation of the final cost.)
3799 : : */
97 rhaas@postgresql.org 3800 :GNC 1127106 : disabled_nodes = 0;
3801 : :
3802 : : /* cost of source data */
3803 : :
9576 tgl@sss.pgh.pa.us 3804 [ + + ]:CBC 1127106 : if (outersortkeys) /* do we need to sort outer? */
3805 : : {
3806 : : /*
3807 : : * We can assert that the outer path is not already ordered
3808 : : * appropriately for the mergejoin; otherwise, outersortkeys would
3809 : : * have been set to NIL.
3810 : : */
362 rguo@postgresql.org 3811 [ - + ]: 575489 : Assert(!pathkeys_contained_in(outersortkeys, outer_path->pathkeys));
3812 : :
3813 : : /*
3814 : : * We choose to use incremental sort if it is enabled and there are
3815 : : * presorted keys; otherwise we use full sort.
3816 : : */
3817 [ + + + + ]: 575489 : if (enable_incremental_sort && outer_presorted_keys > 0)
3818 : : {
3819 : 2071 : cost_incremental_sort(&sort_path,
3820 : : root,
3821 : : outersortkeys,
3822 : : outer_presorted_keys,
3823 : : outer_path->disabled_nodes,
3824 : : outer_path->startup_cost,
3825 : : outer_path->total_cost,
3826 : : outer_path_rows,
3827 : 2071 : outer_path->pathtarget->width,
3828 : : 0.0,
3829 : : work_mem,
3830 : : -1.0);
3831 : : }
3832 : : else
3833 : : {
573 3834 : 573418 : cost_sort(&sort_path,
3835 : : root,
3836 : : outersortkeys,
3837 : : outer_path->disabled_nodes,
3838 : : outer_path->total_cost,
3839 : : outer_path_rows,
3840 : 573418 : outer_path->pathtarget->width,
3841 : : 0.0,
3842 : : work_mem,
3843 : : -1.0);
3844 : : }
3845 : :
622 rhaas@postgresql.org 3846 : 575489 : disabled_nodes += sort_path.disabled_nodes;
9576 tgl@sss.pgh.pa.us 3847 : 575489 : startup_cost += sort_path.startup_cost;
6723 3848 : 575489 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3849 : 575489 : * outerstartsel;
8831 3850 : 575489 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
6723 3851 : 575489 : * (outerendsel - outerstartsel);
3852 : : }
3853 : : else
3854 : : {
622 rhaas@postgresql.org 3855 : 551617 : disabled_nodes += outer_path->disabled_nodes;
9576 tgl@sss.pgh.pa.us 3856 : 551617 : startup_cost += outer_path->startup_cost;
6723 3857 : 551617 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3858 : 551617 : * outerstartsel;
8831 3859 : 551617 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
6723 3860 : 551617 : * (outerendsel - outerstartsel);
3861 : : }
3862 : :
9576 3863 [ + + ]: 1127106 : if (innersortkeys) /* do we need to sort inner? */
3864 : : {
3865 : : /*
3866 : : * We can assert that the inner path is not already ordered
3867 : : * appropriately for the mergejoin; otherwise, innersortkeys would
3868 : : * have been set to NIL.
3869 : : */
362 rguo@postgresql.org 3870 [ - + ]: 903301 : Assert(!pathkeys_contained_in(innersortkeys, inner_path->pathkeys));
3871 : :
3872 : : /*
3873 : : * We do not consider incremental sort for the inner path, because
3874 : : * incremental sort does not support mark/restore.
3875 : : */
3876 : :
9576 tgl@sss.pgh.pa.us 3877 : 903301 : cost_sort(&sort_path,
3878 : : root,
3879 : : innersortkeys,
3880 : : inner_path->disabled_nodes,
3881 : : inner_path->total_cost,
3882 : : inner_path_rows,
3729 3883 : 903301 : inner_path->pathtarget->width,
3884 : : 0.0,
3885 : : work_mem,
3886 : : -1.0);
622 rhaas@postgresql.org 3887 : 903301 : disabled_nodes += sort_path.disabled_nodes;
9576 tgl@sss.pgh.pa.us 3888 : 903301 : startup_cost += sort_path.startup_cost;
6723 3889 : 903301 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
6015 3890 : 903301 : * innerstartsel;
3891 : 903301 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3892 : 903301 : * (innerendsel - innerstartsel);
3893 : : }
3894 : : else
3895 : : {
622 rhaas@postgresql.org 3896 : 223805 : disabled_nodes += inner_path->disabled_nodes;
9576 tgl@sss.pgh.pa.us 3897 : 223805 : startup_cost += inner_path->startup_cost;
6723 3898 : 223805 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
6015 3899 : 223805 : * innerstartsel;
3900 : 223805 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3901 : 223805 : * (innerendsel - innerstartsel);
3902 : : }
3903 : :
3904 : : /*
3905 : : * We can't yet determine whether rescanning occurs, or whether
3906 : : * materialization of the inner input should be done. The minimum
3907 : : * possible inner input cost, regardless of rescan and materialization
3908 : : * considerations, is inner_run_cost. We include that in
3909 : : * workspace->total_cost, but not yet in run_cost.
3910 : : */
3911 : :
3912 : : /* CPU costs left for later */
3913 : :
3914 : : /* Public result fields */
622 rhaas@postgresql.org 3915 : 1127106 : workspace->disabled_nodes = disabled_nodes;
5212 tgl@sss.pgh.pa.us 3916 : 1127106 : workspace->startup_cost = startup_cost;
3917 : 1127106 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3918 : : /* Save private data for final_cost_mergejoin */
3919 : 1127106 : workspace->run_cost = run_cost;
3920 : 1127106 : workspace->inner_run_cost = inner_run_cost;
3921 : 1127106 : workspace->outer_rows = outer_rows;
3922 : 1127106 : workspace->inner_rows = inner_rows;
3923 : 1127106 : workspace->outer_skip_rows = outer_skip_rows;
3924 : 1127106 : workspace->inner_skip_rows = inner_skip_rows;
3925 : 1127106 : }
3926 : :
3927 : : /*
3928 : : * final_cost_mergejoin
3929 : : * Final estimate of the cost and result size of a mergejoin path.
3930 : : *
3931 : : * Unlike other costsize functions, this routine makes two actual decisions:
3932 : : * whether the executor will need to do mark/restore, and whether we should
3933 : : * materialize the inner path. It would be logically cleaner to build
3934 : : * separate paths testing these alternatives, but that would require repeating
3935 : : * most of the cost calculations, which are not all that cheap. Since the
3936 : : * choice will not affect output pathkeys or startup cost, only total cost,
3937 : : * there is no possibility of wanting to keep more than one path. So it seems
3938 : : * best to make the decisions here and record them in the path's
3939 : : * skip_mark_restore and materialize_inner fields.
3940 : : *
3941 : : * Mark/restore overhead is usually required, but can be skipped if we know
3942 : : * that the executor need find only one match per outer tuple, and that the
3943 : : * mergeclauses are sufficient to identify a match.
3944 : : *
3945 : : * We materialize the inner path if we need mark/restore and either the inner
3946 : : * path can't support mark/restore, or it's cheaper to use an interposed
3947 : : * Material node to handle mark/restore.
3948 : : *
3949 : : * 'path' is already filled in except for the rows and cost fields and
3950 : : * skip_mark_restore and materialize_inner
3951 : : * 'workspace' is the result from initial_cost_mergejoin
3952 : : * 'extra' contains miscellaneous information about the join
3953 : : */
3954 : : void
3955 : 348209 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3956 : : JoinCostWorkspace *workspace,
3957 : : JoinPathExtraData *extra)
3958 : : {
3959 : 348209 : Path *outer_path = path->jpath.outerjoinpath;
3960 : 348209 : Path *inner_path = path->jpath.innerjoinpath;
3961 : 348209 : double inner_path_rows = inner_path->rows;
3962 : 348209 : List *mergeclauses = path->path_mergeclauses;
3963 : 348209 : List *innersortkeys = path->innersortkeys;
3964 : 348209 : Cost startup_cost = workspace->startup_cost;
3965 : 348209 : Cost run_cost = workspace->run_cost;
3966 : 348209 : Cost inner_run_cost = workspace->inner_run_cost;
3967 : 348209 : double outer_rows = workspace->outer_rows;
3968 : 348209 : double inner_rows = workspace->inner_rows;
3969 : 348209 : double outer_skip_rows = workspace->outer_skip_rows;
3970 : 348209 : double inner_skip_rows = workspace->inner_skip_rows;
3971 : : Cost cpu_per_tuple,
3972 : : bare_inner_cost,
3973 : : mat_inner_cost;
3974 : : QualCost merge_qual_cost;
3975 : : QualCost qp_qual_cost;
3976 : : double mergejointuples,
3977 : : rescannedtuples;
3978 : : double rescanratio;
97 rhaas@postgresql.org 3979 :GNC 348209 : uint64 enable_mask = 0;
3980 : :
3981 : : /* Protect some assumptions below that rowcounts aren't zero */
2024 drowley@postgresql.o 3982 [ + + ]:CBC 348209 : if (inner_path_rows <= 0)
5212 tgl@sss.pgh.pa.us 3983 : 64 : inner_path_rows = 1;
3984 : :
3985 : : /* Mark the path with the correct row estimate */
5129 3986 [ + + ]: 348209 : if (path->jpath.path.param_info)
3987 : 1448 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3988 : : else
3989 : 346761 : path->jpath.path.rows = path->jpath.path.parent->rows;
3990 : :
3991 : : /* For partial paths, scale row estimate. */
3399 rhaas@postgresql.org 3992 [ + + ]: 348209 : if (path->jpath.path.parallel_workers > 0)
3993 : : {
3275 bruce@momjian.us 3994 : 47229 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3995 : :
3338 rhaas@postgresql.org 3996 : 47229 : path->jpath.path.rows =
3997 : 47229 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3998 : : }
3999 : :
4000 : : /*
4001 : : * Compute cost of the mergequals and qpquals (other restriction clauses)
4002 : : * separately.
4003 : : */
5212 tgl@sss.pgh.pa.us 4004 : 348209 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
4005 : 348209 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4006 : 348209 : qp_qual_cost.startup -= merge_qual_cost.startup;
4007 : 348209 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
4008 : :
4009 : : /*
4010 : : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4011 : : * executor will stop scanning for matches after the first match. When
4012 : : * all the joinclauses are merge clauses, this means we don't ever need to
4013 : : * back up the merge, and so we can skip mark/restore overhead.
4014 : : */
3315 4015 [ + + ]: 348209 : if ((path->jpath.jointype == JOIN_SEMI ||
4016 [ + + ]: 343543 : path->jpath.jointype == JOIN_ANTI ||
4017 [ + + + + ]: 455571 : extra->inner_unique) &&
4018 : 119335 : (list_length(path->jpath.joinrestrictinfo) ==
4019 : 119335 : list_length(path->path_mergeclauses)))
4020 : 100623 : path->skip_mark_restore = true;
4021 : : else
4022 : 247586 : path->skip_mark_restore = false;
4023 : :
4024 : : /*
4025 : : * Get approx # tuples passing the mergequals. We use approx_tuple_count
4026 : : * here because we need an estimate done with JOIN_INNER semantics.
4027 : : */
5212 4028 : 348209 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
4029 : :
4030 : : /*
4031 : : * When there are equal merge keys in the outer relation, the mergejoin
4032 : : * must rescan any matching tuples in the inner relation. This means
4033 : : * re-fetching inner tuples; we have to estimate how often that happens.
4034 : : *
4035 : : * For regular inner and outer joins, the number of re-fetches can be
4036 : : * estimated approximately as size of merge join output minus size of
4037 : : * inner relation. Assume that the distinct key values are 1, 2, ..., and
4038 : : * denote the number of values of each key in the outer relation as m1,
4039 : : * m2, ...; in the inner relation, n1, n2, ... Then we have
4040 : : *
4041 : : * size of join = m1 * n1 + m2 * n2 + ...
4042 : : *
4043 : : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
4044 : : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
4045 : : * relation
4046 : : *
4047 : : * This equation works correctly for outer tuples having no inner match
4048 : : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
4049 : : * are effectively subtracting those from the number of rescanned tuples,
4050 : : * when we should not. Can we do better without expensive selectivity
4051 : : * computations?
4052 : : *
4053 : : * The whole issue is moot if we know we don't need to mark/restore at
4054 : : * all, or if we are working from a unique-ified outer input.
4055 : : */
259 rguo@postgresql.org 4056 [ + + ]:GNC 348209 : if (path->skip_mark_restore ||
4057 [ + + + + : 247586 : RELATION_WAS_MADE_UNIQUE(outer_path->parent, extra->sjinfo,
+ + ]
4058 : : path->jpath.jointype))
5212 tgl@sss.pgh.pa.us 4059 :CBC 103251 : rescannedtuples = 0;
4060 : : else
4061 : : {
4062 : 244958 : rescannedtuples = mergejointuples - inner_path_rows;
4063 : : /* Must clamp because of possible underestimate */
4064 [ + + ]: 244958 : if (rescannedtuples < 0)
4065 : 66706 : rescannedtuples = 0;
4066 : : }
4067 : :
4068 : : /*
4069 : : * We'll inflate various costs this much to account for rescanning. Note
4070 : : * that this is to be multiplied by something involving inner_rows, or
4071 : : * another number related to the portion of the inner rel we'll scan.
4072 : : */
2695 4073 : 348209 : rescanratio = 1.0 + (rescannedtuples / inner_rows);
4074 : :
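A worked instance of the formula above, with invented duplicate counts: if the outer relation has m = (2, 3) rows per key and the inner has n = (4, 1), the join produces 2*4 + 3*1 = 11 tuples against 5 inner tuples, so 6 are re-fetched and the inflation factor is 1 + 6/5 = 2.2.

#include <stdio.h>

/* Sketch of the rescanned-tuples estimate above. The per-key duplicate
 * counts m[] and n[] are invented; the planner only sees the aggregate
 * join size and inner relation size. The divisor here is the full inner
 * size, whereas the real code divides by inner_rows, the portion of the
 * inner rel it expects to scan. */
int
main(void)
{
    double m[] = {2, 3};    /* outer rows per distinct key value */
    double n[] = {4, 1};    /* inner rows per distinct key value */
    double joinsize = 0.0;
    double innersize = 0.0;

    for (int i = 0; i < 2; i++)
    {
        joinsize += m[i] * n[i];
        innersize += n[i];
    }

    /* rescanned tuples = size of join - size of inner relation */
    printf("rescanned = %g, rescanratio = %g\n",
           joinsize - innersize,
           1.0 + (joinsize - innersize) / innersize);   /* 6 and 2.2 */
    return 0;
}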
4075 : : /*
4076 : : * Decide whether we want to materialize the inner input to shield it from
4077 : : * mark/restore and from performing re-fetches. Our cost model for regular
4078 : : * re-fetches is that a re-fetch costs the same as an original fetch,
4079 : : * which is probably an overestimate; but on the other hand we ignore the
4080 : : * bookkeeping costs of mark/restore. Not clear if it's worth developing
4081 : : * a more refined model. So we just need to inflate the inner run cost by
4082 : : * rescanratio.
4083 : : */
6015 4084 : 348209 : bare_inner_cost = inner_run_cost * rescanratio;
4085 : :
4086 : : /*
4087 : : * When we interpose a Material node the re-fetch cost is assumed to be
4088 : : * just cpu_operator_cost per tuple, independently of the underlying
4089 : : * plan's cost; and we charge an extra cpu_operator_cost per original
4090 : : * fetch as well. Note that we're assuming the materialize node will
4091 : : * never spill to disk, since it only has to remember tuples back to the
4092 : : * last mark. (If there are a huge number of duplicates, our other cost
4093 : : * factors will make the path so expensive that it probably won't get
4094 : : * chosen anyway.) So we don't use cost_rescan here.
4095 : : *
4096 : : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
4097 : : * of the generated Material node.
4098 : : */
4099 : 348209 : mat_inner_cost = inner_run_cost +
2695 4100 : 348209 : cpu_operator_cost * inner_rows * rescanratio;
4101 : :
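The materialization decision below largely reduces to comparing these two totals. As a hedged sketch with invented inputs: inner_run_cost = 100, rescanratio = 2.2, inner_rows = 1000, and the default cpu_operator_cost of 0.0025 give a bare cost of 220 versus a materialized cost of 105.5, so the Material node would look cheaper.

#include <stdio.h>

/* Invented-number sketch of the bare-vs-materialized comparison that
 * final_cost_mergejoin performs with the quantities computed above. */
int
main(void)
{
    double inner_run_cost = 100.0;
    double rescanratio = 2.2;
    double inner_rows = 1000.0;
    double cpu_operator_cost = 0.0025;  /* the GUC's default value */

    double bare_inner_cost = inner_run_cost * rescanratio;
    double mat_inner_cost = inner_run_cost +
        cpu_operator_cost * inner_rows * rescanratio;

    printf("bare = %g, materialized = %g\n",
           bare_inner_cost, mat_inner_cost);    /* 220 vs 105.5 */
    return 0;
}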
4102 : : /*
4103 : : * If we don't need mark/restore at all, we don't need materialization.
4104 : : */
3315 4105 [ + + ]: 348209 : if (path->skip_mark_restore)
4106 : 100623 : path->materialize_inner = false;
4107 : :
4108 : : /*
4109 : : * If merge joins with materialization are enabled, then choose
4110 : : * materialization if either (a) it looks cheaper or (b) merge joins
4111 : : * without materialization are disabled.
4112 : : */
97 rhaas@postgresql.org 4113 [ + + + + ]:GNC 247586 : else if ((extra->pgs_mask & PGS_MERGEJOIN_MATERIALIZE) != 0 &&
4114 : 243476 : (mat_inner_cost < bare_inner_cost ||
4115 [ + + ]: 243476 : (extra->pgs_mask & PGS_MERGEJOIN_PLAIN) == 0))
6015 tgl@sss.pgh.pa.us 4116 :CBC 2818 : path->materialize_inner = true;
4117 : :
4118 : : /*
4119 : : * Regardless of what plan shapes are enabled and what the costs seem to
4120 : : * be, we *must* materialize it if the inner path is to be used directly
4121 : : * (without sorting) and it doesn't support mark/restore. Planner failure
4122 : : * is not an option!
4123 : : *
4124 : : * Since the inner side must be ordered, and only Sorts and IndexScans can
4125 : : * create order to begin with, and they both support mark/restore, you
4126 : : * might think there's no problem --- but you'd be wrong. Nestloop and
4127 : : * merge joins can *preserve* the order of their inputs, so they can be
4128 : : * selected as the input of a mergejoin, and they don't support
4129 : : * mark/restore at present.
4130 : : */
4131 [ + + ]: 244768 : else if (innersortkeys == NIL &&
4197 rhaas@postgresql.org 4132 [ + + ]: 6592 : !ExecSupportsMarkRestore(inner_path))
6015 tgl@sss.pgh.pa.us 4133 : 1207 : path->materialize_inner = true;
4134 : :
4135 : : /*
4136 : : * Also, force materializing if the inner path is to be sorted and the
4137 : : * sort is expected to spill to disk. This is because the final merge
4138 : : * pass can be done on-the-fly if it doesn't have to support mark/restore.
4139 : : * We don't try to adjust the cost estimates for this consideration,
4140 : : * though.
4141 : : *
4142 : : * Since materialization is a performance optimization in this case,
4143 : : * rather than necessary for correctness, we skip it if materialization is
4144 : : * switched off.
4145 : : */
97 rhaas@postgresql.org 4146 [ + + + + ]:GNC 243561 : else if ((extra->pgs_mask & PGS_MERGEJOIN_MATERIALIZE) != 0 &&
4147 : 236905 : innersortkeys != NIL &&
3729 tgl@sss.pgh.pa.us 4148 :CBC 236905 : relation_byte_size(inner_path_rows,
4149 : 236905 : inner_path->pathtarget->width) >
459 4150 [ + + ]: 236905 : work_mem * (Size) 1024)
6015 4151 : 164 : path->materialize_inner = true;
4152 : : else
4153 : 243397 : path->materialize_inner = false;
4154 : :
4155 : : /* Get the number of disabled nodes, not yet including this one. */
97 rhaas@postgresql.org 4156 :GNC 348209 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
4157 : :
4158 : : /*
4159 : : * Charge the right incremental cost for the chosen case, and update
4160 : : * enable_mask as appropriate.
4161 : : */
6015 tgl@sss.pgh.pa.us 4162 [ + + ]:CBC 348209 : if (path->materialize_inner)
4163 : : {
4164 : 4189 : run_cost += mat_inner_cost;
97 rhaas@postgresql.org 4165 :GNC 4189 : enable_mask |= PGS_MERGEJOIN_MATERIALIZE;
4166 : : }
4167 : : else
4168 : : {
6015 tgl@sss.pgh.pa.us 4169 :CBC 344020 : run_cost += bare_inner_cost;
97 rhaas@postgresql.org 4170 :GNC 344020 : enable_mask |= PGS_MERGEJOIN_PLAIN;
4171 : : }
4172 : :
4173 : : /* Increment the count of disabled nodes if this node is disabled. */
4174 [ + + ]: 348209 : if (path->jpath.path.parallel_workers == 0)
4175 : 300980 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
4176 [ + + ]: 348209 : if ((extra->pgs_mask & enable_mask) != enable_mask)
4177 : 562 : ++path->jpath.path.disabled_nodes;
4178 : :
4179 : : /* CPU costs */
4180 : :
4181 : : /*
4182 : : * The number of tuple comparisons needed is approximately number of outer
4183 : : * rows plus number of inner rows plus number of rescanned tuples (can we
4184 : : * refine this?). At each one, we need to evaluate the mergejoin quals.
4185 : : */
8499 tgl@sss.pgh.pa.us 4186 :CBC 348209 : startup_cost += merge_qual_cost.startup;
6723 4187 : 348209 : startup_cost += merge_qual_cost.per_tuple *
4188 : 348209 : (outer_skip_rows + inner_skip_rows * rescanratio);
8499 4189 : 348209 : run_cost += merge_qual_cost.per_tuple *
6723 4190 : 348209 : ((outer_rows - outer_skip_rows) +
4191 : 348209 : (inner_rows - inner_skip_rows) * rescanratio);
4192 : :
4193 : : /*
4194 : : * For each tuple that gets through the mergejoin proper, we charge
4195 : : * cpu_tuple_cost plus the cost of evaluating additional restriction
4196 : : * clauses that are to be applied at the join. (This is pessimistic since
4197 : : * not all of the quals may get evaluated at each tuple.)
4198 : : *
4199 : : * Note: we could adjust for SEMI/ANTI joins skipping some qual
4200 : : * evaluations here, but it's probably not worth the trouble.
4201 : : */
8499 4202 : 348209 : startup_cost += qp_qual_cost.startup;
4203 : 348209 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
6471 4204 : 348209 : run_cost += cpu_per_tuple * mergejointuples;
4205 : :
4206 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 4207 : 348209 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4208 : 348209 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4209 : :
8499 4210 : 348209 : path->jpath.path.startup_cost = startup_cost;
4211 : 348209 : path->jpath.path.total_cost = startup_cost + run_cost;
10892 scrappy@hub.org 4212 : 348209 : }
4213 : :
4214 : : /*
4215 : : * run mergejoinscansel() with caching
4216 : : */
4217 : : static MergeScanSelCache *
6746 bruce@momjian.us 4218 : 1122141 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
4219 : : {
4220 : : MergeScanSelCache *cache;
4221 : : ListCell *lc;
4222 : : Selectivity leftstartsel,
4223 : : leftendsel,
4224 : : rightstartsel,
4225 : : rightendsel;
4226 : : MemoryContext oldcontext;
4227 : :
4228 : : /* Do we have this result already? */
7043 tgl@sss.pgh.pa.us 4229 [ + + + + : 1122145 : foreach(lc, rinfo->scansel_cache)
+ + ]
4230 : : {
4231 : 1019271 : cache = (MergeScanSelCache *) lfirst(lc);
4232 [ + - ]: 1019271 : if (cache->opfamily == pathkey->pk_opfamily &&
5526 4233 [ + - ]: 1019271 : cache->collation == pathkey->pk_eclass->ec_collation &&
396 peter@eisentraut.org 4234 [ + + ]: 1019271 : cache->cmptype == pathkey->pk_cmptype &&
7043 tgl@sss.pgh.pa.us 4235 [ + - ]: 1019267 : cache->nulls_first == pathkey->pk_nulls_first)
4236 : 1019267 : return cache;
4237 : : }
4238 : :
4239 : : /* Nope, do the computation */
4240 : 102874 : mergejoinscansel(root,
4241 : 102874 : (Node *) rinfo->clause,
4242 : : pathkey->pk_opfamily,
4243 : : pathkey->pk_cmptype,
4244 : 102874 : pathkey->pk_nulls_first,
4245 : : &leftstartsel,
4246 : : &leftendsel,
4247 : : &rightstartsel,
4248 : : &rightendsel);
4249 : :
4250 : : /* Cache the result in suitably long-lived workspace */
4251 : 102874 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4252 : :
146 michael@paquier.xyz 4253 :GNC 102874 : cache = palloc_object(MergeScanSelCache);
7043 tgl@sss.pgh.pa.us 4254 :CBC 102874 : cache->opfamily = pathkey->pk_opfamily;
5526 4255 : 102874 : cache->collation = pathkey->pk_eclass->ec_collation;
396 peter@eisentraut.org 4256 : 102874 : cache->cmptype = pathkey->pk_cmptype;
7043 tgl@sss.pgh.pa.us 4257 : 102874 : cache->nulls_first = pathkey->pk_nulls_first;
6723 4258 : 102874 : cache->leftstartsel = leftstartsel;
4259 : 102874 : cache->leftendsel = leftendsel;
4260 : 102874 : cache->rightstartsel = rightstartsel;
4261 : 102874 : cache->rightendsel = rightendsel;
4262 : :
7043 4263 : 102874 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4264 : :
4265 : 102874 : MemoryContextSwitchTo(oldcontext);
4266 : :
4267 : 102874 : return cache;
4268 : : }
4269 : :
4270 : : /*
4271 : : * initial_cost_hashjoin
4272 : : * Preliminary estimate of the cost of a hashjoin path.
4273 : : *
4274 : : * This must quickly produce lower-bound estimates of the path's startup and
4275 : : * total costs. If we are unable to eliminate the proposed path from
4276 : : * consideration using the lower bounds, final_cost_hashjoin will be called
4277 : : * to obtain the final estimates.
4278 : : *
4279 : : * The exact division of labor between this function and final_cost_hashjoin
4280 : : * is private to them, and represents a tradeoff between speed of the initial
4281 : : * estimate and getting a tight lower bound. We choose to not examine the
4282 : : * join quals here (other than by counting the number of hash clauses),
4283 : : * so we can't do much with CPU costs. We do assume that
4284 : : * ExecChooseHashTableSize is cheap enough to use here.
4285 : : *
4286 : : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4287 : : * other data to be used by final_cost_hashjoin
4288 : : * 'jointype' is the type of join to be performed
4289 : : * 'hashclauses' is the list of joinclauses to be used as hash clauses
4290 : : * 'outer_path' is the outer input to the join
4291 : : * 'inner_path' is the inner input to the join
4292 : : * 'extra' contains miscellaneous information about the join
4293 : : * 'parallel_hash' indicates that inner_path is partial and that a shared
4294 : : * hash table will be built in parallel
4295 : : */
4296 : : void
5212 4297 : 660164 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4298 : : JoinType jointype,
4299 : : List *hashclauses,
4300 : : Path *outer_path, Path *inner_path,
4301 : : JoinPathExtraData *extra,
4302 : : bool parallel_hash)
4303 : : {
4304 : : int disabled_nodes;
9576 4305 : 660164 : Cost startup_cost = 0;
4306 : 660164 : Cost run_cost = 0;
5212 4307 : 660164 : double outer_path_rows = outer_path->rows;
4308 : 660164 : double inner_path_rows = inner_path->rows;
3058 andres@anarazel.de 4309 : 660164 : double inner_path_rows_total = inner_path_rows;
8010 neilc@samurai.com 4310 : 660164 : int num_hashclauses = list_length(hashclauses);
4311 : : int numbuckets;
4312 : : int numbatches;
4313 : : int num_skew_mcvs;
4314 : : size_t space_allowed; /* unused */
97 rhaas@postgresql.org 4315 :GNC 660164 : uint64 enable_mask = PGS_HASHJOIN;
4316 : :
4317 [ + + ]: 660164 : if (outer_path->parallel_workers == 0)
4318 : 546145 : enable_mask |= PGS_CONSIDER_NONPARTIAL;
4319 : :
4320 : : /* Count up disabled nodes. */
4321 : 660164 : disabled_nodes = (extra->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
622 rhaas@postgresql.org 4322 :CBC 660164 : disabled_nodes += inner_path->disabled_nodes;
4323 : 660164 : disabled_nodes += outer_path->disabled_nodes;
4324 : :
4325 : : /* cost of source data */
9576 tgl@sss.pgh.pa.us 4326 : 660164 : startup_cost += outer_path->startup_cost;
4327 : 660164 : run_cost += outer_path->total_cost - outer_path->startup_cost;
4328 : 660164 : startup_cost += inner_path->total_cost;
4329 : :
4330 : : /*
4331 : : * Cost of computing hash function: must do it once per input tuple. We
4332 : : * charge one cpu_operator_cost for each column's hash function. Also,
4333 : : * tack on one cpu_tuple_cost per inner row, to model the costs of
4334 : : * inserting the row into the hashtable.
4335 : : *
4336 : : * XXX when a hashclause is more complex than a single operator, we really
4337 : : * should charge the extra eval costs of the left or right side, as
4338 : : * appropriate, here. This seems more work than it's worth at the moment.
4339 : : */
7057 4340 : 660164 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4341 : 660164 : * inner_path_rows;
8499 4342 : 660164 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
4343 : :
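For concreteness, a sketch of these two charges with invented row counts and the default cost parameters: two hash clauses over 10,000 inner and 100,000 outer rows add 150 units of startup cost (hashing plus hashtable insertion for each inner row) and 500 units of run cost (hashing each outer row).

#include <stdio.h>

/* Sketch of the hashing charges above; row counts are invented and the
 * cost parameters are the documented GUC defaults. */
int
main(void)
{
    double cpu_operator_cost = 0.0025;
    double cpu_tuple_cost = 0.01;
    int num_hashclauses = 2;
    double inner_path_rows = 10000.0;
    double outer_path_rows = 100000.0;

    double startup = (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
        * inner_path_rows;                                              /* 150 */
    double run = cpu_operator_cost * num_hashclauses * outer_path_rows; /* 500 */

    printf("startup += %g, run += %g\n", startup, run);
    return 0;
}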
4344 : : /*
4345 : : * If this is a parallel hash build, then the value we have for
4346 : : * inner_rows_total currently refers only to the rows returned by each
4347 : : * participant. For shared hash table size estimation, we need the total
4348 : : * number, so we need to undo the division.
4349 : : */
3058 andres@anarazel.de 4350 [ + + ]: 660164 : if (parallel_hash)
4351 : 57878 : inner_path_rows_total *= get_parallel_divisor(inner_path);
4352 : :
4353 : : /*
4354 : : * Get hash table size that executor would use for inner relation.
4355 : : *
4356 : : * XXX for the moment, always assume that skew optimization will be
4357 : : * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4358 : : * trying to determine that for sure.
4359 : : *
4360 : : * XXX at some point it might be interesting to try to account for skew
4361 : : * optimization in the cost estimate, but for now, we don't.
4362 : : */
4363 : 660164 : ExecChooseHashTableSize(inner_path_rows_total,
3729 tgl@sss.pgh.pa.us 4364 : 660164 : inner_path->pathtarget->width,
4365 : : true, /* useskew */
4366 : : parallel_hash, /* try_combined_hash_mem */
4367 : : outer_path->parallel_workers,
4368 : : &space_allowed,
4369 : : &numbuckets,
4370 : : &numbatches,
4371 : : &num_skew_mcvs);
4372 : :
4373 : : /*
4374 : : * If inner relation is too big then we will need to "batch" the join,
4375 : : * which implies writing and reading most of the tuples to disk an extra
4376 : : * time. Charge seq_page_cost per page, since the I/O should be nice and
4377 : : * sequential. Writing the inner rel counts as startup cost, all the rest
4378 : : * as run cost.
4379 : : */
5212 4380 [ + + ]: 660164 : if (numbatches > 1)
4381 : : {
4382 : 3517 : double outerpages = page_size(outer_path_rows,
3729 4383 : 3517 : outer_path->pathtarget->width);
5212 4384 : 3517 : double innerpages = page_size(inner_path_rows,
3729 4385 : 3517 : inner_path->pathtarget->width);
4386 : :
5212 4387 : 3517 : startup_cost += seq_page_cost * innerpages;
4388 : 3517 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4389 : : }
4390 : :
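A sketch of the batching surcharge with invented page counts: at the default seq_page_cost of 1.0, writing a 2,000-page inner rel is startup cost, while re-reading it and writing plus re-reading a 5,000-page outer rel (12,000 page fetches) lands in run cost.

#include <stdio.h>

/* Invented-size sketch of the extra sequential I/O charged when the
 * hash join must be split into more than one batch. */
int
main(void)
{
    double seq_page_cost = 1.0; /* default GUC value */
    double innerpages = 2000.0; /* hypothetical page counts */
    double outerpages = 5000.0;

    double startup = seq_page_cost * innerpages;                    /* 2000 */
    double run = seq_page_cost * (innerpages + 2 * outerpages);     /* 12000 */

    printf("startup += %g, run += %g\n", startup, run);
    return 0;
}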
4391 : : /* CPU costs left for later */
4392 : :
4393 : : /* Public result fields */
622 rhaas@postgresql.org 4394 : 660164 : workspace->disabled_nodes = disabled_nodes;
5212 tgl@sss.pgh.pa.us 4395 : 660164 : workspace->startup_cost = startup_cost;
4396 : 660164 : workspace->total_cost = startup_cost + run_cost;
4397 : : /* Save private data for final_cost_hashjoin */
4398 : 660164 : workspace->run_cost = run_cost;
4399 : 660164 : workspace->numbuckets = numbuckets;
4400 : 660164 : workspace->numbatches = numbatches;
3058 andres@anarazel.de 4401 : 660164 : workspace->inner_rows_total = inner_path_rows_total;
5212 tgl@sss.pgh.pa.us 4402 : 660164 : }
4403 : :
4404 : : /*
4405 : : * final_cost_hashjoin
4406 : : * Final estimate of the cost and result size of a hashjoin path.
4407 : : *
4408 : : * Note: the numbatches estimate is also saved into 'path' for use later
4409 : : *
4410 : : * 'path' is already filled in except for the rows and cost fields and
4411 : : * num_batches
4412 : : * 'workspace' is the result from initial_cost_hashjoin
4413 : : * 'extra' contains miscellaneous information about the join
4414 : : */
4415 : : void
4416 : 351606 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4417 : : JoinCostWorkspace *workspace,
4418 : : JoinPathExtraData *extra)
4419 : : {
4420 : 351606 : Path *outer_path = path->jpath.outerjoinpath;
4421 : 351606 : Path *inner_path = path->jpath.innerjoinpath;
4422 : 351606 : double outer_path_rows = outer_path->rows;
4423 : 351606 : double inner_path_rows = inner_path->rows;
3058 andres@anarazel.de 4424 : 351606 : double inner_path_rows_total = workspace->inner_rows_total;
5212 tgl@sss.pgh.pa.us 4425 : 351606 : List *hashclauses = path->path_hashclauses;
4426 : 351606 : Cost startup_cost = workspace->startup_cost;
4427 : 351606 : Cost run_cost = workspace->run_cost;
4428 : 351606 : int numbuckets = workspace->numbuckets;
4429 : 351606 : int numbatches = workspace->numbatches;
4430 : : Cost cpu_per_tuple;
4431 : : QualCost hash_qual_cost;
4432 : : QualCost qp_qual_cost;
4433 : : double hashjointuples;
4434 : : double virtualbuckets;
4435 : : Selectivity innerbucketsize;
4436 : : Selectivity innermcvfreq;
4437 : : ListCell *hcl;
4438 : :
4439 : : /* Set the number of disabled nodes. */
622 rhaas@postgresql.org 4440 : 351606 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
4441 : :
4442 : : /* Mark the path with the correct row estimate */
5129 tgl@sss.pgh.pa.us 4443 [ + + ]: 351606 : if (path->jpath.path.param_info)
4444 : 2981 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4445 : : else
4446 : 348625 : path->jpath.path.rows = path->jpath.path.parent->rows;
4447 : :
4448 : : /* For partial paths, scale row estimate. */
3399 rhaas@postgresql.org 4449 [ + + ]: 351606 : if (path->jpath.path.parallel_workers > 0)
4450 : : {
3275 bruce@momjian.us 4451 : 81855 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4452 : :
3338 rhaas@postgresql.org 4453 : 81855 : path->jpath.path.rows =
4454 : 81855 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
4455 : : }
4456 : :
4457 : : /* mark the path with estimated # of batches */
6249 tgl@sss.pgh.pa.us 4458 : 351606 : path->num_batches = numbatches;
4459 : :
4460 : : /* store the total number of tuples (sum of partial row estimates) */
3058 andres@anarazel.de 4461 : 351606 : path->inner_rows_total = inner_path_rows_total;
4462 : :
4463 : : /* and compute the number of "virtual" buckets in the whole join */
3240 tgl@sss.pgh.pa.us 4464 : 351606 : virtualbuckets = (double) numbuckets * (double) numbatches;
4465 : :
4466 : : /*
4467 : : * Determine bucketsize fraction and MCV frequency for the inner relation.
4468 : : * We use the smallest bucketsize or MCV frequency estimated for any
4469 : : * individual hashclause; this is undoubtedly conservative.
4470 : : *
4471 : : * BUT: if the inner relation has been unique-ified, we can assume it's good
4472 : : * for hashing. This is important both because it's the right answer, and
4473 : : * because we avoid contaminating the cache with a value that's wrong for
4474 : : * non-unique-ified paths.
4475 : : */
259 rguo@postgresql.org 4476 [ + + + + :GNC 351606 : if (RELATION_WAS_MADE_UNIQUE(inner_path->parent, extra->sjinfo,
+ + ]
4477 : : path->jpath.jointype))
4478 : : {
8498 tgl@sss.pgh.pa.us 4479 :CBC 2989 : innerbucketsize = 1.0 / virtualbuckets;
127 tgl@sss.pgh.pa.us 4480 :GNC 2989 : innermcvfreq = 1.0 / inner_path_rows_total;
4481 : : }
4482 : : else
4483 : : {
4484 : : List *otherclauses;
4485 : :
8498 tgl@sss.pgh.pa.us 4486 :CBC 348617 : innerbucketsize = 1.0;
3185 4487 : 348617 : innermcvfreq = 1.0;
4488 : :
4489 : : /* At first, try to estimate bucket size using extended statistics. */
421 akorotkov@postgresql 4490 : 348617 : otherclauses = estimate_multivariate_bucketsize(root,
4491 : : inner_path->parent,
4492 : : hashclauses,
4493 : : &innerbucketsize);
4494 : :
4495 : : /* Pass through the remaining clauses */
4496 [ + + + + : 729626 : foreach(hcl, otherclauses)
+ + ]
4497 : : {
3312 tgl@sss.pgh.pa.us 4498 : 381009 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4499 : : Selectivity thisbucketsize;
4500 : : Selectivity thismcvfreq;
4501 : :
4502 : : /*
4503 : : * First we have to figure out which side of the hashjoin clause
4504 : : * is the inner side.
4505 : : *
4506 : : * Since we tend to visit the same clauses over and over when
4507 : : * planning a large query, we cache the bucket stats estimates in
4508 : : * the RestrictInfo node to avoid repeated lookups of statistics.
4509 : : */
8487 4510 [ + + ]: 381009 : if (bms_is_subset(restrictinfo->right_relids,
4511 : 381009 : inner_path->parent->relids))
4512 : : {
4513 : : /* righthand side is inner */
8498 4514 : 197737 : thisbucketsize = restrictinfo->right_bucketsize;
4515 [ + + ]: 197737 : if (thisbucketsize < 0)
4516 : : {
4517 : : /* not cached yet */
3185 4518 : 83562 : estimate_hash_bucket_stats(root,
4519 : 83562 : get_rightop(restrictinfo->clause),
4520 : : virtualbuckets,
4521 : : &restrictinfo->right_mcvfreq,
4522 : : &restrictinfo->right_bucketsize);
4523 : 83562 : thisbucketsize = restrictinfo->right_bucketsize;
4524 : : }
4525 : 197737 : thismcvfreq = restrictinfo->right_mcvfreq;
4526 : : }
4527 : : else
4528 : : {
8487 4529 [ - + ]: 183272 : Assert(bms_is_subset(restrictinfo->left_relids,
4530 : : inner_path->parent->relids));
4531 : : /* lefthand side is inner */
8498 4532 : 183272 : thisbucketsize = restrictinfo->left_bucketsize;
4533 [ + + ]: 183272 : if (thisbucketsize < 0)
4534 : : {
4535 : : /* not cached yet */
3185 4536 : 71994 : estimate_hash_bucket_stats(root,
4537 : 71994 : get_leftop(restrictinfo->clause),
4538 : : virtualbuckets,
4539 : : &restrictinfo->left_mcvfreq,
4540 : : &restrictinfo->left_bucketsize);
4541 : 71994 : thisbucketsize = restrictinfo->left_bucketsize;
4542 : : }
4543 : 183272 : thismcvfreq = restrictinfo->left_mcvfreq;
4544 : : }
4545 : :
8498 4546 [ + + ]: 381009 : if (innerbucketsize > thisbucketsize)
4547 : 286765 : innerbucketsize = thisbucketsize;
4548 : : /* Disregard zero for MCV freq, it means we have no data */
127 tgl@sss.pgh.pa.us 4549 [ + + + + ]:GNC 381009 : if (thismcvfreq > 0.0 && innermcvfreq > thismcvfreq)
3185 tgl@sss.pgh.pa.us 4550 :CBC 278095 : innermcvfreq = thismcvfreq;
4551 : : }
4552 : : }
4553 : :
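The loop above keeps the smallest estimate seen for each quantity; here is a sketch with invented per-clause fractions (zero meaning "no MCV data", which is skipped):

#include <stdio.h>

/* Sketch of the conservative min-selection over hash clauses; the
 * per-clause bucketsize and MCV-frequency estimates are invented. */
int
main(void)
{
    double bucketsizes[] = {0.10, 0.02, 0.35};
    double mcvfreqs[] = {0.05, 0.0, 0.01};
    double innerbucketsize = 1.0;
    double innermcvfreq = 1.0;

    for (int i = 0; i < 3; i++)
    {
        if (innerbucketsize > bucketsizes[i])
            innerbucketsize = bucketsizes[i];
        /* Disregard zero for MCV freq, it means we have no data */
        if (mcvfreqs[i] > 0.0 && innermcvfreq > mcvfreqs[i])
            innermcvfreq = mcvfreqs[i];
    }
    printf("innerbucketsize = %g, innermcvfreq = %g\n",
           innerbucketsize, innermcvfreq);  /* 0.02 and 0.01 */
    return 0;
}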
4554 : : /*
4555 : : * If the bucket holding the inner MCV would exceed hash_mem, we don't
4556 : : * want to hash unless there is really no other alternative, so apply
4557 : : * disable_cost. (The executor normally copes with excessive memory usage
4558 : : * by splitting batches, but obviously it cannot separate equal values
4559 : : * that way, so it will be unable to drive the batch size below hash_mem
4560 : : * when this is true.)
4561 : : */
4562 : 351606 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
1745 4563 [ + + ]: 703212 : inner_path->pathtarget->width) > get_hash_memory_limit())
3185 4564 : 70 : startup_cost += disable_cost;
4565 : :
4566 : : /*
4567 : : * Compute cost of the hashquals and qpquals (other restriction clauses)
4568 : : * separately.
4569 : : */
5212 4570 : 351606 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
4571 : 351606 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4572 : 351606 : qp_qual_cost.startup -= hash_qual_cost.startup;
4573 : 351606 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4574 : :
4575 : : /* CPU costs */
4576 : :
3315 4577 [ + + ]: 351606 : if (path->jpath.jointype == JOIN_SEMI ||
4578 [ + + ]: 347234 : path->jpath.jointype == JOIN_ANTI ||
4579 [ + + ]: 341582 : extra->inner_unique)
6205 4580 : 96894 : {
4581 : : double outer_matched_rows;
4582 : : Selectivity inner_scan_frac;
4583 : :
4584 : : /*
4585 : : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4586 : : * executor will stop after the first match.
4587 : : *
4588 : : * For an outer-rel row that has at least one match, we can expect the
4589 : : * bucket scan to stop after a fraction 1/(match_count+1) of the
4590 : : * bucket's rows, if the matches are evenly distributed. Since they
4591 : : * probably aren't quite evenly distributed, we apply a fuzz factor of
4592 : : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4593 : : * to clamp inner_scan_frac to at most 1.0; but since match_count is
4594 : : * at least 1, no such clamp is needed now.)
4595 : : */
3315 4596 : 96894 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4597 : 96894 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4598 : :
6205 4599 : 96894 : startup_cost += hash_qual_cost.startup;
4600 : 193788 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4601 : 96894 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
4602 : :
4603 : : /*
4604 : : * For unmatched outer-rel rows, the picture is quite a lot different.
4605 : : * In the first place, there is no reason to assume that these rows
4606 : : * preferentially hit heavily-populated buckets; instead assume they
4607 : : * are uncorrelated with the inner distribution and so they see an
4608 : : * average bucket size of inner_path_rows / virtualbuckets. In the
4609 : : * second place, it seems likely that they will have few if any exact
4610 : : * hash-code matches and so very few of the tuples in the bucket will
4611 : : * actually require eval of the hash quals. We don't have any good
4612 : : * way to estimate how many will, but for the moment assume that the
4613 : : * effective cost per bucket entry is one-tenth what it is for
4614 : : * matchable tuples.
4615 : : */
4616 : 193788 : run_cost += hash_qual_cost.per_tuple *
4617 : 193788 : (outer_path_rows - outer_matched_rows) *
4618 : 96894 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
4619 : :
4620 : : /* Get # of tuples that will pass the basic join */
2852 4621 [ + + ]: 96894 : if (path->jpath.jointype == JOIN_ANTI)
6205 4622 : 5652 : hashjointuples = outer_path_rows - outer_matched_rows;
4623 : : else
2852 4624 : 91242 : hashjointuples = outer_matched_rows;
4625 : : }
4626 : : else
4627 : : {
4628 : : /*
4629 : : * The number of tuple comparisons needed is the number of outer
4630 : : * tuples times the typical number of tuples in a hash bucket, which
4631 : : * is the inner relation size times its bucketsize fraction. At each
4632 : : * one, we need to evaluate the hashjoin quals. But actually,
4633 : : * charging the full qual eval cost at each tuple is pessimistic,
4634 : : * since we don't evaluate the quals unless the hash values match
4635 : : * exactly. For lack of a better idea, halve the cost estimate to
4636 : : * allow for that.
4637 : : */
6205 4638 : 254712 : startup_cost += hash_qual_cost.startup;
4639 : 509424 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4640 : 254712 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
4641 : :
4642 : : /*
4643 : : * Get approx # tuples passing the hashquals. We use
4644 : : * approx_tuple_count here because we need an estimate done with
4645 : : * JOIN_INNER semantics.
4646 : : */
4647 : 254712 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4648 : : }
4649 : :
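To see the early-exit math at work, a sketch with invented statistics: a match_count of 3 gives inner_scan_frac = 2/(3+1) = 0.5, so a matched outer row is expected to scan half of its bucket (after the fuzz factor) before stopping.

#include <math.h>
#include <stdio.h>

/* Invented-number sketch of the matched-row bucket-scan charge above;
 * clamp_row_est() is elided for brevity. */
int
main(void)
{
    double outer_path_rows = 10000.0;
    double outer_match_frac = 0.6;  /* hypothetical semifactors */
    double match_count = 3.0;
    double inner_path_rows = 50000.0;
    double innerbucketsize = 0.002; /* 100 rows per bucket on average */
    double hash_qual_per_tuple = 0.0025;

    double outer_matched_rows = rint(outer_path_rows * outer_match_frac);
    double inner_scan_frac = 2.0 / (match_count + 1.0);

    double run = hash_qual_per_tuple * outer_matched_rows *
        (inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;

    printf("matched rows = %g, run cost += %g\n",
           outer_matched_rows, run);    /* 6000 and 375 */
    return 0;
}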
4650 : : /*
4651 : : * For each tuple that gets through the hashjoin proper, we charge
4652 : : * cpu_tuple_cost plus the cost of evaluating additional restriction
4653 : : * clauses that are to be applied at the join. (This is pessimistic since
4654 : : * not all of the quals may get evaluated at each tuple.)
4655 : : */
8499 4656 : 351606 : startup_cost += qp_qual_cost.startup;
4657 : 351606 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
6471 4658 : 351606 : run_cost += cpu_per_tuple * hashjointuples;
4659 : :
4660 : : /* tlist eval costs are paid per output row, not per tuple scanned */
3729 4661 : 351606 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4662 : 351606 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4663 : :
8499 4664 : 351606 : path->jpath.path.startup_cost = startup_cost;
4665 : 351606 : path->jpath.path.total_cost = startup_cost + run_cost;
9576 4666 : 351606 : }
4667 : :
4668 : :
4669 : : /*
4670 : : * cost_subplan
4671 : : * Figure the costs for a SubPlan (or initplan).
4672 : : *
4673 : : * Note: we could dig the subplan's Plan out of the root list, but in practice
4674 : : * all callers have it handy already, so we make them pass it.
4675 : : */
4676 : : void
6465 4677 : 34158 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4678 : : {
4679 : : QualCost sp_cost;
4680 : :
4681 : : /*
4682 : : * Figure any cost for evaluating the testexpr.
4683 : : *
4684 : : * Usually, SubPlan nodes are built very early, before we have constructed
4685 : : * any RelOptInfos for the parent query level, which means the parent root
4686 : : * does not yet contain enough information to safely consult statistics.
4687 : : * Therefore, we pass root as NULL here. cost_qual_eval() is already
4688 : : * well-equipped to handle a NULL root.
4689 : : *
4690 : : * One exception is SubPlan nodes built for the initplans of MIN/MAX
4691 : : * aggregates from indexes (cf. SS_make_initplan_from_plan). In this
4692 : : * case, having a NULL root is safe because testexpr will be NULL.
4693 : : * Besides, an initplan will by definition not consult anything from the
4694 : : * parent plan.
4695 : : */
4696 : 34158 : cost_qual_eval(&sp_cost,
4697 : 34158 : make_ands_implicit((Expr *) subplan->testexpr),
4698 : : NULL);
4699 : :
4700 [ + + ]: 34158 : if (subplan->useHashTable)
4701 : : {
4702 : : /*
4703 : : * If we are using a hash table for the subquery outputs, then the
4704 : : * cost of evaluating the query is a one-time cost. We charge one
4705 : : * cpu_operator_cost per tuple for the work of loading the hashtable,
4706 : : * too.
4707 : : */
4708 : 1720 : sp_cost.startup += plan->total_cost +
4709 : 1720 : cpu_operator_cost * plan->plan_rows;
4710 : :
4711 : : /*
4712 : : * The per-tuple costs include the cost of evaluating the lefthand
4713 : : * expressions, plus the cost of probing the hashtable. We already
4714 : : * accounted for the lefthand expressions as part of the testexpr, and
4715 : : * will also have counted one cpu_operator_cost for each comparison
4716 : : * operator. That is probably too low for the probing cost, but it's
4717 : : * hard to make a better estimate, so live with it for now.
4718 : : */
4719 : : }
4720 : : else
4721 : : {
4722 : : /*
4723 : : * Otherwise we will be rescanning the subplan output on each
4724 : : * evaluation. We need to estimate how much of the output we will
4725 : : * actually need to scan. NOTE: this logic should agree with the
4726 : : * tuple_fraction estimates used by make_subplan() in
4727 : : * plan/subselect.c.
4728 : : */
4729 : 32438 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4730 : :
4731 [ + + ]: 32438 : if (subplan->subLinkType == EXISTS_SUBLINK)
4732 : : {
4733 : : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
3692 4734 : 1886 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4735 : : }
6465 4736 [ + + ]: 30552 : else if (subplan->subLinkType == ALL_SUBLINK ||
4737 [ + + ]: 30537 : subplan->subLinkType == ANY_SUBLINK)
4738 : : {
4739 : : /* assume we need 50% of the tuples */
4740 : 121 : sp_cost.per_tuple += 0.50 * plan_run_cost;
4741 : : /* also charge a cpu_operator_cost per row examined */
4742 : 121 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4743 : : }
4744 : : else
4745 : : {
4746 : : /* assume we need all tuples */
4747 : 30431 : sp_cost.per_tuple += plan_run_cost;
4748 : : }
4749 : :
4750 : : /*
4751 : : * Also account for subplan's startup cost. If the subplan is
4752 : : * uncorrelated or undirect correlated, AND its topmost node is one
4753 : : * that materializes its output, assume that we'll only need to pay
4754 : : * its startup cost once; otherwise assume we pay the startup cost
4755 : : * every time.
4756 : : */
4757 [ + + + + ]: 42541 : if (subplan->parParam == NIL &&
6079 4758 : 10103 : ExecMaterializesOutput(nodeTag(plan)))
6465 4759 : 752 : sp_cost.startup += plan->startup_cost;
4760 : : else
4761 : 31686 : sp_cost.per_tuple += plan->startup_cost;
4762 : : }
4763 : :
46 rhaas@postgresql.org 4764 :GNC 34158 : subplan->disabled_nodes = plan->disabled_nodes;
6465 tgl@sss.pgh.pa.us 4765 :CBC 34158 : subplan->startup_cost = sp_cost.startup;
4766 : 34158 : subplan->per_call_cost = sp_cost.per_tuple;
4767 : 34158 : }
4768 : :
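A sketch of the per-call charges that fall out of those rules, with an invented subplan costing 1000 run units over 500 rows: an EXISTS sublink pays 1000/500 = 2 per call, while an ANY sublink pays half the run cost plus half a cpu_operator_cost per row examined.

#include <stdio.h>

/* Invented-number sketch of cost_subplan's tuple-fraction charges. */
int
main(void)
{
    double plan_run_cost = 1000.0;  /* total_cost - startup_cost */
    double plan_rows = 500.0;
    double cpu_operator_cost = 0.0025;

    double exists_per_call = plan_run_cost / plan_rows;     /* fetch 1 tuple */
    double any_per_call = 0.50 * plan_run_cost +            /* 50% of tuples */
        0.50 * plan_rows * cpu_operator_cost;

    printf("EXISTS: %g per call, ANY: %g per call\n",
           exists_per_call, any_per_call);  /* 2 and 500.625 */
    return 0;
}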
4769 : :
4770 : : /*
4771 : : * cost_rescan
4772 : : * Given a finished Path, estimate the costs of rescanning it after
4773 : : * having done so the first time. For some Path types a rescan is
4774 : : * cheaper than an original scan (if no parameters change), and this
4775 : : * function embodies knowledge about that. The default is to return
4776 : : * the same costs stored in the Path. (Note that the cost estimates
4777 : : * actually stored in Paths are always for first scans.)
4778 : : *
4779 : : * This function is not currently intended to model effects such as rescans
4780 : : * being cheaper due to disk block caching; what we are concerned with is
4781 : : * plan types wherein the executor caches results explicitly, or doesn't
4782 : : * redo startup calculations, etc.
4783 : : */
4784 : : static void
6079 4785 : 2586039 : cost_rescan(PlannerInfo *root, Path *path,
4786 : : Cost *rescan_startup_cost, /* output parameters */
4787 : : Cost *rescan_total_cost)
4788 : : {
4789 [ + + + + : 2586039 : switch (path->pathtype)
+ + ]
4790 : : {
4791 : 35506 : case T_FunctionScan:
4792 : :
4793 : : /*
4794 : : * Currently, nodeFunctionscan.c always executes the function to
4795 : : * completion before returning any rows, and caches the results in
4796 : : * a tuplestore. So the function eval cost is all startup cost
4797 : : * and isn't paid over again on rescans. However, all run costs
4798 : : * will be paid over again.
4799 : : */
4800 : 35506 : *rescan_startup_cost = 0;
4801 : 35506 : *rescan_total_cost = path->total_cost - path->startup_cost;
4802 : 35506 : break;
4803 : 100075 : case T_HashJoin:
4804 : :
4805 : : /*
4806 : : * If it's a single-batch join, we don't need to rebuild the hash
4807 : : * table during a rescan.
4808 : : */
3569 4809 [ + - ]: 100075 : if (((HashPath *) path)->num_batches == 1)
4810 : : {
4811 : : /* Startup cost is exactly the cost of hash table building */
4812 : 100075 : *rescan_startup_cost = 0;
4813 : 100075 : *rescan_total_cost = path->total_cost - path->startup_cost;
4814 : : }
4815 : : else
4816 : : {
4817 : : /* Otherwise, no special treatment */
3569 tgl@sss.pgh.pa.us 4818 :UBC 0 : *rescan_startup_cost = path->startup_cost;
4819 : 0 : *rescan_total_cost = path->total_cost;
4820 : : }
6079 tgl@sss.pgh.pa.us 4821 :CBC 100075 : break;
4822 : 5191 : case T_CteScan:
4823 : : case T_WorkTableScan:
4824 : : {
4825 : : /*
4826 : : * These plan types materialize their final result in a
4827 : : * tuplestore or tuplesort object. So the rescan cost is only
4828 : : * cpu_tuple_cost per tuple, unless the result is large enough
4829 : : * to spill to disk.
4830 : : */
5212 4831 : 5191 : Cost run_cost = cpu_tuple_cost * path->rows;
4832 : 5191 : double nbytes = relation_byte_size(path->rows,
3240 4833 : 5191 : path->pathtarget->width);
459 4834 : 5191 : double work_mem_bytes = work_mem * (Size) 1024;
4835 : :
6079 4836 [ + + ]: 5191 : if (nbytes > work_mem_bytes)
4837 : : {
4838 : : /* It will spill, so account for re-read cost */
4839 : 192 : double npages = ceil(nbytes / BLCKSZ);
4840 : :
4841 : 192 : run_cost += seq_page_cost * npages;
4842 : : }
4843 : 5191 : *rescan_startup_cost = 0;
4844 : 5191 : *rescan_total_cost = run_cost;
4845 : : }
4846 : 5191 : break;
5919 4847 : 888961 : case T_Material:
4848 : : case T_Sort:
4849 : : {
4850 : : /*
4851 : : * These plan types not only materialize their results, but do
4852 : : * not implement qual filtering or projection. So they are
4853 : : * even cheaper to rescan than the ones above. We charge only
4854 : : * cpu_operator_cost per tuple. (Note: keep that in sync with
4855 : : * the run_cost charge in cost_sort, and also see comments in
4856 : : * cost_material before you change it.)
4857 : : */
5212 4858 : 888961 : Cost run_cost = cpu_operator_cost * path->rows;
4859 : 888961 : double nbytes = relation_byte_size(path->rows,
3240 4860 : 888961 : path->pathtarget->width);
459 4861 : 888961 : double work_mem_bytes = work_mem * (Size) 1024;
4862 : :
5919 4863 [ + + ]: 888961 : if (nbytes > work_mem_bytes)
4864 : : {
4865 : : /* It will spill, so account for re-read cost */
4866 : 7296 : double npages = ceil(nbytes / BLCKSZ);
4867 : :
4868 : 7296 : run_cost += seq_page_cost * npages;
4869 : : }
4870 : 888961 : *rescan_startup_cost = 0;
4871 : 888961 : *rescan_total_cost = run_cost;
4872 : : }
4873 : 888961 : break;
1756 drowley@postgresql.o 4874 : 203115 : case T_Memoize:
4875 : : /* All the hard work is done by cost_memoize_rescan */
4876 : 203115 : cost_memoize_rescan(root, (MemoizePath *) path,
4877 : : rescan_startup_cost, rescan_total_cost);
1859 4878 : 203115 : break;
6079 tgl@sss.pgh.pa.us 4879 : 1353191 : default:
4880 : 1353191 : *rescan_startup_cost = path->startup_cost;
4881 : 1353191 : *rescan_total_cost = path->total_cost;
4882 : 1353191 : break;
4883 : : }
4884 : 2586039 : }
4885 : :
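A sketch of the Material/Sort rescan arithmetic with invented sizes: one million 100-byte rows occupy roughly 95 MB, past a hypothetical 64 MB work_mem, so the re-read charge of seq_page_cost per block is added to the cpu_operator_cost-per-row baseline.

#include <math.h>
#include <stdio.h>

/* Invented-size sketch of the Material/Sort rescan estimate above. The
 * byte count stands in for relation_byte_size(), which also counts
 * per-tuple overhead, and BLCKSZ is assumed to be the default 8192. */
int
main(void)
{
    double rows = 1000000.0;
    double width = 100.0;                       /* bytes per row, invented */
    double work_mem_bytes = 65536.0 * 1024;     /* work_mem set to 64MB here */
    double cpu_operator_cost = 0.0025;
    double seq_page_cost = 1.0;

    double run_cost = cpu_operator_cost * rows; /* 2500 */
    double nbytes = rows * width;

    if (nbytes > work_mem_bytes)                /* spills: add re-read cost */
        run_cost += seq_page_cost * ceil(nbytes / 8192.0);

    printf("rescan total cost = %g\n", run_cost);   /* 2500 + 12208 */
    return 0;
}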
4886 : :
4887 : : /*
4888 : : * cost_qual_eval
4889 : : * Estimate the CPU costs of evaluating a WHERE clause.
4890 : : * The input can be either an implicitly-ANDed list of boolean
4891 : : * expressions, or a list of RestrictInfo nodes. (The latter is
4892 : : * preferred since it allows caching of the results.)
4893 : : * The result includes both a one-time (startup) component,
4894 : : * and a per-evaluation component.
4895 : : *
4896 : : * Note: in some code paths root can be passed as NULL, resulting in
4897 : : * slightly worse estimates.
4898 : : */
4899 : : void
7012 4900 : 3667087 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4901 : : {
4902 : : cost_qual_eval_context context;
4903 : : ListCell *l;
4904 : :
4905 : 3667087 : context.root = root;
4906 : 3667087 : context.total.startup = 0;
4907 : 3667087 : context.total.per_tuple = 0;
4908 : :
4909 : : /* We don't charge any cost for the implicit ANDing at top level ... */
4910 : :
9275 4911 [ + + + + : 7052195 : foreach(l, quals)
+ + ]
4912 : : {
9175 bruce@momjian.us 4913 : 3385108 : Node *qual = (Node *) lfirst(l);
4914 : :
7012 tgl@sss.pgh.pa.us 4915 : 3385108 : cost_qual_eval_walker(qual, &context);
4916 : : }
4917 : :
4918 : 3667087 : *cost = context.total;
9576 4919 : 3667087 : }
4920 : :
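As a usage sketch (hypothetical caller, assuming the PostgreSQL planner headers are available), a path's costs can absorb a qual list's evaluation cost like this; charge_quals is illustrative and not part of this file:

#include "postgres.h"
#include "optimizer/cost.h"

/* Hypothetical helper: fold a qual list's eval cost into a path.
 * The startup component is paid once; per_tuple is paid per row. */
static void
charge_quals(PlannerInfo *root, Path *path, List *quals)
{
    QualCost    qcost;

    cost_qual_eval(&qcost, quals, root);
    path->startup_cost += qcost.startup;
    path->total_cost += qcost.startup + qcost.per_tuple * path->rows;
}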
4921 : : /*
4922 : : * cost_qual_eval_node
4923 : : * As above, for a single RestrictInfo or expression.
4924 : : */
4925 : : void
7012 4926 : 1457288 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4927 : : {
4928 : : cost_qual_eval_context context;
4929 : :
4930 : 1457288 : context.root = root;
4931 : 1457288 : context.total.startup = 0;
4932 : 1457288 : context.total.per_tuple = 0;
4933 : :
4934 : 1457288 : cost_qual_eval_walker(qual, &context);
4935 : :
4936 : 1457288 : *cost = context.total;
7043 4937 : 1457288 : }
4938 : :
4939 : : static bool
6746 bruce@momjian.us 4940 : 7621888 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4941 : : {
9576 tgl@sss.pgh.pa.us 4942 [ + + ]: 7621888 : if (node == NULL)
4943 : 77895 : return false;
4944 : :
4945 : : /*
4946 : : * RestrictInfo nodes contain an eval_cost field reserved for this
4947 : : * routine's use, so that it's not necessary to evaluate the qual clause's
4948 : : * cost more than once. If the clause's cost hasn't been computed yet,
4949 : : * the field's startup value will contain -1.
4950 : : */
7043 4951 [ + + ]: 7543993 : if (IsA(node, RestrictInfo))
4952 : : {
4953 : 3546043 : RestrictInfo *rinfo = (RestrictInfo *) node;
4954 : :
4955 [ + + ]: 3546043 : if (rinfo->eval_cost.startup < 0)
4956 : : {
4957 : : cost_qual_eval_context locContext;
4958 : :
7012 4959 : 474387 : locContext.root = context->root;
4960 : 474387 : locContext.total.startup = 0;
4961 : 474387 : locContext.total.per_tuple = 0;
4962 : :
4963 : : /*
4964 : : * For an OR clause, recurse into the marked-up tree so that we
4965 : : * set the eval_cost for contained RestrictInfos too.
4966 : : */
7043 4967 [ + + ]: 474387 : if (rinfo->orclause)
7012 4968 : 8637 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4969 : : else
4970 : 465750 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4971 : :
4972 : : /*
4973 : : * If the RestrictInfo is marked pseudoconstant, it will be tested
4974 : : * only once, so treat its cost as all startup cost.
4975 : : */
7043 4976 [ + + ]: 474387 : if (rinfo->pseudoconstant)
4977 : : {
4978 : : /* count one execution during startup */
7012 4979 : 8564 : locContext.total.startup += locContext.total.per_tuple;
4980 : 8564 : locContext.total.per_tuple = 0;
4981 : : }
4982 : 474387 : rinfo->eval_cost = locContext.total;
4983 : : }
4984 : 3546043 : context->total.startup += rinfo->eval_cost.startup;
4985 : 3546043 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
4986 : : /* do NOT recurse into children */
7043 4987 : 3546043 : return false;
4988 : : }
4989 : :
4990 : : /*
4991 : : * For each operator or function node in the given tree, we charge the
4992 : : * estimated execution cost given by pg_proc.procost (remember to multiply
4993 : : * this by cpu_operator_cost).
4994 : : *
4995 : : * Vars and Consts are charged zero, and so are boolean operators (AND,
4996 : : * OR, NOT). Simplistic, but a lot better than no model at all.
4997 : : *
4998 : : * Should we try to account for the possibility of short-circuit
4999 : : * evaluation of AND/OR? Probably *not*, because that would make the
5000 : : * results depend on the clause ordering, and we are not in any position
5001 : : * to expect that the current ordering of the clauses is the one that's
5002 : : * going to end up being used. The above per-RestrictInfo caching would
5003 : : * not mix well with trying to re-order clauses anyway.
5004 : : *
5005 : : * Another issue that is entirely ignored here is that if a set-returning
5006 : : * function is below top level in the tree, the functions/operators above
5007 : : * it will need to be evaluated multiple times. In practical use, such
5008 : : * cases arise so seldom as to not be worth the added complexity needed;
5009 : : * moreover, since our rowcount estimates for functions tend to be pretty
5010 : : * phony, the results would also be pretty phony.
5011 : : */
5012 [ + + ]: 3997950 : if (IsA(node, FuncExpr))
5013 : : {
2642 5014 : 259341 : add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
5015 : : &context->total);
5016 : : }
7043 5017 [ + + ]: 3738609 : else if (IsA(node, OpExpr) ||
5018 [ + + ]: 3212114 : IsA(node, DistinctExpr) ||
5019 [ + + ]: 3211342 : IsA(node, NullIfExpr))
5020 : : {
5021 : : /* rely on struct equivalence to treat these all alike */
5022 : 527499 : set_opfuncid((OpExpr *) node);
2642 5023 : 527499 : add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
5024 : : &context->total);
5025 : : }
8346 5026 [ + + ]: 3211110 : else if (IsA(node, ScalarArrayOpExpr))
5027 : : {
7465 5028 : 35154 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
7153 bruce@momjian.us 5029 : 35154 : Node *arraynode = (Node *) lsecond(saop->args);
5030 : : QualCost sacosts;
5031 : : QualCost hcosts;
852 tgl@sss.pgh.pa.us 5032 : 35154 : double estarraylen = estimate_array_length(context->root, arraynode);
5033 : :
7043 5034 : 35154 : set_sa_opfuncid(saop);
2642 5035 : 35154 : sacosts.startup = sacosts.per_tuple = 0;
5036 : 35154 : add_function_cost(context->root, saop->opfuncid, NULL,
5037 : : &sacosts);
5038 : :
1853 drowley@postgresql.o 5039 [ + + ]: 35154 : if (OidIsValid(saop->hashfuncid))
5040 : : {
5041 : : /* Handle costs for hashed ScalarArrayOpExpr */
5042 : 246 : hcosts.startup = hcosts.per_tuple = 0;
5043 : :
5044 : 246 : add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
5045 : 246 : context->total.startup += sacosts.startup + hcosts.startup;
5046 : :
5047 : : /* Estimate the cost of building the hashtable. */
5048 : 246 : context->total.startup += estarraylen * hcosts.per_tuple;
5049 : :
5050 : : /*
5051 : : * XXX should we charge a little bit for sacosts.per_tuple when
5052 : : * building the table, or is it ok to assume there will be zero
5053 : : * hash collision?
5054 : : */
5055 : :
5056 : : /*
5057 : : * Charge for hashtable lookups. Charge a single hash and a
5058 : : * single comparison.
5059 : : */
5060 : 246 : context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
5061 : : }
5062 : : else
5063 : : {
5064 : : /*
5065 : : * Estimate that the operator will be applied to about half of the
5066 : : * array elements before the answer is determined.
5067 : : */
5068 : 34908 : context->total.startup += sacosts.startup;
5069 : 69816 : context->total.per_tuple += sacosts.per_tuple *
852 tgl@sss.pgh.pa.us 5070 : 34908 : estimate_array_length(context->root, arraynode) * 0.5;
5071 : : }
5072 : : }
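 : : /*
 : :  * Worked example (editorial note): for "x = ANY(arr)" with an
 : :  * estimated 100-element array, the linear branch above charges
 : :  * 100 * 0.5 = 50 comparison-operator evaluations per tuple, whereas
 : :  * the hashed branch charges roughly 100 hash-function calls once at
 : :  * startup plus a single hash and a single comparison per tuple.
 : :  */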
5490 5073 [ + + ]: 3175956 : else if (IsA(node, Aggref) ||
5074 [ + + ]: 3120583 : IsA(node, WindowFunc))
5075 : : {
5076 : : /*
5077 : : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
5078 : : * ie, zero execution cost in the current model, because they behave
5079 : : * essentially like Vars at execution. We disregard the costs of
5080 : : * their input expressions for the same reason. The actual execution
5081 : : * costs of the aggregate/window functions and their arguments have to
5082 : : * be factored into plan-node-specific costing of the Agg or WindowAgg
5083 : : * plan node.
5084 : : */
5085 : 58723 : return false; /* don't recurse into children */
5086 : : }
1506 5087 [ + + ]: 3117233 : else if (IsA(node, GroupingFunc))
5088 : : {
5089 : : /* Treat this as having cost 1 */
5090 : 358 : context->total.per_tuple += cpu_operator_cost;
5091 : 358 : return false; /* don't recurse into children */
5092 : : }
6909 5093 [ + + ]: 3116875 : else if (IsA(node, CoerceViaIO))
5094 : : {
5095 : 19751 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
5096 : : Oid iofunc;
5097 : : Oid typioparam;
5098 : : bool typisvarlena;
5099 : :
5100 : : /* check the result type's input function */
5101 : 19751 : getTypeInputInfo(iocoerce->resulttype,
5102 : : &iofunc, &typioparam);
2642 5103 : 19751 : add_function_cost(context->root, iofunc, NULL,
5104 : : &context->total);
5105 : : /* check the input type's output function */
6909 5106 : 19751 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
5107 : : &iofunc, &typisvarlena);
2642 5108 : 19751 : add_function_cost(context->root, iofunc, NULL,
5109 : : &context->total);
5110 : : }
6979 5111 [ + + ]: 3097124 : else if (IsA(node, ArrayCoerceExpr))
5112 : : {
5113 : 4038 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
5114 : : QualCost perelemcost;
5115 : :
3139 5116 : 4038 : cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
5117 : : context->root);
5118 : 4038 : context->total.startup += perelemcost.startup;
5119 [ + + ]: 4038 : if (perelemcost.per_tuple > 0)
5120 : 48 : context->total.per_tuple += perelemcost.per_tuple *
852 5121 : 48 : estimate_array_length(context->root, (Node *) acoerce->arg);
5122 : : }
7433 5123 [ + + ]: 3093086 : else if (IsA(node, RowCompareExpr))
5124 : : {
5125 : : /* Conservatively assume we will check all the columns */
5126 : 235 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
5127 : : ListCell *lc;
5128 : :
7043 5129 [ + - + + : 750 : foreach(lc, rcexpr->opnos)
+ + ]
5130 : : {
6746 bruce@momjian.us 5131 : 515 : Oid opid = lfirst_oid(lc);
5132 : :
2642 tgl@sss.pgh.pa.us 5133 : 515 : add_function_cost(context->root, get_opcode(opid), NULL,
5134 : : &context->total);
5135 : : }
5136 : : }
3217 5137 [ + + ]: 3092851 : else if (IsA(node, MinMaxExpr) ||
1084 michael@paquier.xyz 5138 [ + + ]: 3092628 : IsA(node, SQLValueFunction) ||
3217 tgl@sss.pgh.pa.us 5139 [ + + ]: 3088810 : IsA(node, XmlExpr) ||
5140 [ + + ]: 3088225 : IsA(node, CoerceToDomain) ||
775 amitlan@postgresql.o 5141 [ + + ]: 3081598 : IsA(node, NextValueExpr) ||
5142 [ + + ]: 3081269 : IsA(node, JsonExpr))
5143 : : {
5144 : : /* Treat all these as having cost 1 */
3217 tgl@sss.pgh.pa.us 5145 : 13716 : context->total.per_tuple += cpu_operator_cost;
5146 : : }
8514 5147 [ - + ]: 3079135 : else if (IsA(node, SubLink))
5148 : : {
5149 : : /* This routine should not be applied to un-planned expressions */
8320 tgl@sss.pgh.pa.us 5150 [ # # ]:UBC 0 : elog(ERROR, "cannot handle unplanned sub-select");
5151 : : }
8543 tgl@sss.pgh.pa.us 5152 [ + + ]:CBC 3079135 : else if (IsA(node, SubPlan))
5153 : : {
5154 : : /*
5155 : : * A subplan node in an expression typically indicates that the
5156 : : * subplan will be executed on each evaluation, so charge accordingly.
5157 : : * (Sub-selects that can be executed as InitPlans have already been
5158 : : * removed from the expression.)
5159 : : */
8310 bruce@momjian.us 5160 : 34676 : SubPlan *subplan = (SubPlan *) node;
5161 : :
6465 tgl@sss.pgh.pa.us 5162 : 34676 : context->total.startup += subplan->startup_cost;
5163 : 34676 : context->total.per_tuple += subplan->per_call_cost;
5164 : :
5165 : : /*
5166 : : * We don't want to recurse into the testexpr, because it was already
5167 : : * counted in the SubPlan node's costs. So we're done.
5168 : : */
5169 : 34676 : return false;
5170 : : }
5171 [ + + ]: 3044459 : else if (IsA(node, AlternativeSubPlan))
5172 : : {
5173 : : /*
5174 : : * Arbitrarily use the first alternative plan for costing. (We should
5175 : : * certainly only include one alternative, and we don't yet have
5176 : : * enough information to know which one the executor is most likely to
5177 : : * use.)
5178 : : */
5179 : 1453 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
5180 : :
5181 : 1453 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
5182 : : context);
5183 : : }
3729 5184 [ + + ]: 3043006 : else if (IsA(node, PlaceHolderVar))
5185 : : {
5186 : : /*
5187 : : * A PlaceHolderVar should be given cost zero when considering general
5188 : : * expression evaluation costs. The expense of doing the contained
5189 : : * expression is charged as part of the tlist eval costs of the scan
5190 : : * or join where the PHV is first computed (see set_rel_width and
5191 : : * add_placeholders_to_joinrel). If we charged it again here, we'd be
5192 : : * double-counting the cost for each level of plan that the PHV
5193 : : * bubbles up through. Hence, return without recursing into the
5194 : : * phexpr.
5195 : : */
5196 : 4964 : return false;
5197 : : }
5198 : :
5199 : : /* recurse into children */
523 peter@eisentraut.org 5200 : 3897776 : return expression_tree_walker(node, cost_qual_eval_walker, context);
5201 : : }
5202 : :
5203 : : /*
5204 : : * get_restriction_qual_cost
5205 : : * Compute evaluation costs of a baserel's restriction quals, plus any
5206 : : * movable join quals that have been pushed down to the scan.
5207 : : * Results are returned into *qpqual_cost.
5208 : : *
5209 : : * This is a convenience subroutine that works for seqscans and other cases
5210 : : * where all the given quals will be evaluated the hard way. It's not useful
5211 : : * for cost_index(), for example, where the index machinery takes care of
5212 : : * some of the quals. We assume baserestrictcost was previously set by
5213 : : * set_baserel_size_estimates().
5214 : : */
5215 : : static void
5129 tgl@sss.pgh.pa.us 5216 : 878975 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
5217 : : ParamPathInfo *param_info,
5218 : : QualCost *qpqual_cost)
5219 : : {
5220 [ + + ]: 878975 : if (param_info)
5221 : : {
5222 : : /* Include costs of pushed-down clauses */
5223 : 213744 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
5224 : :
5225 : 213744 : qpqual_cost->startup += baserel->baserestrictcost.startup;
5226 : 213744 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
5227 : : }
5228 : : else
5229 : 665231 : *qpqual_cost = baserel->baserestrictcost;
5230 : 878975 : }
5231 : :
5232 : :
5233 : : /*
5234 : : * compute_semi_anti_join_factors
5235 : : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
5236 : : * can be expected to scan.
5237 : : *
5238 : : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
5239 : : * inner rows as soon as it finds a match to the current outer row.
5240 : : * The same happens if we have detected the inner rel is unique.
5241 : : * We should therefore adjust some of the cost components for this effect.
5242 : : * This function computes some estimates needed for these adjustments.
5243 : : * These estimates will be the same regardless of the particular paths used
5244 : : * for the outer and inner relation, so we compute these once and then pass
5245 : : * them to all the join cost estimation functions.
5246 : : *
5247 : : * Input parameters:
5248 : : * joinrel: join relation under consideration
5249 : : * outerrel: outer relation under consideration
5250 : : * innerrel: inner relation under consideration
5251 : : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
5252 : : * sjinfo: SpecialJoinInfo relevant to this join
5253 : : * restrictlist: join quals
5254 : : * Output parameters:
5255 : : * *semifactors is filled in (see pathnodes.h for field definitions)
5256 : : */
5257 : : void
5212 5258 : 184951 : compute_semi_anti_join_factors(PlannerInfo *root,
5259 : : RelOptInfo *joinrel,
5260 : : RelOptInfo *outerrel,
5261 : : RelOptInfo *innerrel,
5262 : : JoinType jointype,
5263 : : SpecialJoinInfo *sjinfo,
5264 : : List *restrictlist,
5265 : : SemiAntiJoinFactors *semifactors)
5266 : : {
5267 : : Selectivity jselec;
5268 : : Selectivity nselec;
5269 : : Selectivity avgmatch;
5270 : : SpecialJoinInfo norm_sjinfo;
5271 : : List *joinquals;
5272 : : ListCell *l;
5273 : :
5274 : : /*
5275 : : * In an ANTI join, we must ignore clauses that are "pushed down", since
5276 : : * those won't affect the match logic. In a SEMI join, we do not
5277 : : * distinguish joinquals from "pushed down" quals, so just use the whole
5278 : : * restrictinfo list. For other outer join types, we should consider only
5279 : : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
5280 : : */
3315 5281 [ + + ]: 184951 : if (IS_OUTER_JOIN(jointype))
5282 : : {
6205 5283 : 59300 : joinquals = NIL;
5212 5284 [ + + + + : 133814 : foreach(l, restrictlist)
+ + ]
5285 : : {
3312 5286 : 74514 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5287 : :
2937 5288 [ + + + - ]: 74514 : if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
6205 5289 : 66080 : joinquals = lappend(joinquals, rinfo);
5290 : : }
5291 : : }
5292 : : else
5212 5293 : 125651 : joinquals = restrictlist;
5294 : :
5295 : : /*
5296 : : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5297 : : */
6205 5298 [ + + ]: 184951 : jselec = clauselist_selectivity(root,
5299 : : joinquals,
5300 : : 0,
5301 : : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5302 : : sjinfo);
5303 : :
5304 : : /*
5305 : : * Also get the normal inner-join selectivity of the join clauses.
5306 : : */
771 amitlan@postgresql.o 5307 : 184951 : init_dummy_sjinfo(&norm_sjinfo, outerrel->relids, innerrel->relids);
5308 : :
6205 tgl@sss.pgh.pa.us 5309 : 184951 : nselec = clauselist_selectivity(root,
5310 : : joinquals,
5311 : : 0,
5312 : : JOIN_INNER,
5313 : : &norm_sjinfo);
5314 : :
5315 : : /* Avoid leaking a lot of ListCells */
3315 5316 [ + + ]: 184951 : if (IS_OUTER_JOIN(jointype))
6205 5317 : 59300 : list_free(joinquals);
5318 : :
5319 : : /*
5320 : : * jselec can be interpreted as the fraction of outer-rel rows that have
5321 : : * any matches (this is true for both SEMI and ANTI cases). And nselec is
5322 : : * the fraction of the Cartesian product that matches. So, the average
5323 : : * number of matches for each outer-rel row that has at least one match is
5324 : : * nselec * inner_rows / jselec.
5325 : : *
5326 : : * Note: it is correct to use the inner rel's "rows" count here, even
5327 : : * though we might later be considering a parameterized inner path with
5328 : : * fewer rows. This is because we have included all the join clauses in
5329 : : * the selectivity estimate.
5330 : : */
5331 [ + + ]: 184951 : if (jselec > 0) /* protect against zero divide */
5332 : : {
5212 5333 : 184643 : avgmatch = nselec * innerrel->rows / jselec;
5334 : : /* Clamp to sane range */
6205 5335 [ + + ]: 184643 : avgmatch = Max(1.0, avgmatch);
5336 : : }
5337 : : else
5338 : 308 : avgmatch = 1.0;
5339 : :
5212 5340 : 184951 : semifactors->outer_match_frac = jselec;
5341 : 184951 : semifactors->match_count = avgmatch;
5342 : 184951 : }
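 : : /*
 : :  * Minimal numeric sketch of the above (editorial note, hypothetical
 : :  * inputs): with jselec = 0.2, nselec = 0.01, and inner_rows = 100,
 : :  * avgmatch = 0.01 * 100 / 0.2 = 5.0; i.e. 20% of outer rows are
 : :  * expected to have any match, and each such row matches about 5
 : :  * inner rows. The Max(1.0, avgmatch) clamp guards against estimates
 : :  * claiming less than one inner match per matched outer row.
 : :  */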
5343 : :
5344 : : /*
5345 : : * has_indexed_join_quals
5346 : : * Check whether all the joinquals of a nestloop join are used as
5347 : : * inner index quals.
5348 : : *
5349 : : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5350 : : * indexscan) that uses all the joinquals as indexquals, we can assume that an
5351 : : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5352 : : * expensive.
5353 : : */
5354 : : static bool
1731 peter@eisentraut.org 5355 : 728558 : has_indexed_join_quals(NestPath *path)
5356 : : {
5357 : 728558 : JoinPath *joinpath = &path->jpath;
5129 tgl@sss.pgh.pa.us 5358 : 728558 : Relids joinrelids = joinpath->path.parent->relids;
5359 : 728558 : Path *innerpath = joinpath->innerjoinpath;
5360 : : List *indexclauses;
5361 : : bool found_one;
5362 : : ListCell *lc;
5363 : :
5364 : : /* If join still has quals to evaluate, it's not fast */
5365 [ + + ]: 728558 : if (joinpath->joinrestrictinfo != NIL)
5366 : 527273 : return false;
5367 : : /* Nor if the inner path isn't parameterized at all */
5368 [ + + ]: 201285 : if (innerpath->param_info == NULL)
5369 : 2485 : return false;
5370 : :
5371 : : /* Find the indexclauses list for the inner scan */
5372 [ + + + ]: 198800 : switch (innerpath->pathtype)
5373 : : {
5374 : 126819 : case T_IndexScan:
5375 : : case T_IndexOnlyScan:
5376 : 126819 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
5377 : 126819 : break;
5378 : 304 : case T_BitmapHeapScan:
5379 : : {
5380 : : /* Accept only a simple bitmap scan, not AND/OR cases */
5077 bruce@momjian.us 5381 : 304 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5382 : :
5383 [ + + ]: 304 : if (IsA(bmqual, IndexPath))
5384 : 264 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
5385 : : else
5386 : 40 : return false;
5387 : 264 : break;
5388 : : }
5129 tgl@sss.pgh.pa.us 5389 : 71677 : default:
5390 : :
5391 : : /*
5392 : : * If it's not a simple indexscan, it probably doesn't run quickly
5393 : : * for zero rows out, even if it's a parameterized path using all
5394 : : * the joinquals.
5395 : : */
5212 5396 : 71677 : return false;
5397 : : }
5398 : :
5399 : : /*
5400 : : * Examine the inner path's param clauses. Any that are from the outer
5401 : : * path must be found in the indexclauses list, either exactly or in an
5402 : : * equivalent form generated by equivclass.c. Also, we must find at least
5403 : : * one such clause, else it's a clauseless join which isn't fast.
5404 : : */
5129 5405 : 127083 : found_one = false;
5406 [ + - + + : 251900 : foreach(lc, innerpath->param_info->ppi_clauses)
+ + ]
5407 : : {
5408 : 130839 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5409 : :
5410 [ + + ]: 130839 : if (join_clause_is_movable_into(rinfo,
5411 : 130839 : innerpath->parent->relids,
5412 : : joinrelids))
5413 : : {
2642 5414 [ + + ]: 130399 : if (!is_redundant_with_indexclauses(rinfo, indexclauses))
5129 5415 : 6022 : return false;
5416 : 124377 : found_one = true;
5417 : : }
5418 : : }
5419 : 121061 : return found_one;
5420 : : }
5421 : :
5422 : :
5423 : : /*
5424 : : * approx_tuple_count
5425 : : * Quick-and-dirty estimation of the number of join rows passing
5426 : : * a set of qual conditions.
5427 : : *
5428 : : * The quals can be either an implicitly-ANDed list of boolean expressions,
5429 : : * or a list of RestrictInfo nodes (typically the latter).
5430 : : *
5431 : : * We intentionally compute the selectivity under JOIN_INNER rules, even
5432 : : * if it's some type of outer join. This is appropriate because we are
5433 : : * trying to figure out how many tuples pass the initial merge or hash
5434 : : * join step.
5435 : : *
5436 : : * This is quick-and-dirty because we bypass clauselist_selectivity, and
5437 : : * simply multiply the independent clause selectivities together. Now
5438 : : * clauselist_selectivity often can't do any better than that anyhow, but
5439 : : * for some situations (such as range constraints) it is smarter. However,
5440 : : * we can't effectively cache the results of clauselist_selectivity, whereas
5441 : : * the individual clause selectivities can be and are cached.
5442 : : *
5443 : : * Since we are only using the results to estimate how many potential
5444 : : * output tuples are generated and passed through qpqual checking, it
5445 : : * seems OK to live with the approximation.
5446 : : */
5447 : : static double
6297 5448 : 602921 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5449 : : {
5450 : : double tuples;
5212 5451 : 602921 : double outer_tuples = path->outerjoinpath->rows;
5452 : 602921 : double inner_tuples = path->innerjoinpath->rows;
5453 : : SpecialJoinInfo sjinfo;
6471 5454 : 602921 : Selectivity selec = 1.0;
5455 : : ListCell *l;
5456 : :
5457 : : /*
5458 : : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5459 : : */
771 amitlan@postgresql.o 5460 : 602921 : init_dummy_sjinfo(&sjinfo, path->outerjoinpath->parent->relids,
5461 : 602921 : path->innerjoinpath->parent->relids);
5462 : :
5463 : : /* Get the approximate selectivity */
9100 tgl@sss.pgh.pa.us 5464 [ + + + + : 1289205 : foreach(l, quals)
+ + ]
5465 : : {
5466 : 686284 : Node *qual = (Node *) lfirst(l);
5467 : :
5468 : : /* Note that clause_selectivity will be able to cache its result */
3316 simon@2ndQuadrant.co 5469 : 686284 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5470 : : }
5471 : :
5472 : : /* Apply it to the input relation sizes */
6297 tgl@sss.pgh.pa.us 5473 : 602921 : tuples = selec * outer_tuples * inner_tuples;
5474 : :
6471 5475 : 602921 : return clamp_row_est(tuples);
5476 : : }
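 : : /*
 : :  * Worked example (editorial note): two quals with cached
 : :  * selectivities 0.1 and 0.5 over inputs of 1000 and 200 rows give
 : :  * 0.1 * 0.5 * 1000 * 200 = 10000 rows; any correlation between the
 : :  * quals (which clauselist_selectivity might recognize) is
 : :  * deliberately ignored here.
 : :  */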
5477 : :
5478 : :
5479 : : /*
5480 : : * set_baserel_size_estimates
5481 : : * Set the size estimates for the given base relation.
5482 : : *
5483 : : * The rel's targetlist and restrictinfo list must have been constructed
5484 : : * already, and rel->tuples must be set.
5485 : : *
5486 : : * We set the following fields of the rel node:
5487 : : * rows: the estimated number of output tuples (after applying
5488 : : * restriction clauses).
5489 : : * width: the estimated average output tuple width in bytes.
5490 : : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5491 : : */
5492 : : void
7639 5493 : 406200 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5494 : : {
5495 : : double nrows;
5496 : :
5497 : : /* Should only be applied to base relations */
8487 5498 [ - + ]: 406200 : Assert(rel->relid > 0);
5499 : :
8156 5500 : 812380 : nrows = rel->tuples *
8157 5501 : 406200 : clauselist_selectivity(root,
5502 : : rel->baserestrictinfo,
5503 : : 0,
5504 : : JOIN_INNER,
5505 : : NULL);
5506 : :
8156 5507 : 406180 : rel->rows = clamp_row_est(nrows);
5508 : :
7012 5509 : 406180 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5510 : :
9613 5511 : 406180 : set_rel_width(root, rel);
10892 scrappy@hub.org 5512 : 406180 : }
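 : : /*
 : :  * Worked example (editorial note): a relation with rel->tuples =
 : :  * 1000000 and baserestrictinfo clauses of combined selectivity 0.015
 : :  * gets rows = clamp_row_est(1000000 * 0.015) = 15000.
 : :  */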
5513 : :
5514 : : /*
5515 : : * get_parameterized_baserel_size
5516 : : * Make a size estimate for a parameterized scan of a base relation.
5517 : : *
5518 : : * 'param_clauses' lists the additional join clauses to be used.
5519 : : *
5520 : : * set_baserel_size_estimates must have been applied already.
5521 : : */
5522 : : double
5129 tgl@sss.pgh.pa.us 5523 : 133054 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5524 : : List *param_clauses)
5525 : : {
5526 : : List *allclauses;
5527 : : double nrows;
5528 : :
5529 : : /*
5530 : : * Estimate the number of rows returned by the parameterized scan, knowing
5531 : : * that it will apply all the extra join clauses as well as the rel's own
5532 : : * restriction clauses. Note that we force the clauses to be treated as
5533 : : * non-join clauses during selectivity estimation.
5534 : : */
2458 5535 : 133054 : allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
5129 5536 : 266108 : nrows = rel->tuples *
5537 : 133054 : clauselist_selectivity(root,
5538 : : allclauses,
3240 5539 : 133054 : rel->relid, /* do not use 0! */
5540 : : JOIN_INNER,
5541 : : NULL);
5129 5542 : 133054 : nrows = clamp_row_est(nrows);
5543 : : /* For safety, make sure result is not more than the base estimate */
5544 [ - + ]: 133054 : if (nrows > rel->rows)
5129 tgl@sss.pgh.pa.us 5545 :UBC 0 : nrows = rel->rows;
5129 tgl@sss.pgh.pa.us 5546 :CBC 133054 : return nrows;
5547 : : }
5548 : :
5549 : : /*
5550 : : * set_joinrel_size_estimates
5551 : : * Set the size estimates for the given join relation.
5552 : : *
5553 : : * The rel's targetlist must have been constructed already, and a
5554 : : * restriction clause list that matches the given component rels must
5555 : : * be provided.
5556 : : *
5557 : : * Since there is more than one way to make a joinrel for more than two
5558 : : * base relations, the results we get here could depend on which component
5559 : : * rel pair is provided. In theory we should get the same answers no matter
5560 : : * which pair is provided; in practice, since the selectivity estimation
5561 : : * routines don't handle all cases equally well, we might not. But there's
5562 : : * not much to be done about it. (Would it make sense to repeat the
5563 : : * calculations for each pair of input rels that's encountered, and somehow
5564 : : * average the results? Probably way more trouble than it's worth, and
5565 : : * anyway we must keep the rowcount estimate the same for all paths for the
5566 : : * joinrel.)
5567 : : *
5568 : : * We set only the rows field here. The reltarget field was already set by
5569 : : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5570 : : */
5571 : : void
7639 5572 : 208447 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5573 : : RelOptInfo *outer_rel,
5574 : : RelOptInfo *inner_rel,
5575 : : SpecialJoinInfo *sjinfo,
5576 : : List *restrictlist)
5577 : : {
5212 5578 : 208447 : rel->rows = calc_joinrel_size_estimate(root,
5579 : : rel,
5580 : : outer_rel,
5581 : : inner_rel,
5582 : : outer_rel->rows,
5583 : : inner_rel->rows,
5584 : : sjinfo,
5585 : : restrictlist);
5586 : 208447 : }
5587 : :
5588 : : /*
5589 : : * get_parameterized_joinrel_size
5590 : : * Make a size estimate for a parameterized scan of a join relation.
5591 : : *
5592 : : * 'rel' is the joinrel under consideration.
5593 : : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5594 : : * produce the relations being joined.
5595 : : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5596 : : * 'restrict_clauses' lists the join clauses that need to be applied at the
5597 : : * join node (including any movable clauses that were moved down to this join,
5598 : : * and not including any movable clauses that were pushed down into the
5599 : : * child paths).
5600 : : *
5601 : : * set_joinrel_size_estimates must have been applied already.
5602 : : */
5603 : : double
5129 5604 : 9437 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5605 : : Path *outer_path,
5606 : : Path *inner_path,
5607 : : SpecialJoinInfo *sjinfo,
5608 : : List *restrict_clauses)
5609 : : {
5610 : : double nrows;
5611 : :
5612 : : /*
5613 : : * Estimate the number of rows returned by the parameterized join as the
5614 : : * sizes of the input paths times the selectivity of the clauses that have
5615 : : * ended up at this join node.
5616 : : *
5617 : : * As with set_joinrel_size_estimates, the rowcount estimate could depend
5618 : : * on the pair of input paths provided, though ideally we'd get the same
5619 : : * estimate for any pair with the same parameterization.
5620 : : */
5621 : 9437 : nrows = calc_joinrel_size_estimate(root,
5622 : : rel,
5623 : : outer_path->parent,
5624 : : inner_path->parent,
5625 : : outer_path->rows,
5626 : : inner_path->rows,
5627 : : sjinfo,
5628 : : restrict_clauses);
5629 : : /* For safety, make sure result is not more than the base estimate */
5630 [ + + ]: 9437 : if (nrows > rel->rows)
5631 : 346 : nrows = rel->rows;
5632 : 9437 : return nrows;
5633 : : }
5634 : :
5635 : : /*
5636 : : * calc_joinrel_size_estimate
5637 : : * Workhorse for set_joinrel_size_estimates and
5638 : : * get_parameterized_joinrel_size.
5639 : : *
5640 : : * outer_rel/inner_rel are the relations being joined, but they should be
5641 : : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5642 : : * than what rel->rows says, when we are considering parameterized paths.
5643 : : */
5644 : : static double
5212 5645 : 217884 : calc_joinrel_size_estimate(PlannerInfo *root,
5646 : : RelOptInfo *joinrel,
5647 : : RelOptInfo *outer_rel,
5648 : : RelOptInfo *inner_rel,
5649 : : double outer_rows,
5650 : : double inner_rows,
5651 : : SpecialJoinInfo *sjinfo,
5652 : : List *restrictlist)
5653 : : {
6473 5654 : 217884 : JoinType jointype = sjinfo->jointype;
5655 : : Selectivity fkselec;
5656 : : Selectivity jselec;
5657 : : Selectivity pselec;
5658 : : double nrows;
5659 : :
5660 : : /*
5661 : : * Compute joinclause selectivity. Note that we are only considering
5662 : : * clauses that become restriction clauses at this join level; we are not
5663 : : * double-counting them because they were not considered in estimating the
5664 : : * sizes of the component rels.
5665 : : *
5666 : : * First, see whether any of the joinclauses can be matched to known FK
5667 : : * constraints. If so, drop those clauses from the restrictlist, and
5668 : : * instead estimate their selectivity using FK semantics. (We do this
5669 : : * without regard to whether said clauses are local or "pushed down".
5670 : : * Probably, an FK-matching clause could never be seen as pushed down at
5671 : : * an outer join, since it would be strict and hence would be grounds for
5672 : : * join strength reduction.) fkselec gets the net selectivity for
5673 : : * FK-matching clauses, or 1.0 if there are none.
5674 : : */
3608 5675 : 217884 : fkselec = get_foreign_key_join_selectivity(root,
5676 : : outer_rel->relids,
5677 : : inner_rel->relids,
5678 : : sjinfo,
5679 : : &restrictlist);
5680 : :
5681 : : /*
5682 : : * For an outer join, we have to distinguish the selectivity of the join's
5683 : : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5684 : : * down". For inner joins we just count them all as joinclauses.
5685 : : */
7116 5686 [ + + ]: 217884 : if (IS_OUTER_JOIN(jointype))
5687 : : {
5688 : 60468 : List *joinquals = NIL;
5689 : 60468 : List *pushedquals = NIL;
5690 : : ListCell *l;
5691 : :
5692 : : /* Grovel through the clauses to separate into two lists */
5693 [ + + + + : 138566 : foreach(l, restrictlist)
+ + ]
5694 : : {
3312 5695 : 78098 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5696 : :
2937 5697 [ + + + + ]: 78098 : if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
7116 5698 : 5648 : pushedquals = lappend(pushedquals, rinfo);
5699 : : else
5700 : 72450 : joinquals = lappend(joinquals, rinfo);
5701 : : }
5702 : :
5703 : : /* Get the separate selectivities */
3619 5704 : 60468 : jselec = clauselist_selectivity(root,
5705 : : joinquals,
5706 : : 0,
5707 : : jointype,
5708 : : sjinfo);
7116 5709 : 60468 : pselec = clauselist_selectivity(root,
5710 : : pushedquals,
5711 : : 0,
5712 : : jointype,
5713 : : sjinfo);
5714 : :
5715 : : /* Avoid leaking a lot of ListCells */
5716 : 60468 : list_free(joinquals);
5717 : 60468 : list_free(pushedquals);
5718 : : }
5719 : : else
5720 : : {
3619 5721 : 157416 : jselec = clauselist_selectivity(root,
5722 : : restrictlist,
5723 : : 0,
5724 : : jointype,
5725 : : sjinfo);
7116 5726 : 157416 : pselec = 0.0; /* not used, keep compiler quiet */
5727 : : }
5728 : :
5729 : : /*
5730 : : * Basically, we multiply size of Cartesian product by selectivity.
5731 : : *
5732 : : * If we are doing an outer join, take that into account: the joinqual
5733 : : * selectivity has to be clamped using the knowledge that the output must
5734 : : * be at least as large as the non-nullable input. However, any
5735 : : * pushed-down quals are applied after the outer join, so their
5736 : : * selectivity applies fully.
5737 : : *
5738 : : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5739 : : * of LHS rows that have matches, and we apply that straightforwardly.
5740 : : */
9209 5741 [ + + + + : 217884 : switch (jointype)
+ - ]
5742 : : {
5743 : 151037 : case JOIN_INNER:
3608 5744 : 151037 : nrows = outer_rows * inner_rows * fkselec * jselec;
5745 : : /* pselec not used */
9209 5746 : 151037 : break;
5747 : 52389 : case JOIN_LEFT:
3608 5748 : 52389 : nrows = outer_rows * inner_rows * fkselec * jselec;
5212 5749 [ + + ]: 52389 : if (nrows < outer_rows)
5750 : 24431 : nrows = outer_rows;
7116 5751 : 52389 : nrows *= pselec;
9209 5752 : 52389 : break;
5753 : 1390 : case JOIN_FULL:
3608 5754 : 1390 : nrows = outer_rows * inner_rows * fkselec * jselec;
5212 5755 [ + + ]: 1390 : if (nrows < outer_rows)
5756 : 987 : nrows = outer_rows;
5757 [ + + ]: 1390 : if (nrows < inner_rows)
5758 : 100 : nrows = inner_rows;
7116 5759 : 1390 : nrows *= pselec;
9209 5760 : 1390 : break;
6473 5761 : 6379 : case JOIN_SEMI:
3608 5762 : 6379 : nrows = outer_rows * fkselec * jselec;
5763 : : /* pselec not used */
8506 5764 : 6379 : break;
6473 5765 : 6689 : case JOIN_ANTI:
3608 5766 : 6689 : nrows = outer_rows * (1.0 - fkselec * jselec);
6473 5767 : 6689 : nrows *= pselec;
8506 5768 : 6689 : break;
9209 tgl@sss.pgh.pa.us 5769 :UBC 0 : default:
5770 : : /* other values not expected here */
8320 5771 [ # # ]: 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
5772 : : nrows = 0; /* keep compiler quiet */
5773 : : break;
5774 : : }
5775 : :
5212 tgl@sss.pgh.pa.us 5776 :CBC 217884 : return clamp_row_est(nrows);
5777 : : }
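 : : /*
 : :  * Worked example (editorial note): for a LEFT join with outer_rows =
 : :  * 1000, inner_rows = 100, fkselec = 1.0, and jselec = 0.005, the raw
 : :  * product 1000 * 100 * 0.005 = 500 is clamped up to the 1000
 : :  * non-nullable outer rows before multiplying by the pushed-down-qual
 : :  * selectivity pselec.
 : :  */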
5778 : :
5779 : : /*
5780 : : * get_foreign_key_join_selectivity
5781 : : * Estimate join selectivity for foreign-key-related clauses.
5782 : : *
5783 : : * Remove any clauses that can be matched to FK constraints from *restrictlist,
5784 : : * and return a substitute estimate of their selectivity. 1.0 is returned
5785 : : * when there are no such clauses.
5786 : : *
5787 : : * The reason for treating such clauses specially is that we can get better
5788 : : * estimates this way than by relying on clauselist_selectivity(), especially
5789 : : * for multi-column FKs where that function's assumption that the clauses are
5790 : : * independent falls down badly. But even with single-column FKs, we may be
5791 : : * able to get a better answer when the pg_statistic stats are missing or out
5792 : : * of date.
5793 : : */
5794 : : static Selectivity
3608 5795 : 217884 : get_foreign_key_join_selectivity(PlannerInfo *root,
5796 : : Relids outer_relids,
5797 : : Relids inner_relids,
5798 : : SpecialJoinInfo *sjinfo,
5799 : : List **restrictlist)
5800 : : {
5801 : 217884 : Selectivity fkselec = 1.0;
5802 : 217884 : JoinType jointype = sjinfo->jointype;
5803 : 217884 : List *worklist = *restrictlist;
5804 : : ListCell *lc;
5805 : :
5806 : : /* Consider each FK constraint that is known to match the query */
5807 [ + + + + : 221264 : foreach(lc, root->fkey_list)
+ + ]
5808 : : {
5809 : 3380 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5810 : : bool ref_is_outer;
5811 : : List *removedlist;
5812 : : ListCell *cell;
5813 : :
5814 : : /*
5815 : : * This FK is not relevant unless it connects a baserel on one side of
5816 : : * this join to a baserel on the other side.
5817 : : */
5818 [ + + + + ]: 5750 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5819 : 2370 : bms_is_member(fkinfo->ref_relid, inner_relids))
5820 : 1548 : ref_is_outer = false;
5821 [ + + + + ]: 2652 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5822 : 820 : bms_is_member(fkinfo->con_relid, inner_relids))
5823 : 255 : ref_is_outer = true;
5824 : : else
5825 : 1577 : continue;
5826 : :
5827 : : /*
5828 : : * If we're dealing with a semi/anti join, and the FK's referenced
5829 : : * relation is on the outside, then knowledge of the FK doesn't help
5830 : : * us figure out what we need to know (which is the fraction of outer
5831 : : * rows that have matches). On the other hand, if the referenced rel
5832 : : * is on the inside, then all outer rows must have matches in the
5833 : : * referenced table (ignoring nulls). But any restriction or join
5834 : : * clauses that filter that table will reduce the fraction of matches.
5835 : : * We can account for restriction clauses, but it's too hard to guess
5836 : : * how many table rows would get through a join that's inside the RHS.
5837 : : * Hence, if either case applies, punt and ignore the FK.
5838 : : */
3242 5839 [ + - + + : 1803 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
+ + ]
5840 [ - + ]: 851 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5841 : 10 : continue;
5842 : :
5843 : : /*
5844 : : * Modify the restrictlist by removing clauses that match the FK (and
5845 : : * putting them into removedlist instead). It seems unsafe to modify
5846 : : * the originally-passed List structure, so we make a shallow copy the
5847 : : * first time through.
5848 : : */
3608 5849 [ + + ]: 1793 : if (worklist == *restrictlist)
5850 : 1605 : worklist = list_copy(worklist);
5851 : :
5852 : 1793 : removedlist = NIL;
2486 5853 [ + + + + : 3706 : foreach(cell, worklist)
+ + ]
5854 : : {
3608 5855 : 1913 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5856 : 1913 : bool remove_it = false;
5857 : : int i;
5858 : :
5859 : : /* Drop this clause if it matches any column of the FK */
5860 [ + + ]: 2276 : for (i = 0; i < fkinfo->nkeys; i++)
5861 : : {
5862 [ + + ]: 2251 : if (rinfo->parent_ec)
5863 : : {
5864 : : /*
5865 : : * EC-derived clauses can only match by EC. It is okay to
5866 : : * consider any clause derived from the same EC as
5867 : : * matching the FK: even if equivclass.c chose to generate
5868 : : * a clause equating some other pair of Vars, it could
5869 : : * have generated one equating the FK's Vars. So for
5870 : : * purposes of estimation, we can act as though it did so.
5871 : : *
5872 : : * Note: checking parent_ec is a bit of a cheat because
5873 : : * there are EC-derived clauses that don't have parent_ec
5874 : : * set; but such clauses must compare expressions that
5875 : : * aren't just Vars, so they cannot match the FK anyway.
5876 : : */
5877 [ + + ]: 787 : if (fkinfo->eclass[i] == rinfo->parent_ec)
5878 : : {
5879 : 782 : remove_it = true;
5880 : 782 : break;
5881 : : }
5882 : : }
5883 : : else
5884 : : {
5885 : : /*
5886 : : * Otherwise, see if rinfo was previously matched to FK as
5887 : : * a "loose" clause.
5888 : : */
5889 [ + + ]: 1464 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5890 : : {
5891 : 1106 : remove_it = true;
5892 : 1106 : break;
5893 : : }
5894 : : }
5895 : : }
5896 [ + + ]: 1913 : if (remove_it)
5897 : : {
2486 5898 : 1888 : worklist = foreach_delete_current(worklist, cell);
3608 5899 : 1888 : removedlist = lappend(removedlist, rinfo);
5900 : : }
5901 : : }
5902 : :
5903 : : /*
5904 : : * If we failed to remove all the matching clauses we expected to
5905 : : * find, chicken out and ignore this FK; applying its selectivity
5906 : : * might result in double-counting. Put any clauses we did manage to
5907 : : * remove back into the worklist.
5908 : : *
5909 : : * Since the matching clauses are known not outerjoin-delayed, they
5910 : : * would normally have appeared in the initial joinclause list. If we
5911 : : * didn't find them, there are two possibilities:
5912 : : *
5913 : : * 1. If the FK match is based on an EC that is ec_has_const, it won't
5914 : : * have generated any join clauses at all. We discount such ECs while
5915 : : * checking to see if we have "all" the clauses. (Below, we'll adjust
5916 : : * the selectivity estimate for this case.)
5917 : : *
5918 : : * 2. The clauses were matched to some other FK in a previous
5919 : : * iteration of this loop, and thus removed from worklist. (A likely
5920 : : * case is that two FKs are matched to the same EC; there will be only
5921 : : * one EC-derived clause in the initial list, so the first FK will
5922 : : * consume it.) Applying both FKs' selectivity independently risks
5923 : : * underestimating the join size; in particular, this would undo one
5924 : : * of the main things that ECs were invented for, namely to avoid
5925 : : * double-counting the selectivity of redundant equality conditions.
5926 : : * Later we might think of a reasonable way to combine the estimates,
5927 : : * but for now, just punt, since this is a fairly uncommon situation.
5928 : : */
2015 5929 [ + + ]: 1793 : if (removedlist == NIL ||
5930 : 1570 : list_length(removedlist) !=
5931 [ - + ]: 1570 : (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5932 : : {
3608 5933 : 223 : worklist = list_concat(worklist, removedlist);
5934 : 223 : continue;
5935 : : }
5936 : :
5937 : : /*
5938 : : * Finally we get to the payoff: estimate selectivity using the
5939 : : * knowledge that each referencing row will match exactly one row in
5940 : : * the referenced table.
5941 : : *
5942 : : * XXX that's not true in the presence of nulls in the referencing
5943 : : * column(s), so in principle we should derate the estimate for those.
5944 : : * However (1) if there are any strict restriction clauses for the
5945 : : * referencing column(s) elsewhere in the query, derating here would
5946 : : * be double-counting the null fraction, and (2) it's not very clear
5947 : : * how to combine null fractions for multiple referencing columns. So
5948 : : * we do nothing for now about correcting for nulls.
5949 : : *
5950 : : * XXX another point here is that if either side of an FK constraint
5951 : : * is an inheritance parent, we estimate as though the constraint
5952 : : * covers all its children as well. This is not an unreasonable
5953 : : * assumption for a referencing table, ie the user probably applied
5954 : : * identical constraints to all child tables (though perhaps we ought
5955 : : * to check that). But it's not possible to have done that for a
5956 : : * referenced table. Fortunately, precisely because that doesn't
5957 : : * work, it is uncommon in practice to have an FK referencing a parent
5958 : : * table. So, at least for now, disregard inheritance here.
5959 : : */
3242 5960 [ + - + + ]: 1570 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
3608 5961 : 663 : {
5962 : : /*
5963 : : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5964 : : * referenced table is exactly the inside of the join. The join
5965 : : * selectivity is defined as the fraction of LHS rows that have
5966 : : * matches. The FK implies that every LHS row has a match *in the
5967 : : * referenced table*; but any restriction clauses on it will
5968 : : * reduce the number of matches. Hence we take the join
5969 : : * selectivity as equal to the selectivity of the table's
5970 : : * restriction clauses, which is rows / tuples; but we must guard
5971 : : * against tuples == 0.
5972 : : */
3242 5973 : 663 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5974 [ + + ]: 663 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5975 : :
5976 : 663 : fkselec *= ref_rel->rows / ref_tuples;
5977 : : }
5978 : : else
5979 : : {
5980 : : /*
5981 : : * Otherwise, selectivity is exactly 1/referenced-table-size; but
5982 : : * guard against tuples == 0. Note we should use the raw table
5983 : : * tuple count, not any estimate of its filtered or joined size.
5984 : : */
3608 5985 : 907 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5986 [ + - ]: 907 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5987 : :
5988 : 907 : fkselec *= 1.0 / ref_tuples;
5989 : : }
5990 : :
5991 : : /*
5992 : : * If any of the FK columns participated in ec_has_const ECs, then
5993 : : * equivclass.c will have generated "var = const" restrictions for
5994 : : * each side of the join, thus reducing the sizes of both input
5995 : : * relations. Taking the fkselec at face value would amount to
5996 : : * double-counting the selectivity of the constant restriction for the
5997 : : * referencing Var. Hence, look for the restriction clause(s) that
5998 : : * were applied to the referencing Var(s), and divide out their
5999 : : * selectivity to correct for this.
6000 : : */
2015 6001 [ + + ]: 1570 : if (fkinfo->nconst_ec > 0)
6002 : : {
6003 [ + + ]: 20 : for (int i = 0; i < fkinfo->nkeys; i++)
6004 : : {
6005 : 15 : EquivalenceClass *ec = fkinfo->eclass[i];
6006 : :
6007 [ + - + + ]: 15 : if (ec && ec->ec_has_const)
6008 : : {
6009 : 5 : EquivalenceMember *em = fkinfo->fk_eclass_member[i];
396 amitlan@postgresql.o 6010 : 5 : RestrictInfo *rinfo = find_derived_clause_for_ec_member(root,
6011 : : ec,
6012 : : em);
6013 : :
2015 tgl@sss.pgh.pa.us 6014 [ + - ]: 5 : if (rinfo)
6015 : : {
6016 : : Selectivity s0;
6017 : :
6018 : 5 : s0 = clause_selectivity(root,
6019 : : (Node *) rinfo,
6020 : : 0,
6021 : : jointype,
6022 : : sjinfo);
6023 [ + - ]: 5 : if (s0 > 0)
6024 : 5 : fkselec /= s0;
6025 : : }
6026 : : }
6027 : : }
6028 : : }
6029 : : }
6030 : :
3608 6031 : 217884 : *restrictlist = worklist;
2015 6032 [ - + - + ]: 217884 : CLAMP_PROBABILITY(fkselec);
3608 6033 : 217884 : return fkselec;
6034 : : }
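 : : /*
 : :  * Worked example (editorial note): for an inner join matched to an FK
 : :  * whose referenced table holds 50000 tuples, this function yields
 : :  * fkselec = 1.0 / 50000 = 2e-5 for the whole set of matched clauses,
 : :  * whereas treating a two-column FK's equality clauses as independent
 : :  * would multiply two per-column selectivities and could underestimate
 : :  * the join size badly.
 : :  */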
6035 : :
6036 : : /*
6037 : : * set_subquery_size_estimates
6038 : : * Set the size estimates for a base relation that is a subquery.
6039 : : *
6040 : : * The rel's targetlist and restrictinfo list must have been constructed
6041 : : * already, and the Paths for the subquery must have been completed.
6042 : : * We look at the subquery's PlannerInfo to extract data.
6043 : : *
6044 : : * We set the same fields as set_baserel_size_estimates.
6045 : : */
6046 : : void
5358 6047 : 30756 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6048 : : {
6049 : 30756 : PlannerInfo *subroot = rel->subroot;
6050 : : RelOptInfo *sub_final_rel;
6051 : : ListCell *lc;
6052 : :
6053 : : /* Should only be applied to base relations that are subqueries */
5646 6054 [ - + ]: 30756 : Assert(rel->relid > 0);
3148 andrew@dunslane.net 6055 [ + - - + ]: 30756 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
6056 : :
6057 : : /*
6058 : : * Copy raw number of output rows from subquery. All of its paths should
6059 : : * have the same output rowcount, so just look at cheapest-total.
6060 : : */
3711 tgl@sss.pgh.pa.us 6061 : 30756 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
6062 : 30756 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
6063 : :
6064 : : /*
6065 : : * Compute per-output-column width estimates by examining the subquery's
6066 : : * targetlist. For any output that is a plain Var, get the width estimate
6067 : : * that was made while planning the subquery. Otherwise, we leave it to
6068 : : * set_rel_width to fill in a datatype-based default estimate.
6069 : : */
5646 6070 [ + + + + : 152310 : foreach(lc, subroot->parse->targetList)
+ + ]
6071 : : {
3312 6072 : 121554 : TargetEntry *te = lfirst_node(TargetEntry, lc);
5646 6073 : 121554 : Node *texpr = (Node *) te->expr;
5369 6074 : 121554 : int32 item_width = 0;
6075 : :
6076 : : /* junk columns aren't visible to upper query */
5646 6077 [ + + ]: 121554 : if (te->resjunk)
6078 : 3848 : continue;
6079 : :
6080 : : /*
6081 : : * The subquery could be an expansion of a view that's had columns
6082 : : * added to it since the current query was parsed, so that there are
6083 : : * non-junk tlist columns in it that don't correspond to any column
6084 : : * visible at our query level. Ignore such columns.
6085 : : */
4783 6086 [ + - - + ]: 117706 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
4783 tgl@sss.pgh.pa.us 6087 :UBC 0 : continue;
6088 : :
6089 : : /*
6090 : : * XXX This currently doesn't work for subqueries containing set
6091 : : * operations, because the Vars in their tlists are bogus references
6092 : : * to the first leaf subquery, which wouldn't give the right answer
6093 : : * even if we could still get to its PlannerInfo.
6094 : : *
6095 : : * Also, the subquery could be an appendrel for which all branches are
6096 : : * known empty due to constraint exclusion, in which case
6097 : : * set_append_rel_pathlist will have left the attr_widths set to zero.
6098 : : *
6099 : : * In either case, we just leave the width estimate zero until
6100 : : * set_rel_width fixes it.
6101 : : */
5646 tgl@sss.pgh.pa.us 6102 [ + + ]:CBC 117706 : if (IsA(texpr, Var) &&
6103 [ + + ]: 51746 : subroot->parse->setOperations == NULL)
6104 : : {
5504 bruce@momjian.us 6105 : 49499 : Var *var = (Var *) texpr;
5646 tgl@sss.pgh.pa.us 6106 : 49499 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
6107 : :
6108 : 49499 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
6109 : : }
6110 : 117706 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
6111 : : }
6112 : :
6113 : : /* Now estimate number of output rows, etc */
6114 : 30756 : set_baserel_size_estimates(root, rel);
6115 : 30756 : }
6116 : :
6117 : : /*
6118 : : * set_function_size_estimates
6119 : : * Set the size estimates for a base relation that is a function call.
6120 : : *
6121 : : * The rel's targetlist and restrictinfo list must have been constructed
6122 : : * already.
6123 : : *
6124 : : * We set the same fields as set_baserel_size_estimates.
6125 : : */
6126 : : void
7639 6127 : 35764 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6128 : : {
6129 : : RangeTblEntry *rte;
6130 : : ListCell *lc;
6131 : :
6132 : : /* Should only be applied to base relations that are functions */
8487 6133 [ - + ]: 35764 : Assert(rel->relid > 0);
6954 6134 [ + - ]: 35764 : rte = planner_rt_fetch(rel->relid, root);
7517 6135 [ - + ]: 35764 : Assert(rte->rtekind == RTE_FUNCTION);
6136 : :
6137 : : /*
6138 : : * Estimate number of rows the functions will return. The rowcount of the
6139 : : * node is that of the largest function result.
6140 : : */
4548 6141 : 35764 : rel->tuples = 0;
6142 [ + - + + : 71781 : foreach(lc, rte->functions)
+ + ]
6143 : : {
6144 : 36017 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
2642 6145 : 36017 : double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
6146 : :
4548 6147 [ + + ]: 36017 : if (ntup > rel->tuples)
6148 : 35784 : rel->tuples = ntup;
6149 : : }
6150 : :
6151 : : /* Now estimate number of output rows, etc */
8156 6152 : 35764 : set_baserel_size_estimates(root, rel);
8759 6153 : 35764 : }
6154 : :
6155 : : /*
 6156 : : * set_tablefunc_size_estimates
 6157 : : * Set the size estimates for a base relation that is a table function.
6158 : : *
6159 : : * The rel's targetlist and restrictinfo list must have been constructed
6160 : : * already.
6161 : : *
 6162 : : * We set the same fields as set_baserel_size_estimates.
6163 : : */
6164 : : void
3345 alvherre@alvh.no-ip. 6165 : 517 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6166 : : {
6167 : : /* Should only be applied to base relations that are functions */
6168 [ - + ]: 517 : Assert(rel->relid > 0);
3148 andrew@dunslane.net 6169 [ + - - + ]: 517 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
6170 : :
3345 alvherre@alvh.no-ip. 6171 : 517 : rel->tuples = 100;
6172 : :
6173 : : /* Now estimate number of output rows, etc */
6174 : 517 : set_baserel_size_estimates(root, rel);
6175 : 517 : }
6176 : :
6177 : : /*
6178 : : * set_values_size_estimates
6179 : : * Set the size estimates for a base relation that is a values list.
6180 : : *
6181 : : * The rel's targetlist and restrictinfo list must have been constructed
6182 : : * already.
6183 : : *
6184 : : * We set the same fields as set_baserel_size_estimates.
6185 : : */
6186 : : void
7216 mail@joeconway.com 6187 : 6858 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6188 : : {
6189 : : RangeTblEntry *rte;
6190 : :
6191 : : /* Should only be applied to base relations that are values lists */
6192 [ - + ]: 6858 : Assert(rel->relid > 0);
6954 tgl@sss.pgh.pa.us 6193 [ + - ]: 6858 : rte = planner_rt_fetch(rel->relid, root);
7216 mail@joeconway.com 6194 [ - + ]: 6858 : Assert(rte->rtekind == RTE_VALUES);
6195 : :
6196 : : /*
6197 : : * Estimate number of rows the values list will return. We know this
6198 : : * precisely based on the list length (well, barring set-returning
6199 : : * functions in list items, but that's a refinement not catered for
6200 : : * anywhere else either).
6201 : : */
6202 : 6858 : rel->tuples = list_length(rte->values_lists);
6203 : :
6204 : : /* Now estimate number of output rows, etc */
6205 : 6858 : set_baserel_size_estimates(root, rel);
6206 : 6858 : }
6207 : :
6208 : : /*
6209 : : * set_cte_size_estimates
6210 : : * Set the size estimates for a base relation that is a CTE reference.
6211 : : *
6212 : : * The rel's targetlist and restrictinfo list must have been constructed
6213 : : * already, and we need an estimate of the number of rows returned by the CTE
6214 : : * (if a regular CTE) or the non-recursive term (if a self-reference).
6215 : : *
6216 : : * We set the same fields as set_baserel_size_estimates.
6217 : : */
6218 : : void
3711 tgl@sss.pgh.pa.us 6219 : 3607 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
6220 : : {
6221 : : RangeTblEntry *rte;
6222 : :
6223 : : /* Should only be applied to base relations that are CTE references */
6422 6224 [ - + ]: 3607 : Assert(rel->relid > 0);
6225 [ + - ]: 3607 : rte = planner_rt_fetch(rel->relid, root);
6226 [ - + ]: 3607 : Assert(rte->rtekind == RTE_CTE);
6227 : :
6228 [ + + ]: 3607 : if (rte->self_reference)
6229 : : {
6230 : : /*
6231 : : * In a self-reference, we assume the average worktable size is a
6232 : : * multiple of the nonrecursive term's size. The best multiplier will
6233 : : * vary depending on query "fan-out", so make its value adjustable.
6234 : : */
1503 6235 : 694 : rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
6236 : : }
6237 : : else
6238 : : {
6239 : : /* Otherwise just believe the CTE's rowcount estimate */
3711 6240 : 2913 : rel->tuples = cte_rows;
6241 : : }
6242 : :
6243 : : /* Now estimate number of output rows, etc */
6422 6244 : 3607 : set_baserel_size_estimates(root, rel);
6245 : 3607 : }
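 : : /*
 : :  * Worked example (editorial note): with the default
 : :  * recursive_worktable_factor of 10.0, a self-reference whose
 : :  * non-recursive term is estimated at 100 rows is costed as though
 : :  * the worktable held clamp_row_est(10.0 * 100) = 1000 rows.
 : :  */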
6246 : :
6247 : : /*
6248 : : * set_namedtuplestore_size_estimates
6249 : : * Set the size estimates for a base relation that is a tuplestore reference.
6250 : : *
6251 : : * The rel's targetlist and restrictinfo list must have been constructed
6252 : : * already.
6253 : : *
6254 : : * We set the same fields as set_baserel_size_estimates.
6255 : : */
6256 : : void
3322 kgrittn@postgresql.o 6257 : 438 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6258 : : {
6259 : : RangeTblEntry *rte;
6260 : :
6261 : : /* Should only be applied to base relations that are tuplestore references */
6262 [ - + ]: 438 : Assert(rel->relid > 0);
6263 [ + - ]: 438 : rte = planner_rt_fetch(rel->relid, root);
6264 [ - + ]: 438 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
6265 : :
6266 : : /*
6267 : : * Use the estimate provided by the code which is generating the named
6268 : : * tuplestore. In some cases, the actual number might be available; in
6269 : : * others the same plan will be re-used, so a "typical" value might be
6270 : : * estimated and used.
6271 : : */
6272 : 438 : rel->tuples = rte->enrtuples;
6273 [ - + ]: 438 : if (rel->tuples < 0)
3322 kgrittn@postgresql.o 6274 :UBC 0 : rel->tuples = 1000;
6275 : :
6276 : : /* Now estimate number of output rows, etc */
3322 kgrittn@postgresql.o 6277 :CBC 438 : set_baserel_size_estimates(root, rel);
6278 : 438 : }
6279 : :
6280 : : /*
6281 : : * set_result_size_estimates
6282 : : * Set the size estimates for an RTE_RESULT base relation
6283 : : *
6284 : : * The rel's targetlist and restrictinfo list must have been constructed
6285 : : * already.
6286 : : *
6287 : : * We set the same fields as set_baserel_size_estimates.
6288 : : */
6289 : : void
2654 tgl@sss.pgh.pa.us 6290 : 3616 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6291 : : {
6292 : : /* Should only be applied to RTE_RESULT base relations */
6293 [ - + ]: 3616 : Assert(rel->relid > 0);
6294 [ + - - + ]: 3616 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6295 : :
6296 : : /* RTE_RESULT always generates a single row, natively */
6297 : 3616 : rel->tuples = 1;
6298 : :
6299 : : /* Now estimate number of output rows, etc */
6300 : 3616 : set_baserel_size_estimates(root, rel);
6301 : 3616 : }
6302 : :
6303 : : /*
6304 : : * set_foreign_size_estimates
6305 : : * Set the size estimates for a base relation that is a foreign table.
6306 : : *
6307 : : * There is not a whole lot that we can do here; the foreign-data wrapper
6308 : : * is responsible for producing useful estimates. We can do a decent job
6309 : : * of estimating baserestrictcost, so we set that, and we also set up width
6310 : : * using what will be purely datatype-driven estimates from the targetlist.
6311 : : * There is no way to do anything sane with the rows value, so we just put
6312 : : * a default estimate and hope that the wrapper can improve on it. The
6313 : : * wrapper's GetForeignRelSize function will be called momentarily.
6314 : : *
6315 : : * The rel's targetlist and restrictinfo list must have been constructed
6316 : : * already.
6317 : : */
6318 : : void
5553 6319 : 1257 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6320 : : {
6321 : : /* Should only be applied to base relations */
6322 [ - + ]: 1257 : Assert(rel->relid > 0);
6323 : :
6324 : 1257 : rel->rows = 1000; /* entirely bogus default estimate */
6325 : :
6326 : 1257 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6327 : :
6328 : 1257 : set_rel_width(root, rel);
6329 : 1257 : }
6330 : :
6331 : :
6332 : : /*
6333 : : * set_rel_width
6334 : : * Set the estimated output width of a base relation.
6335 : : *
6336 : : * The estimated output width is the sum of the per-attribute width estimates
6337 : : * for the actually-referenced columns, plus any PHVs or other expressions
6338 : : * that have to be calculated at this relation. This is the amount of data
6339 : : * we'd need to pass upwards in case of a sort, hash, etc.
6340 : : *
6341 : : * This function also sets reltarget->cost, so it's a bit misnamed now.
6342 : : *
6343 : : * NB: this works best on plain relations because it prefers to look at
6344 : : * real Vars. For subqueries, set_subquery_size_estimates will already have
6345 : : * copied up whatever per-column estimates were made within the subquery,
6346 : : * and for other types of rels there isn't much we can do anyway. We fall
6347 : : * back on (fairly stupid) datatype-based width estimates if we can't get
6348 : : * any better number.
6349 : : *
6350 : : * The per-attribute width estimates are cached for possible re-use while
6351 : : * building join relations or post-scan/join pathtargets.
6352 : : */
6353 : : static void
7639 6354 : 407437 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6355 : : {
6409 6356 [ + - ]: 407437 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
868 6357 : 407437 : int64 tuple_width = 0;
5646 6358 : 407437 : bool have_wholerow_var = false;
6359 : : ListCell *lc;
6360 : :
6361 : : /* Vars are assumed to have cost zero, but other exprs do not */
3704 6362 : 407437 : rel->reltarget->cost.startup = 0;
6363 : 407437 : rel->reltarget->cost.per_tuple = 0;
6364 : :
6365 [ + + + + : 1465752 : foreach(lc, rel->reltarget->exprs)
+ + ]
6366 : : {
6405 6367 : 1058315 : Node *node = (Node *) lfirst(lc);
6368 : :
6369 : : /*
6370 : : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6371 : : * but there are corner cases involving LATERAL references where that
6372 : : * isn't so. If the Var has the wrong varno, fall through to the
6373 : : * generic case (it doesn't seem worth the trouble to be any smarter).
6374 : : */
5000 6375 [ + + ]: 1058315 : if (IsA(node, Var) &&
6376 [ + + ]: 1038324 : ((Var *) node)->varno == rel->relid)
8004 6377 : 271269 : {
6405 6378 : 1038249 : Var *var = (Var *) node;
6379 : : int ndx;
6380 : : int32 item_width;
6381 : :
6382 [ - + ]: 1038249 : Assert(var->varattno >= rel->min_attr);
6383 [ - + ]: 1038249 : Assert(var->varattno <= rel->max_attr);
6384 : :
6385 : 1038249 : ndx = var->varattno - rel->min_attr;
6386 : :
6387 : : /*
6388 : : * If it's a whole-row Var, we'll deal with it below after we have
6389 : : * already cached as many attr widths as possible.
6390 : : */
5646 6391 [ + + ]: 1038249 : if (var->varattno == 0)
6392 : : {
6393 : 2126 : have_wholerow_var = true;
6394 : 2126 : continue;
6395 : : }
6396 : :
6397 : : /*
6398 : : * The width may have been cached already (especially if it's a
6399 : : * subquery), so don't duplicate effort.
6400 : : */
6405 6401 [ + + ]: 1036123 : if (rel->attr_widths[ndx] > 0)
6402 : : {
6403 : 217294 : tuple_width += rel->attr_widths[ndx];
8346 6404 : 217294 : continue;
6405 : : }
6406 : :
6407 : : /* Try to get column width from statistics */
5646 6408 [ + + + + ]: 818829 : if (reloid != InvalidOid && var->varattno > 0)
6409 : : {
6405 6410 : 654474 : item_width = get_attavgwidth(reloid, var->varattno);
6411 [ + + ]: 654474 : if (item_width > 0)
6412 : : {
6413 : 547560 : rel->attr_widths[ndx] = item_width;
6414 : 547560 : tuple_width += item_width;
6415 : 547560 : continue;
6416 : : }
6417 : : }
6418 : :
6419 : : /*
6420 : : * Not a plain relation, or can't find statistics for it. Estimate
6421 : : * using just the type info.
6422 : : */
6423 : 271269 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
6424 [ - + ]: 271269 : Assert(item_width > 0);
6425 : 271269 : rel->attr_widths[ndx] = item_width;
6426 : 271269 : tuple_width += item_width;
6427 : : }
6428 [ + + ]: 20066 : else if (IsA(node, PlaceHolderVar))
6429 : : {
6430 : : /*
6431 : : * We will need to evaluate the PHV's contained expression while
6432 : : * scanning this rel, so be sure to include it in reltarget->cost.
6433 : : */
6434 : 1870 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
1357 6435 : 1870 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6436 : : QualCost cost;
6437 : :
6405 6438 : 1870 : tuple_width += phinfo->ph_width;
3729 6439 : 1870 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
3704 6440 : 1870 : rel->reltarget->cost.startup += cost.startup;
6441 : 1870 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6442 : : }
6443 : : else
6444 : : {
6445 : : /*
6446 : : * We could be looking at an expression pulled up from a subquery,
6447 : : * or a ROW() representing a whole-row child Var, etc. Do what we
6448 : : * can using the expression type information.
6449 : : */
6450 : : int32 item_width;
6451 : : QualCost cost;
6452 : :
6142 6453 : 18196 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
6454 [ - + ]: 18196 : Assert(item_width > 0);
6455 : 18196 : tuple_width += item_width;
6456 : : /* Not entirely clear if we need to account for cost, but do so */
3729 6457 : 18196 : cost_qual_eval_node(&cost, node, root);
3704 6458 : 18196 : rel->reltarget->cost.startup += cost.startup;
6459 : 18196 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6460 : : }
6461 : : }
6462 : :
6463 : : /*
6464 : : * If we have a whole-row reference, estimate its width as the sum of
6465 : : * per-column widths plus heap tuple header overhead.
6466 : : */
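 : : /*
 : :  * (Editor's note: on common 8-byte-MAXALIGN builds, the header overhead
 : :  * below, MAXALIGN(SizeofHeapTupleHeader), is MAXALIGN(23) = 24 bytes.)
 : :  */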
5646 6467 [ + + ]: 407437 : if (have_wholerow_var)
6468 : : {
868 6469 : 2126 : int64 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6470 : :
5646 6471 [ + + ]: 2126 : if (reloid != InvalidOid)
6472 : : {
6473 : : /* Real relation, so estimate true tuple width */
6474 : 1617 : wholerow_width += get_relation_data_width(reloid,
3240 6475 : 1617 : rel->attr_widths - rel->min_attr);
6476 : : }
6477 : : else
6478 : : {
6479 : : /* Do what we can with info for a phony rel */
6480 : : AttrNumber i;
6481 : :
5646 6482 [ + + ]: 1386 : for (i = 1; i <= rel->max_attr; i++)
6483 : 877 : wholerow_width += rel->attr_widths[i - rel->min_attr];
6484 : : }
6485 : :
868 6486 : 2126 : rel->attr_widths[0 - rel->min_attr] = clamp_width_est(wholerow_width);
6487 : :
6488 : : /*
6489 : : * Include the whole-row Var as part of the output tuple. Yes, that
6490 : : * really is what happens at runtime.
6491 : : */
5646 6492 : 2126 : tuple_width += wholerow_width;
6493 : : }
6494 : :
868 6495 : 407437 : rel->reltarget->width = clamp_width_est(tuple_width);
10892 scrappy@hub.org 6496 : 407437 : }
6497 : :
6498 : : /*
6499 : : * set_pathtarget_cost_width
6500 : : * Set the estimated eval cost and output width of a PathTarget tlist.
6501 : : *
6502 : : * As a notational convenience, returns the same PathTarget pointer passed in.
6503 : : *
6504 : : * Most, though not quite all, uses of this function occur after we've run
6505 : : * set_rel_width() for base relations; so we can usually obtain cached width
6506 : : * estimates for Vars. If we can't, fall back on datatype-based width
6507 : : * estimates. Current early-planning uses of PathTargets don't need accurate
6508 : : * widths badly enough to justify going to the catalogs for better data.
6509 : : */
6510 : : PathTarget *
3711 tgl@sss.pgh.pa.us 6511 : 472480 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6512 : : {
868 6513 : 472480 : int64 tuple_width = 0;
6514 : : ListCell *lc;
6515 : :
6516 : : /* Vars are assumed to have cost zero, but other exprs do not */
3711 6517 : 472480 : target->cost.startup = 0;
6518 : 472480 : target->cost.per_tuple = 0;
6519 : :
6520 [ + + + + : 1662052 : foreach(lc, target->exprs)
+ + ]
6521 : : {
6522 : 1189572 : Node *node = (Node *) lfirst(lc);
6523 : :
1142 drowley@postgresql.o 6524 : 1189572 : tuple_width += get_expr_width(root, node);
6525 : :
6526 : : /* For non-Vars, account for evaluation cost */
6527 [ + + ]: 1189572 : if (!IsA(node, Var))
6528 : : {
6529 : : QualCost cost;
6530 : :
3711 tgl@sss.pgh.pa.us 6531 : 506828 : cost_qual_eval_node(&cost, node, root);
6532 : 506828 : target->cost.startup += cost.startup;
6533 : 506828 : target->cost.per_tuple += cost.per_tuple;
6534 : : }
6535 : : }
6536 : :
868 6537 : 472480 : target->width = clamp_width_est(tuple_width);
6538 : :
3711 6539 : 472480 : return target;
6540 : : }
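 : : /*
 : :  * Editor's note (hedged): callers typically reach this function through
 : :  * the create_pathtarget() macro, which expands to
 : :  *     set_pathtarget_cost_width(root, make_pathtarget_from_tlist(tlist))
 : :  * and relies on the pass-through return value above.
 : :  */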
6541 : :
6542 : : /*
6543 : : * get_expr_width
6544 : : * Estimate the width of the given expr, attempting to use the width
6545 : : * cached in a Var's owning RelOptInfo; fall back on the type's
6546 : : * average width when that isn't possible or the node is not a Var.
6547 : : */
6548 : : static int32
1142 drowley@postgresql.o 6549 : 1419284 : get_expr_width(PlannerInfo *root, const Node *expr)
6550 : : {
6551 : : int32 width;
6552 : :
6553 [ + + ]: 1419284 : if (IsA(expr, Var))
6554 : : {
6555 : 903776 : const Var *var = (const Var *) expr;
6556 : :
6557 : : /* We should not see any upper-level Vars here */
6558 [ - + ]: 903776 : Assert(var->varlevelsup == 0);
6559 : :
6560 : : /* Try to get data from RelOptInfo cache */
6561 [ + + ]: 903776 : if (!IS_SPECIAL_VARNO(var->varno) &&
6562 [ + - ]: 899159 : var->varno < root->simple_rel_array_size)
6563 : : {
6564 : 899159 : RelOptInfo *rel = root->simple_rel_array[var->varno];
6565 : :
6566 [ + + ]: 899159 : if (rel != NULL &&
6567 [ + - ]: 884546 : var->varattno >= rel->min_attr &&
6568 [ + - ]: 884546 : var->varattno <= rel->max_attr)
6569 : : {
6570 : 884546 : int ndx = var->varattno - rel->min_attr;
6571 : :
6572 [ + + ]: 884546 : if (rel->attr_widths[ndx] > 0)
6573 : 857108 : return rel->attr_widths[ndx];
6574 : : }
6575 : : }
6576 : :
6577 : : /*
6578 : : * No cached data available, so estimate using just the type info.
6579 : : */
6580 : 46668 : width = get_typavgwidth(var->vartype, var->vartypmod);
6581 [ - + ]: 46668 : Assert(width > 0);
6582 : :
6583 : 46668 : return width;
6584 : : }
6585 : :
6586 : 515508 : width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6587 [ - + ]: 515508 : Assert(width > 0);
6588 : 515508 : return width;
6589 : : }
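 : : /*
 : :  * Example (editor's sketch): a Var for a column whose width was cached
 : :  * by set_rel_width() returns the cached attr_widths entry; an expression
 : :  * such as "a + b" of type int4 instead falls through to
 : :  * get_typavgwidth(INT4OID, -1), which is 4.
 : :  */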
6590 : :
6591 : : /*
6592 : : * relation_byte_size
6593 : : * Estimate the storage space in bytes for a given number of tuples
6594 : : * of a given width (size in bytes).
6595 : : */
6596 : : static double
9613 tgl@sss.pgh.pa.us 6597 : 3817807 : relation_byte_size(double tuples, int width)
6598 : : {
4091 6599 : 3817807 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6600 : : }
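 : : /*
 : :  * Worked example (assuming 8-byte MAXALIGN and the usual 23-byte
 : :  * SizeofHeapTupleHeader): 1000 tuples of width 42 are estimated at
 : :  * 1000 * (MAXALIGN(42) + MAXALIGN(23)) = 1000 * (48 + 24) = 72000 bytes.
 : :  */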
6601 : :
6602 : : /*
6603 : : * page_size
6604 : : * Returns an estimate of the number of pages covered by a given
6605 : : * number of tuples of a given width (size in bytes).
6606 : : */
6607 : : static double
9613 6608 : 7034 : page_size(double tuples, int width)
6609 : : {
6610 : 7034 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6611 : : }
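 : : /*
 : :  * Continuing the example above with the default BLCKSZ of 8192:
 : :  * page_size(1000, 42) = ceil(72000 / 8192) = 9 pages.
 : :  */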
6612 : :
6613 : : /*
6614 : : * Estimate the fraction of the work that each worker will do given the
6615 : : * number of workers budgeted for the path.
6616 : : */
6617 : : static double
3399 rhaas@postgresql.org 6618 : 373744 : get_parallel_divisor(Path *path)
6619 : : {
6620 : 373744 : double parallel_divisor = path->parallel_workers;
6621 : :
6622 : : /*
6623 : : * Early experience with parallel query suggests that when there is only
6624 : : * one worker, the leader often makes a very substantial contribution to
6625 : : * executing the parallel portion of the plan, but as more workers are
6626 : : * added, it does less and less, because it's busy reading tuples from the
6627 : : * workers and doing whatever non-parallel post-processing is needed. By
6628 : : * the time we reach 4 workers, the leader no longer makes a meaningful
6629 : : * contribution. Thus, for now, estimate that the leader spends 30% of
6630 : : * its time servicing each worker, and the remainder executing the
6631 : : * parallel plan.
6632 : : */
3093 6633 [ + + ]: 373744 : if (parallel_leader_participation)
6634 : : {
6635 : : double leader_contribution;
6636 : :
6637 : 372739 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6638 [ + + ]: 372739 : if (leader_contribution > 0)
6639 : 370588 : parallel_divisor += leader_contribution;
6640 : : }
6641 : :
3399 6642 : 373744 : return parallel_divisor;
6643 : : }
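 : : /*
 : :  * Worked example (editor's sketch): with parallel_leader_participation
 : :  * enabled, 2 planned workers give 2 + (1.0 - 0.3 * 2) = 2.4, while at
 : :  * 4 workers the leader term 1.0 - 0.3 * 4 is negative and is dropped,
 : :  * leaving a divisor of exactly 4.0.
 : :  */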
6644 : :
6645 : : /*
6646 : : * compute_bitmap_pages
6647 : : * Estimate number of pages fetched from heap in a bitmap heap scan.
6648 : : *
6649 : : * 'baserel' is the relation to be scanned
6650 : : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
6651 : : * 'loop_count' is the number of repetitions of the indexscan to factor into
6652 : : * estimates of caching behavior
6653 : : *
6654 : : * If cost_p isn't NULL, the indexTotalCost estimate is returned in *cost_p.
6655 : : * If tuples_p isn't NULL, the tuples_fetched estimate is returned in *tuples_p.
6656 : : */
6657 : : double
869 tgl@sss.pgh.pa.us 6658 : 561582 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
6659 : : Path *bitmapqual, double loop_count,
6660 : : Cost *cost_p, double *tuples_p)
6661 : : {
6662 : : Cost indexTotalCost;
6663 : : Selectivity indexSelectivity;
6664 : : double T;
6665 : : double pages_fetched;
6666 : : double tuples_fetched;
6667 : : double heap_pages;
6668 : : double maxentries;
6669 : :
6670 : : /*
6671 : : * Fetch total cost of obtaining the bitmap, as well as its total
6672 : : * selectivity.
6673 : : */
3385 rhaas@postgresql.org 6674 : 561582 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6675 : :
6676 : : /*
6677 : : * Estimate number of main-table pages fetched.
6678 : : */
6679 : 561582 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6680 : :
6681 [ + + ]: 561582 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6682 : :
6683 : : /*
6684 : : * For a single scan, the number of heap pages that need to be fetched is
6685 : : * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6686 : : * re-reads needed).
6687 : : */
3098 6688 : 561582 : pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
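 : : /*
 : :  * For instance (editor's example), T = 1000 pages and tuples_fetched =
 : :  * 500 give 2*1000*500 / (2*1000 + 500) = 400 pages for a single scan.
 : :  */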
6689 : :
6690 : : /*
6691 : : * Calculate the number of pages fetched from the heap. Then, based on
6692 : : * the current work_mem setting, estimate maxentries for the bitmap.
6693 : : * (Note that we always do this calculation based on the number of pages
6694 : : * that would be fetched in a single iteration, even if loop_count > 1.
6695 : : * That's correct, because only that number of entries will be stored in
6696 : : * the bitmap at one time.)
6697 : : */
6698 [ + + ]: 561582 : heap_pages = Min(pages_fetched, baserel->pages);
459 tgl@sss.pgh.pa.us 6699 : 561582 : maxentries = tbm_calculate_entries(work_mem * (Size) 1024);
6700 : :
3385 rhaas@postgresql.org 6701 [ + + ]: 561582 : if (loop_count > 1)
6702 : : {
6703 : : /*
6704 : : * For repeated bitmap scans, scale up the number of tuples fetched in
6705 : : * the Mackert and Lohman formula by the number of scans, so that we
6706 : : * estimate the number of pages fetched by all the scans. Then
6707 : : * pro-rate for one scan.
6708 : : */
6709 : 127633 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6710 : : baserel->pages,
6711 : : get_indexpath_pages(bitmapqual),
6712 : : root);
6713 : 127633 : pages_fetched /= loop_count;
6714 : : }
6715 : :
6716 [ + + ]: 561582 : if (pages_fetched >= T)
6717 : 55945 : pages_fetched = T;
6718 : : else
6719 : 505637 : pages_fetched = ceil(pages_fetched);
6720 : :
3098 6721 [ + + ]: 561582 : if (maxentries < heap_pages)
6722 : : {
6723 : : double exact_pages;
6724 : : double lossy_pages;
6725 : :
6726 : : /*
6727 : : * Crude approximation of the number of lossy pages. Because of the
6728 : : * way tbm_lossify() is coded, the number of lossy pages increases
6729 : : * very sharply as soon as we run short of memory; this formula has
6730 : : * that property and seems to perform adequately in testing, but it's
6731 : : * possible we could do better somehow.
6732 : : */
6733 [ - + ]: 15 : lossy_pages = Max(0, heap_pages - maxentries / 2);
6734 : 15 : exact_pages = heap_pages - lossy_pages;
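 : : /*
 : :  * E.g. (editor's example), heap_pages = 1000 with maxentries = 800
 : :  * gives lossy_pages = 1000 - 400 = 600 and exact_pages = 400.
 : :  */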
6735 : :
6736 : : /*
6737 : : * If there are lossy pages then recompute the number of tuples
6738 : : * processed by the bitmap heap node. We assume here that the chance
6739 : : * of a given tuple coming from an exact page is the same as the
6740 : : * chance that a given page is exact. This might not be true, but
6741 : : * it's not clear how we can do any better.
6742 : : */
6743 [ + - ]: 15 : if (lossy_pages > 0)
6744 : : tuples_fetched =
6745 : 15 : clamp_row_est(indexSelectivity *
6746 : 15 : (exact_pages / heap_pages) * baserel->tuples +
6747 : 15 : (lossy_pages / heap_pages) * baserel->tuples);
6748 : : }
6749 : :
869 tgl@sss.pgh.pa.us 6750 [ + + ]: 561582 : if (cost_p)
6751 : 449641 : *cost_p = indexTotalCost;
6752 [ + + ]: 561582 : if (tuples_p)
6753 : 449641 : *tuples_p = tuples_fetched;
6754 : :
3385 rhaas@postgresql.org 6755 : 561582 : return pages_fetched;
6756 : : }
6757 : :
6758 : : /*
6759 : : * compute_gather_rows
6760 : : * Estimate number of rows for gather (merge) nodes.
6761 : : *
6762 : : * In a parallel plan, each worker's row estimate is determined by dividing the
6763 : : * total number of rows by parallel_divisor, which accounts for the leader's
6764 : : * contribution in addition to the number of workers. Accordingly, when
6765 : : * estimating the number of rows for gather (merge) nodes, we multiply the rows
6766 : : * per worker by the same parallel_divisor to undo the division.
6767 : : */
6768 : : double
651 rguo@postgresql.org 6769 : 37798 : compute_gather_rows(Path *path)
6770 : : {
6771 [ - + ]: 37798 : Assert(path->parallel_workers > 0);
6772 : :
6773 : 37798 : return clamp_row_est(path->rows * get_parallel_divisor(path));
6774 : : }
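 : : /*
 : :  * Worked example (editor's sketch): a partial path estimated at 1000
 : :  * rows per worker with 2 workers and a participating leader has a
 : :  * parallel divisor of 2.4, so the Gather node is estimated to return
 : :  * clamp_row_est(1000 * 2.4) = 2400 rows.
 : :  */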