Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nodeAgg.c
4 : : * Routines to handle aggregate nodes.
5 : : *
6 : : * ExecAgg normally evaluates each aggregate in the following steps:
7 : : *
8 : : * transvalue = initcond
9 : : * foreach input_tuple do
10 : : * transvalue = transfunc(transvalue, input_value(s))
11 : : * result = finalfunc(transvalue, direct_argument(s))
12 : : *
13 : : * If a finalfunc is not supplied then the result is just the ending
14 : : * value of transvalue.
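 : : *
 : : * For example, avg(int4) follows this scheme (per its pg_aggregate entry),
 : : * using a two-element int8 array holding the running count and sum as its
 : : * transvalue: initcond is '{0,0}', the transfunc int4_avg_accum() bumps the
 : : * count and adds the input to the sum, and the finalfunc int8_avg() returns
 : : * sum/count (or NULL if no rows were aggregated).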
15 : : *
16 : : * Other behaviors can be selected by the "aggsplit" mode, which exists
17 : : * to support partial aggregation. It is possible to:
18 : : * * Skip running the finalfunc, so that the output is always the
19 : : * final transvalue state.
20 : : * * Substitute the combinefunc for the transfunc, so that transvalue
21 : : * states (propagated up from a child partial-aggregation step) are merged
22 : : * rather than raw input rows being processed. (The statements below about
23 : : * the transfunc apply equally to the combinefunc, when it's selected.)
24 : : * * Apply the serializefunc to the output values (this only makes sense
25 : : * when skipping the finalfunc, since the serializefunc works on the
26 : : * transvalue data type).
27 : : * * Apply the deserializefunc to the input values (this only makes sense
28 : : * when using the combinefunc, for similar reasons).
29 : : * It is the planner's responsibility to connect up Agg nodes using these
30 : : * alternate behaviors in a way that makes sense, with partial aggregation
31 : : * results being fed to nodes that expect them.
32 : : *
33 : : * If a normal aggregate call specifies DISTINCT or ORDER BY, we sort the
34 : : * input tuples and eliminate duplicates (if required) before performing
35 : : * the above-depicted process. (However, we don't do that for ordered-set
36 : : * aggregates; their "ORDER BY" inputs are ordinary aggregate arguments
37 : : * so far as this module is concerned.) Note that partial aggregation
38 : : * is not supported in these cases, since we couldn't ensure global
39 : : * ordering or distinctness of the inputs.
40 : : *
41 : : * If transfunc is marked "strict" in pg_proc and initcond is NULL,
42 : : * then the first non-NULL input_value is assigned directly to transvalue,
43 : : * and transfunc isn't applied until the second non-NULL input_value.
44 : : * The agg's first input type and transtype must be the same in this case!
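 : : * (This is how max() and min() work: for instance, max(int4) has no initcond
 : : * and a strict transfunc, int4larger, so the first non-NULL input simply
 : : * becomes the running maximum.)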
45 : : *
46 : : * If transfunc is marked "strict" then NULL input_values are skipped,
47 : : * keeping the previous transvalue. If transfunc is not strict then it
48 : : * is called for every input tuple and must deal with NULL initcond
49 : : * or NULL input_values for itself.
50 : : *
51 : : * If finalfunc is marked "strict" then it is not called when the
52 : : * ending transvalue is NULL; instead, a NULL result is created
53 : : * automatically (this is just the usual handling of strict functions,
54 : : * of course). A non-strict finalfunc can make its own choice of
55 : : * what to return for a NULL ending transvalue.
56 : : *
57 : : * Ordered-set aggregates are treated specially in one other way: we
58 : : * evaluate any "direct" arguments and pass them to the finalfunc along
59 : : * with the transition value.
60 : : *
61 : : * A finalfunc can have additional arguments beyond the transvalue and
62 : : * any "direct" arguments, corresponding to the input arguments of the
63 : : * aggregate. These are always just passed as NULL. Such arguments may be
64 : : * needed to allow resolution of a polymorphic aggregate's result type.
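 : : * (array_agg() is one example: its finalfunc takes an extra argument of the
 : : * input element type, always passed as NULL, which exists only so that the
 : : * polymorphic "anyarray" result type can be resolved.)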
65 : : *
66 : : * We compute aggregate input expressions and run the transition functions
67 : : * in a temporary econtext (aggstate->tmpcontext). This is reset at least
68 : : * once per input tuple, so when the transvalue datatype is
69 : : * pass-by-reference, we have to be careful to copy it into a longer-lived
70 : : * memory context, and free the prior value to avoid memory leakage. We
71 : : * store transvalues in another set of econtexts, aggstate->aggcontexts
72 : : * (one per grouping set, see below), which are also used for the hashtable
73 : : * structures in AGG_HASHED mode. These econtexts are rescanned, not just
74 : : * reset, at group boundaries so that aggregate transition functions can
75 : : * register shutdown callbacks via AggRegisterCallback.
76 : : *
77 : : * The node's regular econtext (aggstate->ss.ps.ps_ExprContext) is used to
78 : : * run finalize functions and compute the output tuple; this context can be
79 : : * reset once per output tuple.
80 : : *
81 : : * The executor's AggState node is passed as the fmgr "context" value in
82 : : * all transfunc and finalfunc calls. It is not recommended that the
83 : : * transition functions look at the AggState node directly, but they can
84 : : * use AggCheckCallContext() to verify that they are being called by
85 : : * nodeAgg.c (and not as ordinary SQL functions). The main reason a
86 : : * transition function might want to know this is so that it can avoid
87 : : * palloc'ing a fixed-size pass-by-ref transition value on every call:
88 : : * it can instead just scribble on and return its left input. Ordinarily
89 : : * it is completely forbidden for functions to modify pass-by-ref inputs,
90 : : * but in the aggregate case we know the left input is either the initial
91 : : * transition value or a previous function result, and in either case its
92 : : * value need not be preserved. See int8inc() for an example. Notice that
93 : : * the EEOP_AGG_PLAIN_TRANS step is coded to avoid a data copy step when
94 : : * the previous transition value pointer is returned. It is also possible
95 : : * to avoid repeated data copying when the transition value is an expanded
96 : : * object: to do that, the transition function must take care to return
97 : : * an expanded object that is in a child context of the memory context
98 : : * returned by AggCheckCallContext(). Also, some transition functions want
99 : : * to store working state in addition to the nominal transition value; they
100 : : * can use the memory context returned by AggCheckCallContext() to do that.
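 : : *
 : : * For illustration only, a hypothetical transition function with a
 : : * pass-by-ref, fixed-size state might follow this pattern (the names and
 : : * state type are invented; see int8inc() and array_agg_transfn() for real
 : : * examples of these techniques, using the usual fmgr V1 conventions):
 : : *
 : : *	typedef struct MyTransState { int64 count; int64 sum; } MyTransState;
 : : *
 : : *	Datum
 : : *	my_sum_count_trans(PG_FUNCTION_ARGS)
 : : *	{
 : : *		MemoryContext aggcontext;
 : : *		MyTransState *state;
 : : *
 : : *		if (!AggCheckCallContext(fcinfo, &aggcontext))
 : : *			elog(ERROR, "my_sum_count_trans called in non-aggregate context");
 : : *
 : : *		if (PG_ARGISNULL(0))
 : : *			state = (MyTransState *)
 : : *				MemoryContextAllocZero(aggcontext, sizeof(MyTransState));
 : : *		else
 : : *			state = (MyTransState *) PG_GETARG_POINTER(0);
 : : *
 : : *		state->count++;
 : : *		if (!PG_ARGISNULL(1))
 : : *			state->sum += PG_GETARG_INT64(1);
 : : *
 : : *		PG_RETURN_POINTER(state);
 : : *	}
 : : *
 : : * Allocating the state in the aggcontext (rather than the short-lived
 : : * calling context) and returning the same pointer on later calls is what
 : : * lets nodeAgg.c skip the per-call copy described above.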
101 : : *
102 : : * Note: AggCheckCallContext() is available as of PostgreSQL 9.0. The
103 : : * AggState is available as context in earlier releases (back to 8.1),
104 : : * but direct examination of the node is needed to use it before 9.0.
105 : : *
106 : : * As of 9.4, aggregate transition functions can also use AggGetAggref()
107 : : * to get hold of the Aggref expression node for their aggregate call.
108 : : * This is mainly intended for ordered-set aggregates, which are not
109 : : * supported as window functions. (A regular aggregate function would
110 : : * need some fallback logic to use this, since there's no Aggref node
111 : : * for a window function.)
112 : : *
113 : : * Grouping sets:
114 : : *
115 : : * A list of grouping sets which is structurally equivalent to a ROLLUP
116 : : * clause (e.g. (a,b,c), (a,b), (a)) can be processed in a single pass over
117 : : * ordered data. We do this by keeping a separate set of transition values
118 : : * for each grouping set being concurrently processed; for each input tuple
119 : : * we update them all, and on group boundaries we reset those states
120 : : * (starting at the front of the list) whose grouping values have changed
121 : : * (the list of grouping sets is ordered from most specific to least
122 : : * specific).
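 : : * For example, with grouping sets (a,b,c), (a,b), (a): when the value of b
 : : * changes between input tuples, the transition states for (a,b,c) and (a,b)
 : : * are reset, while the state for (a) keeps accumulating.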
123 : : *
124 : : * Where more complex grouping sets are used, we break them down into
125 : : * "phases", where each phase has a different sort order (except phase 0
126 : : * which is reserved for hashing). During each phase but the last, the
127 : : * input tuples are additionally stored in a tuplesort which is keyed to the
128 : : * next phase's sort order; during each phase but the first, the input
129 : : * tuples are drawn from the previously sorted data. (The sorting of the
130 : : * data for the first phase is handled by the planner, as it might be
131 : : * satisfied by underlying nodes.)
132 : : *
133 : : * Hashing can be mixed with sorted grouping. To do this, we have an
134 : : * AGG_MIXED strategy that populates the hashtables during the first sorted
135 : : * phase, and switches to reading them out after completing all sort phases.
136 : : * We can also support AGG_HASHED with multiple hash tables and no sorting
137 : : * at all.
138 : : *
139 : : * From the perspective of aggregate transition and final functions, the
140 : : * only issue regarding grouping sets is this: a single call site (flinfo)
141 : : * of an aggregate function may be used for updating several different
142 : : * transition values in turn. So the function must not cache in the flinfo
143 : : * anything which logically belongs as part of the transition value (most
144 : : * importantly, the memory context in which the transition value exists).
145 : : * The support API functions (AggCheckCallContext, AggRegisterCallback) are
146 : : * sensitive to the grouping set for which the aggregate function is
147 : : * currently being called.
148 : : *
149 : : * Plan structure:
150 : : *
151 : : * What we get from the planner is actually one "real" Agg node which is
152 : : * part of the plan tree proper, but which optionally has an additional list
153 : : * of Agg nodes hung off the side via the "chain" field. This is because an
154 : : * Agg node happens to be a convenient representation of all the data we
155 : : * need for grouping sets.
156 : : *
157 : : * For many purposes, we treat the "real" node as if it were just the first
158 : : * node in the chain. The chain must be ordered such that hashed entries
159 : : * come before sorted/plain entries; the real node is marked AGG_MIXED if
160 : : * there are both types present (in which case the real node describes one
161 : : * of the hashed groupings; other AGG_HASHED nodes may optionally follow in
162 : : * the chain, followed in turn by AGG_SORTED nodes or (one) AGG_PLAIN node). If
163 : : * the real node is marked AGG_HASHED or AGG_SORTED, then all the chained
164 : : * nodes must be of the same type; if it is AGG_PLAIN, there can be no
165 : : * chained nodes.
166 : : *
167 : : * We collect all hashed nodes into a single "phase", numbered 0, and create
168 : : * a sorted phase (numbered 1..n) for each AGG_SORTED or AGG_PLAIN node.
169 : : * Phase 0 is allocated even if there are no hashes, but remains unused in
170 : : * that case.
171 : : *
172 : : * AGG_HASHED nodes actually refer to only a single grouping set each,
173 : : * because for each hashed grouping we need a separate grpColIdx and
174 : : * numGroups estimate. AGG_SORTED nodes represent a "rollup", a list of
175 : : * grouping sets that share a sort order. Each AGG_SORTED node other than
176 : : * the first one has an associated Sort node which describes the sort order
177 : : * to be used; the first sorted node takes its input from the outer subtree,
178 : : * which the planner has already arranged to provide ordered data.
179 : : *
180 : : * Memory and ExprContext usage:
181 : : *
182 : : * Because we're accumulating aggregate values across input rows, we need to
183 : : * use more memory contexts than just simple input/output tuple contexts.
184 : : * In fact, for a rollup, we need a separate context for each grouping set
185 : : * so that we can reset the inner (finer-grained) aggregates on their group
186 : : * boundaries while continuing to accumulate values for outer
187 : : * (coarser-grained) groupings. On top of this, we might be simultaneously
188 : : * populating hashtables; however, we only need one context for all the
189 : : * hashtables.
190 : : *
191 : : * So we create an array, aggcontexts, with an ExprContext for each grouping
192 : : * set in the largest rollup that we're going to process, and use the
193 : : * per-tuple memory context of those ExprContexts to store the aggregate
194 : : * transition values. hashcontext is the single context created to support
195 : : * all hash tables.
196 : : *
197 : : * Spilling To Disk:
198 : : *
199 : : * When performing hash aggregation, if the hash table memory exceeds the
200 : : * limit (see hash_agg_check_limits()), we enter "spill mode". In spill
201 : : * mode, we advance the transition states only for groups already in the
202 : : * hash table. For tuples that would need to create new hash table
203 : : * entries (and initialize new transition states), we instead spill them to
204 : : * disk to be processed later. The tuples are spilled in a partitioned
205 : : * manner, so that subsequent batches are smaller and less likely to exceed
206 : : * hash_mem (if a batch does exceed hash_mem, it must be spilled
207 : : * recursively).
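 : : *
 : : * Roughly (see lookup_hash_entries() and hashagg_spill_tuple() for the
 : : * actual logic):
 : : *
 : : *	  if the tuple's group is already in the hash table
 : : *		  advance its transition states
 : : *	  else if we are not yet in spill mode
 : : *		  create a new entry and initialize its transition states
 : : *	  else
 : : *		  write the tuple to a spill partition chosen from its hash value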
208 : : *
209 : : * Spilled data is written to logical tapes. These provide better control
210 : : * over memory usage, disk space, and the number of files than if we were
211 : : * to use a BufFile for each spill. We don't know the number of tapes needed
212 : : * at the start of the algorithm (because it can recurse), so a tape set is
213 : : * allocated at the beginning, and individual tapes are created as needed.
214 : : * As a particular tape is read, logtape.c recycles its disk space. When a
215 : : * tape is read to completion, it is destroyed entirely.
216 : : *
217 : : * Tapes' buffers can take up substantial memory when many tapes are open at
218 : : * once. We only need one tape open at a time in read mode (using a buffer
219 : : * that's a multiple of BLCKSZ), but we need one tape open in write mode for
220 : : * each partition, each requiring a buffer of size BLCKSZ.
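 : : * (For example, with the default BLCKSZ of 8192 bytes, 32 open write
 : : * partitions would need 256 kB of write buffers, on top of the single
 : : * read buffer.)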
221 : : *
222 : : * Note that it's possible for transition states to start small but then
223 : : * grow very large; for instance in the case of ARRAY_AGG. In such cases,
224 : : * it's still possible to significantly exceed hash_mem. We try to avoid
225 : : * this situation by estimating what will fit in the available memory, and
226 : : * imposing a limit on the number of groups separately from the amount of
227 : : * memory consumed.
228 : : *
229 : : * Transition / Combine function invocation:
230 : : *
231 : : * For performance reasons, transition functions (including combine
232 : : * functions) aren't invoked one-by-one from nodeAgg.c after computing
233 : : * arguments using the expression evaluation engine. Instead,
234 : : * ExecBuildAggTrans() builds one large expression that does both argument
235 : : * evaluation and transition function invocation. That avoids performance
236 : : * issues due to repeated uses of expression evaluation, avoids complications
237 : : * from filter expressions having to be evaluated early, and allows the
238 : : * entire expression to be JIT-compiled into one native function.
239 : : *
240 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
241 : : * Portions Copyright (c) 1994, Regents of the University of California
242 : : *
243 : : * IDENTIFICATION
244 : : * src/backend/executor/nodeAgg.c
245 : : *
246 : : *-------------------------------------------------------------------------
247 : : */
248 : :
249 : : #include "postgres.h"
250 : :
251 : : #include "access/htup_details.h"
252 : : #include "access/parallel.h"
253 : : #include "catalog/objectaccess.h"
254 : : #include "catalog/pg_aggregate.h"
255 : : #include "catalog/pg_proc.h"
256 : : #include "catalog/pg_type.h"
257 : : #include "common/hashfn.h"
258 : : #include "executor/execExpr.h"
259 : : #include "executor/executor.h"
260 : : #include "executor/nodeAgg.h"
261 : : #include "lib/hyperloglog.h"
262 : : #include "miscadmin.h"
263 : : #include "nodes/nodeFuncs.h"
264 : : #include "optimizer/optimizer.h"
265 : : #include "parser/parse_agg.h"
266 : : #include "parser/parse_coerce.h"
267 : : #include "utils/acl.h"
268 : : #include "utils/builtins.h"
269 : : #include "utils/datum.h"
270 : : #include "utils/expandeddatum.h"
271 : : #include "utils/injection_point.h"
272 : : #include "utils/logtape.h"
273 : : #include "utils/lsyscache.h"
274 : : #include "utils/memutils.h"
275 : : #include "utils/memutils_memorychunk.h"
276 : : #include "utils/syscache.h"
277 : : #include "utils/tuplesort.h"
278 : :
279 : : /*
280 : : * Control how many partitions are created when spilling HashAgg to
281 : : * disk.
282 : : *
283 : : * HASHAGG_PARTITION_FACTOR is multiplied by the estimated number of
284 : : * partitions needed such that each partition will fit in memory. The factor
285 : : * is set higher than one because there's not a high cost to having a few too
286 : : * many partitions, and it makes it less likely that a partition will need to
287 : : * be spilled recursively. Another benefit of having more, smaller partitions
288 : : * is that small hash tables may perform better than large ones due to memory
289 : : * caching effects.
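 : : *
 : : * For example, if the spilled data is estimated to need about 10 partitions
 : : * for each one to fit in memory, the factor suggests creating 15; the count
 : : * actually chosen by hash_choose_num_partitions() is then also clamped to
 : : * the min/max limits below.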
290 : : *
291 : : * We also specify a min and max number of partitions per spill. Too few might
292 : : * mean a lot of wasted I/O from repeated spilling of the same tuples. Too
293 : : * many will result in lots of memory wasted buffering the spill files (which
294 : : * could instead be spent on a larger hash table).
295 : : */
296 : : #define HASHAGG_PARTITION_FACTOR 1.50
297 : : #define HASHAGG_MIN_PARTITIONS 4
298 : : #define HASHAGG_MAX_PARTITIONS 1024
299 : :
300 : : /*
301 : : * For reading from tapes, the buffer size must be a multiple of
302 : : * BLCKSZ. Larger values help when reading from multiple tapes concurrently,
303 : : * but that doesn't happen in HashAgg, so we simply use BLCKSZ. Writing to a
304 : : * tape always uses a buffer of size BLCKSZ.
305 : : */
306 : : #define HASHAGG_READ_BUFFER_SIZE BLCKSZ
307 : : #define HASHAGG_WRITE_BUFFER_SIZE BLCKSZ
308 : :
309 : : /*
310 : : * HyperLogLog is used for estimating the cardinality of the spilled tuples in
311 : : * a given partition. 5 bits corresponds to a size of about 32 bytes and a
312 : : * worst-case error of around 18%. That's effective enough to choose a
313 : : * reasonable number of partitions when recursing.
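 : : * (As a rough check, using the standard HyperLogLog error estimate of about
 : : * 1.04/sqrt(m) with m = 2^5 = 32 registers: 1.04/sqrt(32) is roughly 0.18,
 : : * i.e. about 18%.)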
314 : : */
315 : : #define HASHAGG_HLL_BIT_WIDTH 5
316 : :
317 : : /*
318 : : * Assume the palloc overhead always uses sizeof(MemoryChunk) bytes.
319 : : */
320 : : #define CHUNKHDRSZ sizeof(MemoryChunk)
321 : :
322 : : /*
323 : : * Represents partitioned spill data for a single hashtable. Contains the
324 : : * necessary information to route tuples to the correct partition, and to
325 : : * transform the spilled data into new batches.
326 : : *
327 : : * The high bits are used for partition selection (when recursing, we ignore
328 : : * the bits that have already been used for partition selection at an earlier
329 : : * level).
330 : : */
331 : : typedef struct HashAggSpill
332 : : {
333 : : int npartitions; /* number of partitions */
334 : : LogicalTape **partitions; /* spill partition tapes */
335 : : int64 *ntuples; /* number of tuples in each partition */
336 : : uint32 mask; /* mask to find partition from hash value */
337 : : int shift; /* after masking, shift by this amount */
338 : : hyperLogLogState *hll_card; /* cardinality estimate for contents */
339 : : } HashAggSpill;
340 : :
341 : : /*
342 : : * Represents work to be done for one pass of hash aggregation (with only one
343 : : * grouping set).
344 : : *
345 : : * Also tracks the bits of the hash already used for partition selection by
346 : : * earlier iterations, so that this batch can use new bits. If all bits have
347 : : * already been used, no partitioning will be done (any spilled data will go
348 : : * to a single output tape).
349 : : */
350 : : typedef struct HashAggBatch
351 : : {
352 : : int setno; /* grouping set */
353 : : int used_bits; /* number of bits of hash already used */
354 : : LogicalTape *input_tape; /* input partition tape */
355 : : int64 input_tuples; /* number of tuples in this batch */
356 : : double input_card; /* estimated group cardinality */
357 : : } HashAggBatch;
358 : :
359 : : /* used to find referenced colnos */
360 : : typedef struct FindColsContext
361 : : {
362 : : bool is_aggref; /* is under an aggref */
363 : : Bitmapset *aggregated; /* column references under an aggref */
364 : : Bitmapset *unaggregated; /* other column references */
365 : : } FindColsContext;
366 : :
367 : : static void select_current_set(AggState *aggstate, int setno, bool is_hash);
368 : : static void initialize_phase(AggState *aggstate, int newphase);
369 : : static TupleTableSlot *fetch_input_tuple(AggState *aggstate);
370 : : static void initialize_aggregates(AggState *aggstate,
371 : : AggStatePerGroup *pergroups,
372 : : int numReset);
373 : : static void advance_transition_function(AggState *aggstate,
374 : : AggStatePerTrans pertrans,
375 : : AggStatePerGroup pergroupstate);
376 : : static void advance_aggregates(AggState *aggstate);
377 : : static void process_ordered_aggregate_single(AggState *aggstate,
378 : : AggStatePerTrans pertrans,
379 : : AggStatePerGroup pergroupstate);
380 : : static void process_ordered_aggregate_multi(AggState *aggstate,
381 : : AggStatePerTrans pertrans,
382 : : AggStatePerGroup pergroupstate);
383 : : static void finalize_aggregate(AggState *aggstate,
384 : : AggStatePerAgg peragg,
385 : : AggStatePerGroup pergroupstate,
386 : : Datum *resultVal, bool *resultIsNull);
387 : : static void finalize_partialaggregate(AggState *aggstate,
388 : : AggStatePerAgg peragg,
389 : : AggStatePerGroup pergroupstate,
390 : : Datum *resultVal, bool *resultIsNull);
391 : : static inline void prepare_hash_slot(AggStatePerHash perhash,
392 : : TupleTableSlot *inputslot,
393 : : TupleTableSlot *hashslot);
394 : : static void prepare_projection_slot(AggState *aggstate,
395 : : TupleTableSlot *slot,
396 : : int currentSet);
397 : : static void finalize_aggregates(AggState *aggstate,
398 : : AggStatePerAgg peraggs,
399 : : AggStatePerGroup pergroup);
400 : : static TupleTableSlot *project_aggregates(AggState *aggstate);
401 : : static void find_cols(AggState *aggstate, Bitmapset **aggregated,
402 : : Bitmapset **unaggregated);
403 : : static bool find_cols_walker(Node *node, FindColsContext *context);
404 : : static void build_hash_tables(AggState *aggstate);
405 : : static void build_hash_table(AggState *aggstate, int setno, double nbuckets);
406 : : static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
407 : : bool nullcheck);
408 : : static void hash_create_memory(AggState *aggstate);
409 : : static double hash_choose_num_buckets(double hashentrysize,
410 : : double ngroups, Size memory);
411 : : static int hash_choose_num_partitions(double input_groups,
412 : : double hashentrysize,
413 : : int used_bits,
414 : : int *log2_npartitions);
415 : : static void initialize_hash_entry(AggState *aggstate,
416 : : TupleHashTable hashtable,
417 : : TupleHashEntry entry);
418 : : static void lookup_hash_entries(AggState *aggstate);
419 : : static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
420 : : static void agg_fill_hash_table(AggState *aggstate);
421 : : static bool agg_refill_hash_table(AggState *aggstate);
422 : : static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
423 : : static TupleTableSlot *agg_retrieve_hash_table_in_memory(AggState *aggstate);
424 : : static void hash_agg_check_limits(AggState *aggstate);
425 : : static void hash_agg_enter_spill_mode(AggState *aggstate);
426 : : static void hash_agg_update_metrics(AggState *aggstate, bool from_tape,
427 : : int npartitions);
428 : : static void hashagg_finish_initial_spills(AggState *aggstate);
429 : : static void hashagg_reset_spill_state(AggState *aggstate);
430 : : static HashAggBatch *hashagg_batch_new(LogicalTape *input_tape, int setno,
431 : : int64 input_tuples, double input_card,
432 : : int used_bits);
433 : : static MinimalTuple hashagg_batch_read(HashAggBatch *batch, uint32 *hashp);
434 : : static void hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset,
435 : : int used_bits, double input_groups,
436 : : double hashentrysize);
437 : : static Size hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
438 : : TupleTableSlot *inputslot, uint32 hash);
439 : : static void hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill,
440 : : int setno);
441 : : static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
442 : : static void build_pertrans_for_aggref(AggStatePerTrans pertrans,
443 : : AggState *aggstate, EState *estate,
444 : : Aggref *aggref, Oid transfn_oid,
445 : : Oid aggtranstype, Oid aggserialfn,
446 : : Oid aggdeserialfn, Datum initValue,
447 : : bool initValueIsNull, Oid *inputTypes,
448 : : int numArguments);
449 : :
450 : :
451 : : /*
452 : : * Select the current grouping set; affects current_set and
453 : : * curaggcontext.
454 : : */
455 : : static void
3186 rhodiumtoad@postgres 456 :CBC 3953531 : select_current_set(AggState *aggstate, int setno, bool is_hash)
457 : : {
458 : : /*
459 : : * When changing this, also adapt ExecAggPlainTransByVal() and
460 : : * ExecAggPlainTransByRef().
461 : : */
462 [ + + ]: 3953531 : if (is_hash)
463 : 3610637 : aggstate->curaggcontext = aggstate->hashcontext;
464 : : else
465 : 342894 : aggstate->curaggcontext = aggstate->aggcontexts[setno];
466 : :
467 : 3953531 : aggstate->current_set = setno;
468 : 3953531 : }
469 : :
470 : : /*
471 : : * Switch to phase "newphase", which must either be 0 or 1 (to reset) or
472 : : * current_phase + 1. Juggle the tuplesorts accordingly.
473 : : *
474 : : * Phase 0 is for hashing, which we currently handle last in the AGG_MIXED
475 : : * case, so when entering phase 0, all we need to do is drop open sorts.
476 : : */
477 : : static void
3867 andres@anarazel.de 478 : 46707 : initialize_phase(AggState *aggstate, int newphase)
479 : : {
3186 rhodiumtoad@postgres 480 [ + + - + ]: 46707 : Assert(newphase <= 1 || newphase == aggstate->current_phase + 1);
481 : :
482 : : /*
483 : : * Whatever the previous state, we're now done with whatever input
484 : : * tuplesort was in use.
485 : : */
3867 andres@anarazel.de 486 [ + + ]: 46707 : if (aggstate->sort_in)
487 : : {
488 : 21 : tuplesort_end(aggstate->sort_in);
489 : 21 : aggstate->sort_in = NULL;
490 : : }
491 : :
3186 rhodiumtoad@postgres 492 [ + + ]: 46707 : if (newphase <= 1)
493 : : {
494 : : /*
495 : : * Discard any existing output tuplesort.
496 : : */
3867 andres@anarazel.de 497 [ + + ]: 46605 : if (aggstate->sort_out)
498 : : {
499 : 3 : tuplesort_end(aggstate->sort_out);
500 : 3 : aggstate->sort_out = NULL;
501 : : }
502 : : }
503 : : else
504 : : {
505 : : /*
506 : : * The old output tuplesort becomes the new input one, and this is the
507 : : * right time to actually sort it.
508 : : */
509 : 102 : aggstate->sort_in = aggstate->sort_out;
510 : 102 : aggstate->sort_out = NULL;
511 [ - + ]: 102 : Assert(aggstate->sort_in);
512 : 102 : tuplesort_performsort(aggstate->sort_in);
513 : : }
514 : :
515 : : /*
516 : : * If this isn't the last phase, we need to sort appropriately for the
517 : : * next phase in sequence.
518 : : */
3186 rhodiumtoad@postgres 519 [ + + + + ]: 46707 : if (newphase > 0 && newphase < aggstate->numphases - 1)
520 : : {
3860 bruce@momjian.us 521 : 129 : Sort *sortnode = aggstate->phases[newphase + 1].sortnode;
3867 andres@anarazel.de 522 : 129 : PlanState *outerNode = outerPlanState(aggstate);
523 : 129 : TupleDesc tupDesc = ExecGetResultType(outerNode);
524 : :
525 : 129 : aggstate->sort_out = tuplesort_begin_heap(tupDesc,
526 : : sortnode->numCols,
527 : : sortnode->sortColIdx,
528 : : sortnode->sortOperators,
529 : : sortnode->collations,
530 : : sortnode->nullsFirst,
531 : : work_mem,
532 : : NULL, TUPLESORT_NONE);
533 : : }
534 : :
535 : 46707 : aggstate->current_phase = newphase;
536 : 46707 : aggstate->phase = &aggstate->phases[newphase];
537 : 46707 : }
538 : :
539 : : /*
540 : : * Fetch a tuple from either the outer plan (for phase 1) or from the sorter
541 : : * populated by the previous phase. Copy it to the sorter for the next phase
542 : : * if any.
543 : : *
544 : : * Callers cannot rely on the memory for the tuple in the returned slot
545 : : * remaining valid past any subsequently fetched tuple.
546 : : */
547 : : static TupleTableSlot *
548 : 14584924 : fetch_input_tuple(AggState *aggstate)
549 : : {
550 : : TupleTableSlot *slot;
551 : :
552 [ + + ]: 14584924 : if (aggstate->sort_in)
553 : : {
554 : : /* make sure we check for interrupts in either path through here */
3066 555 [ - + ]: 147450 : CHECK_FOR_INTERRUPTS();
3176 556 [ + + ]: 147450 : if (!tuplesort_gettupleslot(aggstate->sort_in, true, false,
557 : : aggstate->sort_slot, NULL))
3867 558 : 102 : return NULL;
559 : 147348 : slot = aggstate->sort_slot;
560 : : }
561 : : else
562 : 14437474 : slot = ExecProcNode(outerPlanState(aggstate));
563 : :
564 [ + + + + : 14584782 : if (!TupIsNull(slot) && aggstate->sort_out)
+ + ]
565 : 147348 : tuplesort_puttupleslot(aggstate->sort_out, slot);
566 : :
567 : 14584782 : return slot;
568 : : }
569 : :
570 : : /*
571 : : * (Re)Initialize an individual aggregate.
572 : : *
573 : : * This function handles only one grouping set, already set in
574 : : * aggstate->current_set.
575 : : *
576 : : * When called, CurrentMemoryContext should be the per-query context.
577 : : */
578 : : static void
3787 heikki.linnakangas@i 579 : 569074 : initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
580 : : AggStatePerGroup pergroupstate)
581 : : {
582 : : /*
583 : : * Start a fresh sort operation for each DISTINCT/ORDER BY aggregate.
584 : : */
1232 drowley@postgresql.o 585 [ + + ]: 569074 : if (pertrans->aggsortrequired)
586 : : {
587 : : /*
588 : : * In case of rescan, there could be an uncompleted sort
589 : : * operation; clean it up if so.
590 : : */
3787 heikki.linnakangas@i 591 [ - + ]: 26921 : if (pertrans->sortstates[aggstate->current_set])
3787 heikki.linnakangas@i 592 :UBC 0 : tuplesort_end(pertrans->sortstates[aggstate->current_set]);
593 : :
594 : :
595 : : /*
596 : : * We use a plain Datum sorter when there's a single input column;
597 : : * otherwise sort the full tuple. (See comments for
598 : : * process_ordered_aggregate_single.)
599 : : */
3787 heikki.linnakangas@i 600 [ + + ]:CBC 26921 : if (pertrans->numInputs == 1)
601 : : {
3040 andres@anarazel.de 602 : 26879 : Form_pg_attribute attr = TupleDescAttr(pertrans->sortdesc, 0);
603 : :
3787 heikki.linnakangas@i 604 : 26879 : pertrans->sortstates[aggstate->current_set] =
3040 andres@anarazel.de 605 : 26879 : tuplesort_begin_datum(attr->atttypid,
3787 heikki.linnakangas@i 606 : 26879 : pertrans->sortOperators[0],
607 : 26879 : pertrans->sortCollations[0],
608 : 26879 : pertrans->sortNullsFirst[0],
609 : : work_mem, NULL, TUPLESORT_NONE);
610 : : }
611 : : else
612 : 42 : pertrans->sortstates[aggstate->current_set] =
3303 andres@anarazel.de 613 : 42 : tuplesort_begin_heap(pertrans->sortdesc,
614 : : pertrans->numSortCols,
615 : : pertrans->sortColIdx,
616 : : pertrans->sortOperators,
617 : : pertrans->sortCollations,
618 : : pertrans->sortNullsFirst,
619 : : work_mem, NULL, TUPLESORT_NONE);
620 : : }
621 : :
622 : : /*
623 : : * (Re)set transValue to the initial value.
624 : : *
625 : : * Note that when the initial value is pass-by-ref, we must copy it (into
626 : : * the aggcontext) since we will pfree the transValue later.
627 : : */
3787 heikki.linnakangas@i 628 [ + + ]: 569074 : if (pertrans->initValueIsNull)
629 : 298096 : pergroupstate->transValue = pertrans->initValue;
630 : : else
631 : : {
632 : : MemoryContext oldContext;
633 : :
2147 alvherre@alvh.no-ip. 634 : 270978 : oldContext = MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
3787 heikki.linnakangas@i 635 : 541956 : pergroupstate->transValue = datumCopy(pertrans->initValue,
636 : 270978 : pertrans->transtypeByVal,
637 : 270978 : pertrans->transtypeLen);
3867 andres@anarazel.de 638 : 270978 : MemoryContextSwitchTo(oldContext);
639 : : }
3787 heikki.linnakangas@i 640 : 569074 : pergroupstate->transValueIsNull = pertrans->initValueIsNull;
641 : :
642 : : /*
643 : : * If the initial value for the transition state doesn't exist in the
644 : : * pg_aggregate table then we will let the first non-NULL value returned
645 : : * from the outer procNode become the initial value. (This is useful for
646 : : * aggregates like max() and min().) The noTransValue flag signals that we
647 : : * still need to do this.
648 : : */
649 : 569074 : pergroupstate->noTransValue = pertrans->initValueIsNull;
3867 andres@anarazel.de 650 : 569074 : }
651 : :
652 : : /*
653 : : * Initialize all aggregate transition states for a new group of input values.
654 : : *
655 : : * If there are multiple grouping sets, we initialize only the first numReset
656 : : * of them (the grouping sets are ordered so that the most specific one, which
657 : : * is reset most often, is first). As a convenience, if numReset is 0, we
658 : : * reinitialize all sets.
659 : : *
660 : : * NB: This cannot be used for hash aggregates, as for those the grouping set
661 : : * number has to be specified from further up.
662 : : *
663 : : * When called, CurrentMemoryContext should be the per-query context.
664 : : */
665 : : static void
666 : 153088 : initialize_aggregates(AggState *aggstate,
667 : : AggStatePerGroup *pergroups,
668 : : int numReset)
669 : : {
670 : : int transno;
3860 bruce@momjian.us 671 : 153088 : int numGroupingSets = Max(aggstate->phase->numsets, 1);
672 : 153088 : int setno = 0;
3186 rhodiumtoad@postgres 673 : 153088 : int numTrans = aggstate->numtrans;
3787 heikki.linnakangas@i 674 : 153088 : AggStatePerTrans transstates = aggstate->pertrans;
675 : :
3186 rhodiumtoad@postgres 676 [ - + ]: 153088 : if (numReset == 0)
3867 andres@anarazel.de 677 :UBC 0 : numReset = numGroupingSets;
678 : :
2905 andres@anarazel.de 679 [ + + ]:CBC 313263 : for (setno = 0; setno < numReset; setno++)
680 : : {
681 : 160175 : AggStatePerGroup pergroup = pergroups[setno];
682 : :
683 : 160175 : select_current_set(aggstate, setno, false);
684 : :
685 [ + + ]: 496343 : for (transno = 0; transno < numTrans; transno++)
686 : : {
687 : 336168 : AggStatePerTrans pertrans = &transstates[transno];
688 : 336168 : AggStatePerGroup pergroupstate = &pergroup[transno];
689 : :
690 : 336168 : initialize_aggregate(aggstate, pertrans, pergroupstate);
691 : : }
692 : : }
9500 tgl@sss.pgh.pa.us 693 : 153088 : }
694 : :
695 : : /*
696 : : * Given new input value(s), advance the transition function of one aggregate
697 : : * state within one grouping set only (already set in aggstate->current_set)
698 : : *
699 : : * The new values (and null flags) have been preloaded into argument positions
700 : : * 1 and up in pertrans->transfn_fcinfo, so that we needn't copy them again to
701 : : * pass to the transition function. We also expect that the static fields of
702 : : * the fcinfo are already initialized; that was done by ExecInitAgg().
703 : : *
704 : : * It doesn't matter which memory context this is called in.
705 : : */
706 : : static void
8441 707 : 362145 : advance_transition_function(AggState *aggstate,
708 : : AggStatePerTrans pertrans,
709 : : AggStatePerGroup pergroupstate)
710 : : {
2516 andres@anarazel.de 711 : 362145 : FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
712 : : MemoryContext oldContext;
713 : : Datum newVal;
714 : :
3787 heikki.linnakangas@i 715 [ + + ]: 362145 : if (pertrans->transfn.fn_strict)
716 : : {
717 : : /*
718 : : * For a strict transfn, nothing happens when there's a NULL input; we
719 : : * just keep the prior transValue.
720 : : */
721 : 112500 : int numTransInputs = pertrans->numTransInputs;
722 : : int i;
723 : :
4376 tgl@sss.pgh.pa.us 724 [ + + ]: 225000 : for (i = 1; i <= numTransInputs; i++)
725 : : {
2516 andres@anarazel.de 726 [ - + ]: 112500 : if (fcinfo->args[i].isnull)
7082 tgl@sss.pgh.pa.us 727 :UBC 0 : return;
728 : : }
8441 tgl@sss.pgh.pa.us 729 [ - + ]:CBC 112500 : if (pergroupstate->noTransValue)
730 : : {
731 : : /*
732 : : * transValue has not been initialized. This is the first non-NULL
733 : : * input value. We use it as the initial value for transValue. (We
734 : : * already checked that the agg's input type is binary-compatible
735 : : * with its transtype, so straight copy here is OK.)
736 : : *
737 : : * We must copy the datum into aggcontext if it is pass-by-ref. We
738 : : * do not need to pfree the old transValue, since it's NULL.
739 : : */
2147 alvherre@alvh.no-ip. 740 :UBC 0 : oldContext = MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
2516 andres@anarazel.de 741 : 0 : pergroupstate->transValue = datumCopy(fcinfo->args[1].value,
3787 heikki.linnakangas@i 742 : 0 : pertrans->transtypeByVal,
743 : 0 : pertrans->transtypeLen);
8441 tgl@sss.pgh.pa.us 744 : 0 : pergroupstate->transValueIsNull = false;
745 : 0 : pergroupstate->noTransValue = false;
746 : 0 : MemoryContextSwitchTo(oldContext);
9283 747 : 0 : return;
748 : : }
8441 tgl@sss.pgh.pa.us 749 [ - + ]:CBC 112500 : if (pergroupstate->transValueIsNull)
750 : : {
751 : : /*
752 : : * Don't call a strict function with NULL inputs. Note it is
753 : : * possible to get here despite the above tests, if the transfn is
754 : : * strict *and* returned a NULL on a prior cycle. If that happens
755 : : * we will propagate the NULL all the way to the end.
756 : : */
9283 tgl@sss.pgh.pa.us 757 :UBC 0 : return;
758 : : }
759 : : }
760 : :
761 : : /* We run the transition functions in per-input-tuple memory context */
8441 tgl@sss.pgh.pa.us 762 :CBC 362145 : oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory);
763 : :
764 : : /* set up aggstate->curpertrans for AggGetAggref() */
3787 heikki.linnakangas@i 765 : 362145 : aggstate->curpertrans = pertrans;
766 : :
767 : : /*
768 : : * OK to call the transition function
769 : : */
2516 andres@anarazel.de 770 : 362145 : fcinfo->args[0].value = pergroupstate->transValue;
771 : 362145 : fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
4360 tgl@sss.pgh.pa.us 772 : 362145 : fcinfo->isnull = false; /* just in case transfn doesn't set it */
773 : :
7082 774 : 362145 : newVal = FunctionCallInvoke(fcinfo);
775 : :
3787 heikki.linnakangas@i 776 : 362145 : aggstate->curpertrans = NULL;
777 : :
778 : : /*
779 : : * If pass-by-ref datatype, must copy the new value into aggcontext and
780 : : * free the prior transValue. But if transfn returned a pointer to its
781 : : * first input, we don't need to do anything.
782 : : *
783 : : * It's safe to compare newVal with pergroup->transValue without regard
784 : : * for either being NULL, because ExecAggCopyTransValue takes care to set
785 : : * transValue to 0 when NULL. Otherwise we could end up accidentally not
786 : : * reparenting, when the transValue has the same numerical value as
787 : : * newValue, despite being NULL. This is a somewhat hot path, making it
788 : : * undesirable to instead solve this with another branch for the common
789 : : * case of the transition function returning its (modified) input
790 : : * argument.
791 : : */
792 [ - + - - ]: 362145 : if (!pertrans->transtypeByVal &&
7367 bruce@momjian.us 793 :UBC 0 : DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
967 tgl@sss.pgh.pa.us 794 : 0 : newVal = ExecAggCopyTransValue(aggstate, pertrans,
795 : 0 : newVal, fcinfo->isnull,
796 : : pergroupstate->transValue,
797 : 0 : pergroupstate->transValueIsNull);
798 : :
8441 tgl@sss.pgh.pa.us 799 :CBC 362145 : pergroupstate->transValue = newVal;
7082 800 : 362145 : pergroupstate->transValueIsNull = fcinfo->isnull;
801 : :
8441 802 : 362145 : MemoryContextSwitchTo(oldContext);
803 : : }
804 : :
805 : : /*
806 : : * Advance each aggregate transition state for one input tuple. The input
807 : : * tuple has been stored in tmpcontext->ecxt_outertuple, so that it is
808 : : * accessible to ExecEvalExpr.
809 : : *
810 : : * We have two sets of transition states to handle: one for sorted aggregation
811 : : * and one for hashed; we do them both here, to avoid multiple evaluation of
812 : : * the inputs.
813 : : *
814 : : * When called, CurrentMemoryContext should be the per-query context.
815 : : */
816 : : static void
2898 andres@anarazel.de 817 : 14919991 : advance_aggregates(AggState *aggstate)
818 : : {
280 dgustafsson@postgres 819 : 14919991 : ExecEvalExprNoReturnSwitchContext(aggstate->phase->evaltrans,
820 : : aggstate->tmpcontext);
8441 tgl@sss.pgh.pa.us 821 : 14919952 : }
822 : :
823 : : /*
824 : : * Run the transition function for a DISTINCT or ORDER BY aggregate
825 : : * with only one input. This is called after we have completed
826 : : * entering all the input values into the sort object. We complete the
827 : : * sort, read out the values in sorted order, and run the transition
828 : : * function on each value (applying DISTINCT if appropriate).
829 : : *
830 : : * Note that the strictness of the transition function was checked when
831 : : * entering the values into the sort, so we don't check it again here;
832 : : * we just apply standard SQL DISTINCT logic.
833 : : *
834 : : * The one-input case is handled separately from the multi-input case
835 : : * for performance reasons: for single by-value inputs, such as the
836 : : * common case of count(distinct id), the tuplesort_getdatum code path
837 : : * is around 300% faster. (The speedup for by-reference types is less
838 : : * but still noticeable.)
839 : : *
840 : : * This function handles only one grouping set (already set in
841 : : * aggstate->current_set).
842 : : *
843 : : * When called, CurrentMemoryContext should be the per-query context.
844 : : */
845 : : static void
5845 846 : 26879 : process_ordered_aggregate_single(AggState *aggstate,
847 : : AggStatePerTrans pertrans,
848 : : AggStatePerGroup pergroupstate)
849 : : {
9288 850 : 26879 : Datum oldVal = (Datum) 0;
5772 bruce@momjian.us 851 : 26879 : bool oldIsNull = true;
9288 tgl@sss.pgh.pa.us 852 : 26879 : bool haveOldVal = false;
8441 853 : 26879 : MemoryContext workcontext = aggstate->tmpcontext->ecxt_per_tuple_memory;
854 : : MemoryContext oldContext;
3787 heikki.linnakangas@i 855 : 26879 : bool isDistinct = (pertrans->numDistinctCols > 0);
3590 rhaas@postgresql.org 856 : 26879 : Datum newAbbrevVal = (Datum) 0;
857 : 26879 : Datum oldAbbrevVal = (Datum) 0;
2516 andres@anarazel.de 858 : 26879 : FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
859 : : Datum *newVal;
860 : : bool *isNull;
861 : :
3787 heikki.linnakangas@i 862 [ - + ]: 26879 : Assert(pertrans->numDistinctCols < 2);
863 : :
864 : 26879 : tuplesort_performsort(pertrans->sortstates[aggstate->current_set]);
865 : :
866 : : /* Load the column into argument 1 (arg 0 will be transition value) */
2516 andres@anarazel.de 867 : 26879 : newVal = &fcinfo->args[1].value;
868 : 26879 : isNull = &fcinfo->args[1].isnull;
869 : :
870 : : /*
871 : : * Note: if input type is pass-by-ref, the datums returned by the sort are
872 : : * freshly palloc'd in the per-query context, so we must be careful to
873 : : * pfree them when they are no longer needed.
874 : : */
875 : :
3787 heikki.linnakangas@i 876 [ + + ]: 449138 : while (tuplesort_getdatum(pertrans->sortstates[aggstate->current_set],
877 : : true, false, newVal, isNull, &newAbbrevVal))
878 : : {
879 : : /*
880 : : * Clear and select the working context for evaluation of the equality
881 : : * function and transition function.
882 : : */
8441 tgl@sss.pgh.pa.us 883 : 422259 : MemoryContextReset(workcontext);
884 : 422259 : oldContext = MemoryContextSwitchTo(workcontext);
885 : :
886 : : /*
887 : : * If DISTINCT mode, and not distinct from prior, skip it.
888 : : */
5845 889 [ + + + + ]: 422259 : if (isDistinct &&
890 [ - + ]: 155227 : haveOldVal &&
5845 tgl@sss.pgh.pa.us 891 [ # # ]:UBC 0 : ((oldIsNull && *isNull) ||
5845 tgl@sss.pgh.pa.us 892 [ + - + - ]:CBC 155227 : (!oldIsNull && !*isNull &&
3590 rhaas@postgresql.org 893 [ + + + + ]: 302974 : oldAbbrevVal == newAbbrevVal &&
2461 peter@eisentraut.org 894 : 147747 : DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
895 : : pertrans->aggCollation,
896 : : oldVal, *newVal)))))
897 : : {
1145 drowley@postgresql.o 898 : 60222 : MemoryContextSwitchTo(oldContext);
899 : 60222 : continue;
900 : : }
901 : : else
902 : : {
3787 heikki.linnakangas@i 903 : 362037 : advance_transition_function(aggstate, pertrans, pergroupstate);
904 : :
1145 drowley@postgresql.o 905 : 362037 : MemoryContextSwitchTo(oldContext);
906 : :
907 : : /*
908 : : * Forget the old value, if any, and remember the new one for
909 : : * subsequent equality checks.
910 : : */
911 [ + + ]: 362037 : if (!pertrans->inputtypeByVal)
912 : : {
913 [ + + ]: 262644 : if (!oldIsNull)
914 : 262554 : pfree(DatumGetPointer(oldVal));
915 [ + + ]: 262644 : if (!*isNull)
916 : 262614 : oldVal = datumCopy(*newVal, pertrans->inputtypeByVal,
917 : 262614 : pertrans->inputtypeLen);
918 : : }
919 : : else
920 : 99393 : oldVal = *newVal;
3590 rhaas@postgresql.org 921 : 362037 : oldAbbrevVal = newAbbrevVal;
5845 tgl@sss.pgh.pa.us 922 : 362037 : oldIsNull = *isNull;
9500 923 : 362037 : haveOldVal = true;
924 : : }
925 : : }
926 : :
3787 heikki.linnakangas@i 927 [ + + + + ]: 26879 : if (!oldIsNull && !pertrans->inputtypeByVal)
9288 tgl@sss.pgh.pa.us 928 : 60 : pfree(DatumGetPointer(oldVal));
929 : :
3787 heikki.linnakangas@i 930 : 26879 : tuplesort_end(pertrans->sortstates[aggstate->current_set]);
931 : 26879 : pertrans->sortstates[aggstate->current_set] = NULL;
9288 tgl@sss.pgh.pa.us 932 : 26879 : }
933 : :
934 : : /*
935 : : * Run the transition function for a DISTINCT or ORDER BY aggregate
936 : : * with more than one input. This is called after we have completed
937 : : * entering all the input values into the sort object. We complete the
938 : : * sort, read out the values in sorted order, and run the transition
939 : : * function on each value (applying DISTINCT if appropriate).
940 : : *
941 : : * This function handles only one grouping set (already set in
942 : : * aggstate->current_set).
943 : : *
944 : : * When called, CurrentMemoryContext should be the per-query context.
945 : : */
946 : : static void
5845 947 : 42 : process_ordered_aggregate_multi(AggState *aggstate,
948 : : AggStatePerTrans pertrans,
949 : : AggStatePerGroup pergroupstate)
950 : : {
2861 andres@anarazel.de 951 : 42 : ExprContext *tmpcontext = aggstate->tmpcontext;
2516 952 : 42 : FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
3303 953 : 42 : TupleTableSlot *slot1 = pertrans->sortslot;
3787 heikki.linnakangas@i 954 : 42 : TupleTableSlot *slot2 = pertrans->uniqslot;
955 : 42 : int numTransInputs = pertrans->numTransInputs;
956 : 42 : int numDistinctCols = pertrans->numDistinctCols;
3590 rhaas@postgresql.org 957 : 42 : Datum newAbbrevVal = (Datum) 0;
958 : 42 : Datum oldAbbrevVal = (Datum) 0;
5772 bruce@momjian.us 959 : 42 : bool haveOldValue = false;
2861 andres@anarazel.de 960 : 42 : TupleTableSlot *save = aggstate->tmpcontext->ecxt_outertuple;
961 : : int i;
962 : :
3787 heikki.linnakangas@i 963 : 42 : tuplesort_performsort(pertrans->sortstates[aggstate->current_set]);
964 : :
5845 tgl@sss.pgh.pa.us 965 : 42 : ExecClearTuple(slot1);
966 [ - + ]: 42 : if (slot2)
5845 tgl@sss.pgh.pa.us 967 :UBC 0 : ExecClearTuple(slot2);
968 : :
3787 heikki.linnakangas@i 969 [ + + ]:CBC 150 : while (tuplesort_gettupleslot(pertrans->sortstates[aggstate->current_set],
970 : : true, true, slot1, &newAbbrevVal))
971 : : {
3066 andres@anarazel.de 972 [ - + ]: 108 : CHECK_FOR_INTERRUPTS();
973 : :
2861 974 : 108 : tmpcontext->ecxt_outertuple = slot1;
975 : 108 : tmpcontext->ecxt_innertuple = slot2;
976 : :
5845 tgl@sss.pgh.pa.us 977 [ - + ]: 108 : if (numDistinctCols == 0 ||
5845 tgl@sss.pgh.pa.us 978 [ # # ]:UBC 0 : !haveOldValue ||
3590 rhaas@postgresql.org 979 [ # # ]: 0 : newAbbrevVal != oldAbbrevVal ||
2861 andres@anarazel.de 980 [ # # ]: 0 : !ExecQual(pertrans->equalfnMulti, tmpcontext))
981 : : {
982 : : /*
983 : : * Extract the first numTransInputs columns as datums to pass to
984 : : * the transfn.
985 : : */
2861 andres@anarazel.de 986 :CBC 108 : slot_getsomeattrs(slot1, numTransInputs);
987 : :
988 : : /* Load values into fcinfo */
989 : : /* Start from 1, since the 0th arg will be the transition value */
4376 tgl@sss.pgh.pa.us 990 [ + + ]: 306 : for (i = 0; i < numTransInputs; i++)
991 : : {
2516 andres@anarazel.de 992 : 198 : fcinfo->args[i + 1].value = slot1->tts_values[i];
993 : 198 : fcinfo->args[i + 1].isnull = slot1->tts_isnull[i];
994 : : }
995 : :
3787 heikki.linnakangas@i 996 : 108 : advance_transition_function(aggstate, pertrans, pergroupstate);
997 : :
5845 tgl@sss.pgh.pa.us 998 [ - + ]: 108 : if (numDistinctCols > 0)
999 : : {
1000 : : /* swap the slot pointers to retain the current tuple */
5845 tgl@sss.pgh.pa.us 1001 :UBC 0 : TupleTableSlot *tmpslot = slot2;
1002 : :
1003 : 0 : slot2 = slot1;
1004 : 0 : slot1 = tmpslot;
1005 : : /* avoid ExecQual() calls by reusing abbreviated keys */
3590 rhaas@postgresql.org 1006 : 0 : oldAbbrevVal = newAbbrevVal;
5845 tgl@sss.pgh.pa.us 1007 : 0 : haveOldValue = true;
1008 : : }
1009 : : }
1010 : :
1011 : : /* Reset context each time */
2861 andres@anarazel.de 1012 :CBC 108 : ResetExprContext(tmpcontext);
1013 : :
5845 tgl@sss.pgh.pa.us 1014 : 108 : ExecClearTuple(slot1);
1015 : : }
1016 : :
1017 [ - + ]: 42 : if (slot2)
5845 tgl@sss.pgh.pa.us 1018 :UBC 0 : ExecClearTuple(slot2);
1019 : :
3787 heikki.linnakangas@i 1020 :CBC 42 : tuplesort_end(pertrans->sortstates[aggstate->current_set]);
1021 : 42 : pertrans->sortstates[aggstate->current_set] = NULL;
1022 : :
1023 : : /* restore previous slot, potentially in use for grouping sets */
2861 andres@anarazel.de 1024 : 42 : tmpcontext->ecxt_outertuple = save;
5845 tgl@sss.pgh.pa.us 1025 : 42 : }
1026 : :
1027 : : /*
1028 : : * Compute the final value of one aggregate.
1029 : : *
1030 : : * This function handles only one grouping set (already set in
1031 : : * aggstate->current_set).
1032 : : *
1033 : : * The finalfn will be run, and the result delivered, in the
1034 : : * output-tuple context; caller's CurrentMemoryContext does not matter.
1035 : : * (But note that in some cases, such as when there is no finalfn, the
1036 : : * result might be a pointer to or into the agg's transition value.)
1037 : : *
1038 : : * The finalfn uses the state as set in the transno. This also might be
1039 : : * being used by another aggregate function, so it's important that we do
1040 : : * nothing destructive here. Moreover, the aggregate's final value might
1041 : : * get used in multiple places, so we mustn't return a R/W expanded datum.
1042 : : */
1043 : : static void
8441 1044 : 561366 : finalize_aggregate(AggState *aggstate,
1045 : : AggStatePerAgg peragg,
1046 : : AggStatePerGroup pergroupstate,
1047 : : Datum *resultVal, bool *resultIsNull)
1048 : : {
2516 andres@anarazel.de 1049 : 561366 : LOCAL_FCINFO(fcinfo, FUNC_MAX_ARGS);
4376 tgl@sss.pgh.pa.us 1050 : 561366 : bool anynull = false;
1051 : : MemoryContext oldContext;
1052 : : int i;
1053 : : ListCell *lc;
3787 heikki.linnakangas@i 1054 : 561366 : AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
1055 : :
8412 tgl@sss.pgh.pa.us 1056 : 561366 : oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
1057 : :
1058 : : /*
1059 : : * Evaluate any direct arguments. We do this even if there's no finalfn
1060 : : * (which is unlikely anyway), so that side-effects happen as expected.
1061 : : * The direct arguments go into arg positions 1 and up, leaving position 0
1062 : : * for the transition state value.
1063 : : */
4376 1064 : 561366 : i = 1;
2983 1065 [ + + + + : 561853 : foreach(lc, peragg->aggdirectargs)
+ + ]
1066 : : {
4376 1067 : 487 : ExprState *expr = (ExprState *) lfirst(lc);
1068 : :
2516 andres@anarazel.de 1069 : 487 : fcinfo->args[i].value = ExecEvalExpr(expr,
1070 : : aggstate->ss.ps.ps_ExprContext,
1071 : : &fcinfo->args[i].isnull);
1072 : 487 : anynull |= fcinfo->args[i].isnull;
4376 tgl@sss.pgh.pa.us 1073 : 487 : i++;
1074 : : }
1075 : :
1076 : : /*
1077 : : * Apply the agg's finalfn if one is provided, else return transValue.
1078 : : */
3787 heikki.linnakangas@i 1079 [ + + ]: 561366 : if (OidIsValid(peragg->finalfn_oid))
1080 : : {
1081 : 168746 : int numFinalArgs = peragg->numFinalArgs;
1082 : :
1083 : : /* set up aggstate->curperagg for AggGetAggref() */
2987 tgl@sss.pgh.pa.us 1084 : 168746 : aggstate->curperagg = peragg;
1085 : :
2516 andres@anarazel.de 1086 : 168746 : InitFunctionCallInfoData(*fcinfo, &peragg->finalfn,
1087 : : numFinalArgs,
1088 : : pertrans->aggCollation,
1089 : : (Node *) aggstate, NULL);
1090 : :
1091 : : /* Fill in the transition state value */
1092 : 168746 : fcinfo->args[0].value =
1093 [ + + + + ]: 168746 : MakeExpandedObjectReadOnly(pergroupstate->transValue,
1094 : : pergroupstate->transValueIsNull,
1095 : : pertrans->transtypeLen);
1096 : 168746 : fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
4376 tgl@sss.pgh.pa.us 1097 : 168746 : anynull |= pergroupstate->transValueIsNull;
1098 : :
1099 : : /* Fill any remaining argument positions with nulls */
4255 1100 [ + + ]: 244364 : for (; i < numFinalArgs; i++)
1101 : : {
2516 andres@anarazel.de 1102 : 75618 : fcinfo->args[i].value = (Datum) 0;
1103 : 75618 : fcinfo->args[i].isnull = true;
4376 tgl@sss.pgh.pa.us 1104 : 75618 : anynull = true;
1105 : : }
1106 : :
2516 andres@anarazel.de 1107 [ + + - + ]: 168746 : if (fcinfo->flinfo->fn_strict && anynull)
1108 : : {
1109 : : /* don't call a strict function with NULL inputs */
9333 tgl@sss.pgh.pa.us 1110 :UBC 0 : *resultVal = (Datum) 0;
1111 : 0 : *resultIsNull = true;
1112 : : }
1113 : : else
1114 : : {
1115 : : Datum result;
1116 : :
975 tgl@sss.pgh.pa.us 1117 :CBC 168746 : result = FunctionCallInvoke(fcinfo);
2516 andres@anarazel.de 1118 : 168740 : *resultIsNull = fcinfo->isnull;
975 tgl@sss.pgh.pa.us 1119 [ + + + + ]: 168740 : *resultVal = MakeExpandedObjectReadOnly(result,
1120 : : fcinfo->isnull,
1121 : : peragg->resulttypeLen);
1122 : : }
2987 1123 : 168740 : aggstate->curperagg = NULL;
1124 : : }
1125 : : else
1126 : : {
1167 1127 : 392620 : *resultVal =
1128 [ + + + + ]: 392620 : MakeExpandedObjectReadOnly(pergroupstate->transValue,
1129 : : pergroupstate->transValueIsNull,
1130 : : pertrans->transtypeLen);
8441 1131 : 392620 : *resultIsNull = pergroupstate->transValueIsNull;
1132 : : }
1133 : :
1134 : 561360 : MemoryContextSwitchTo(oldContext);
9500 1135 : 561360 : }
1136 : :
1137 : : /*
1138 : : * Compute the output value of one partial aggregate.
1139 : : *
1140 : : * The serialization function will be run, and the result delivered, in the
1141 : : * output-tuple context; caller's CurrentMemoryContext does not matter.
1142 : : */
1143 : : static void
3549 rhaas@postgresql.org 1144 : 9485 : finalize_partialaggregate(AggState *aggstate,
1145 : : AggStatePerAgg peragg,
1146 : : AggStatePerGroup pergroupstate,
1147 : : Datum *resultVal, bool *resultIsNull)
1148 : : {
3477 1149 : 9485 : AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
1150 : : MemoryContext oldContext;
1151 : :
3549 1152 : 9485 : oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
1153 : :
1154 : : /*
1155 : : * serialfn_oid will be set if we must serialize the transvalue before
1156 : : * returning it
1157 : : */
1158 [ + + ]: 9485 : if (OidIsValid(pertrans->serialfn_oid))
1159 : : {
1160 : : /* Don't call a strict serialization function with NULL input. */
1161 [ + - + + ]: 333 : if (pertrans->serialfn.fn_strict && pergroupstate->transValueIsNull)
1162 : : {
1163 : 35 : *resultVal = (Datum) 0;
1164 : 35 : *resultIsNull = true;
1165 : : }
1166 : : else
1167 : : {
2516 andres@anarazel.de 1168 : 298 : FunctionCallInfo fcinfo = pertrans->serialfn_fcinfo;
1169 : : Datum result;
1170 : :
1171 : 298 : fcinfo->args[0].value =
1172 [ + - + - ]: 298 : MakeExpandedObjectReadOnly(pergroupstate->transValue,
1173 : : pergroupstate->transValueIsNull,
1174 : : pertrans->transtypeLen);
1175 : 298 : fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
2065 tgl@sss.pgh.pa.us 1176 : 298 : fcinfo->isnull = false;
1177 : :
975 1178 : 298 : result = FunctionCallInvoke(fcinfo);
3549 rhaas@postgresql.org 1179 : 298 : *resultIsNull = fcinfo->isnull;
975 tgl@sss.pgh.pa.us 1180 [ + - + - ]: 298 : *resultVal = MakeExpandedObjectReadOnly(result,
1181 : : fcinfo->isnull,
1182 : : peragg->resulttypeLen);
1183 : : }
1184 : : }
1185 : : else
1186 : : {
1167 1187 : 9152 : *resultVal =
1188 [ + + + + ]: 9152 : MakeExpandedObjectReadOnly(pergroupstate->transValue,
1189 : : pergroupstate->transValueIsNull,
1190 : : pertrans->transtypeLen);
3549 rhaas@postgresql.org 1191 : 9152 : *resultIsNull = pergroupstate->transValueIsNull;
1192 : : }
1193 : :
1194 : 9485 : MemoryContextSwitchTo(oldContext);
1195 : 9485 : }
1196 : :
1197 : : /*
1198 : : * Extract the attributes that make up the grouping key into the
1199 : : * hashslot. This is necessary to compute the hash or perform a lookup.
1200 : : */
1201 : : static inline void
1969 jdavis@postgresql.or 1202 : 4113737 : prepare_hash_slot(AggStatePerHash perhash,
1203 : : TupleTableSlot *inputslot,
1204 : : TupleTableSlot *hashslot)
1205 : : {
1206 : : int i;
1207 : :
1208 : : /* transfer just the needed columns into hashslot */
2127 1209 : 4113737 : slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
1210 : 4113737 : ExecClearTuple(hashslot);
1211 : :
1212 [ + + ]: 10201560 : for (i = 0; i < perhash->numhashGrpCols; i++)
1213 : : {
1214 : 6087823 : int varNumber = perhash->hashGrpColIdxInput[i] - 1;
1215 : :
1216 : 6087823 : hashslot->tts_values[i] = inputslot->tts_values[varNumber];
1217 : 6087823 : hashslot->tts_isnull[i] = inputslot->tts_isnull[varNumber];
1218 : : }
1219 : 4113737 : ExecStoreVirtualTuple(hashslot);
1220 : 4113737 : }
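/*
 * Editorial note: hashslot carries only the perhash->numhashGrpCols columns
 * that hashing and lookup actually need.  The hashGrpColIdxInput[] mapping
 * used above is built in find_hash_columns() below, which also omits columns
 * that the grouping-sets logic would null out anyway.
 */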
1221 : :
1222 : : /*
1223 : : * Prepare to finalize and project based on the specified representative tuple
1224 : : * slot and grouping set.
1225 : : *
1226 : : * In the specified tuple slot, force to null all attributes that should be
1227 : : * read as null in the context of the current grouping set. Also stash the
1228 : : * current group bitmap where GroupingExpr can get at it.
1229 : : *
1230 : : * This relies on three conditions:
1231 : : *
1232 : : * 1) Nothing is ever going to try to extract the whole tuple from this slot,
1233 : : * only reference it in evaluations, which will only access individual
1234 : : * attributes.
1235 : : *
1236 : : * 2) No system columns are going to need to be nulled. (If a system column is
1237 : : * referenced in a group clause, it is actually projected in the outer plan
1238 : : * tlist.)
1239 : : *
1240 : : * 3) Within a given phase, we never need to recover the value of an attribute
1241 : : * once it has been set to null.
1242 : : *
1243 : : * Poking into the slot this way is a bit ugly, but the consensus is that the
1244 : : * alternative was worse.
1245 : : */
1246 : : static void
3867 andres@anarazel.de 1247 : 420684 : prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet)
1248 : : {
1249 [ + + ]: 420684 : if (aggstate->phase->grouped_cols)
1250 : : {
3860 bruce@momjian.us 1251 : 275294 : Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
1252 : :
3867 andres@anarazel.de 1253 : 275294 : aggstate->grouped_cols = grouped_cols;
1254 : :
2619 1255 [ + + ]: 275294 : if (TTS_EMPTY(slot))
1256 : : {
1257 : : /*
1258 : : * Force all values to be NULL if working on an empty input tuple
1259 : : * (i.e. an empty grouping set for which no input rows were
1260 : : * supplied).
1261 : : */
3867 1262 : 30 : ExecStoreAllNullTuple(slot);
1263 : : }
1264 [ + + ]: 275264 : else if (aggstate->all_grouped_cols)
1265 : : {
1266 : : ListCell *lc;
1267 : :
1268 : : /* all_grouped_cols is arranged in desc order */
1269 : 275240 : slot_getsomeattrs(slot, linitial_int(aggstate->all_grouped_cols));
1270 : :
1271 [ + - + + : 753876 : foreach(lc, aggstate->all_grouped_cols)
+ + ]
1272 : : {
3860 bruce@momjian.us 1273 : 478636 : int attnum = lfirst_int(lc);
1274 : :
3867 andres@anarazel.de 1275 [ + + ]: 478636 : if (!bms_is_member(attnum, grouped_cols))
1276 : 28916 : slot->tts_isnull[attnum - 1] = true;
1277 : : }
1278 : : }
1279 : : }
1280 : 420684 : }
1281 : :
1282 : : /*
1283 : : * Compute the final value of all aggregates for one group.
1284 : : *
1285 : : * This function handles only one grouping set at a time, which the caller must
1286 : : * have selected. It's also the caller's responsibility to adjust the supplied
1287 : : * pergroup parameter to point to the current set's transvalues.
1288 : : *
1289 : : * Results are stored in the output econtext aggvalues/aggnulls.
1290 : : */
1291 : : static void
1292 : 420684 : finalize_aggregates(AggState *aggstate,
1293 : : AggStatePerAgg peraggs,
1294 : : AggStatePerGroup pergroup)
1295 : : {
1296 : 420684 : ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
1297 : 420684 : Datum *aggvalues = econtext->ecxt_aggvalues;
1298 : 420684 : bool *aggnulls = econtext->ecxt_aggnulls;
1299 : : int aggno;
1300 : :
1301 : : /*
1302 : : * If there were any DISTINCT and/or ORDER BY aggregates, sort their
1303 : : * inputs and run the transition functions.
1304 : : */
1210 drowley@postgresql.o 1305 [ + + ]: 991394 : for (int transno = 0; transno < aggstate->numtrans; transno++)
1306 : : {
3787 heikki.linnakangas@i 1307 : 570710 : AggStatePerTrans pertrans = &aggstate->pertrans[transno];
1308 : : AggStatePerGroup pergroupstate;
1309 : :
3186 rhodiumtoad@postgres 1310 : 570710 : pergroupstate = &pergroup[transno];
1311 : :
1232 drowley@postgresql.o 1312 [ + + ]: 570710 : if (pertrans->aggsortrequired)
1313 : : {
3186 rhodiumtoad@postgres 1314 [ + - - + ]: 26921 : Assert(aggstate->aggstrategy != AGG_HASHED &&
1315 : : aggstate->aggstrategy != AGG_MIXED);
1316 : :
3787 heikki.linnakangas@i 1317 [ + + ]: 26921 : if (pertrans->numInputs == 1)
3867 andres@anarazel.de 1318 : 26879 : process_ordered_aggregate_single(aggstate,
1319 : : pertrans,
1320 : : pergroupstate);
1321 : : else
1322 : 42 : process_ordered_aggregate_multi(aggstate,
1323 : : pertrans,
1324 : : pergroupstate);
1325 : : }
1232 drowley@postgresql.o 1326 [ + + + + ]: 543789 : else if (pertrans->numDistinctCols > 0 && pertrans->haslast)
1327 : : {
1328 : 9179 : pertrans->haslast = false;
1329 : :
1330 [ + + ]: 9179 : if (pertrans->numDistinctCols == 1)
1331 : : {
1332 [ + + + + ]: 9131 : if (!pertrans->inputtypeByVal && !pertrans->lastisnull)
1333 : 131 : pfree(DatumGetPointer(pertrans->lastdatum));
1334 : :
1335 : 9131 : pertrans->lastisnull = false;
1336 : 9131 : pertrans->lastdatum = (Datum) 0;
1337 : : }
1338 : : else
1339 : 48 : ExecClearTuple(pertrans->uniqslot);
1340 : : }
1341 : : }
1342 : :
1343 : : /*
1344 : : * Run the final functions.
1345 : : */
3283 heikki.linnakangas@i 1346 [ + + ]: 991529 : for (aggno = 0; aggno < aggstate->numaggs; aggno++)
1347 : : {
1348 : 570851 : AggStatePerAgg peragg = &peraggs[aggno];
1349 : 570851 : int transno = peragg->transno;
1350 : : AggStatePerGroup pergroupstate;
1351 : :
3186 rhodiumtoad@postgres 1352 : 570851 : pergroupstate = &pergroup[transno];
1353 : :
3460 tgl@sss.pgh.pa.us 1354 [ + + ]: 570851 : if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
3549 rhaas@postgresql.org 1355 : 9485 : finalize_partialaggregate(aggstate, peragg, pergroupstate,
1356 : 9485 : &aggvalues[aggno], &aggnulls[aggno]);
1357 : : else
3460 tgl@sss.pgh.pa.us 1358 : 561366 : finalize_aggregate(aggstate, peragg, pergroupstate,
1359 : 561366 : &aggvalues[aggno], &aggnulls[aggno]);
1360 : : }
3867 andres@anarazel.de 1361 : 420678 : }
1362 : :
1363 : : /*
1364 : : * Project the result of a group (whose aggs have already been calculated by
1365 : : * finalize_aggregates). Returns the result slot, or NULL if no row is
1366 : : * projected (suppressed by qual).
1367 : : */
1368 : : static TupleTableSlot *
1369 : 420678 : project_aggregates(AggState *aggstate)
1370 : : {
1371 : 420678 : ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
1372 : :
1373 : : /*
1374 : : * Check the qual (HAVING clause); if the group does not match, ignore it.
1375 : : */
3199 1376 [ + + ]: 420678 : if (ExecQual(aggstate->ss.ps.qual, econtext))
1377 : : {
1378 : : /*
1379 : : * Form and return projection tuple using the aggregate results and
1380 : : * the representative input tuple.
1381 : : */
3253 1382 : 367440 : return ExecProject(aggstate->ss.ps.ps_ProjInfo);
1383 : : }
1384 : : else
3867 1385 [ - + ]: 53238 : InstrCountFiltered1(aggstate, 1);
1386 : :
1387 : 53238 : return NULL;
1388 : : }
1389 : :
1390 : : /*
1391 : : * Find input-tuple columns that are needed, dividing them into
1392 : : * aggregated and unaggregated sets.
1393 : : */
1394 : : static void
1983 jdavis@postgresql.or 1395 : 3518 : find_cols(AggState *aggstate, Bitmapset **aggregated, Bitmapset **unaggregated)
1396 : : {
1679 tgl@sss.pgh.pa.us 1397 : 3518 : Agg *agg = (Agg *) aggstate->ss.ps.plan;
1398 : : FindColsContext context;
1399 : :
1983 jdavis@postgresql.or 1400 : 3518 : context.is_aggref = false;
1401 : 3518 : context.aggregated = NULL;
1402 : 3518 : context.unaggregated = NULL;
1403 : :
1404 : : /* Examine tlist and quals */
1405 : 3518 : (void) find_cols_walker((Node *) agg->plan.targetlist, &context);
1406 : 3518 : (void) find_cols_walker((Node *) agg->plan.qual, &context);
1407 : :
1408 : : /* In some cases, grouping columns will not appear in the tlist */
1776 tgl@sss.pgh.pa.us 1409 [ + + ]: 8954 : for (int i = 0; i < agg->numCols; i++)
1410 : 5436 : context.unaggregated = bms_add_member(context.unaggregated,
1411 : 5436 : agg->grpColIdx[i]);
1412 : :
1983 jdavis@postgresql.or 1413 : 3518 : *aggregated = context.aggregated;
1414 : 3518 : *unaggregated = context.unaggregated;
7111 tgl@sss.pgh.pa.us 1415 : 3518 : }
1416 : :
1417 : : static bool
1983 jdavis@postgresql.or 1418 : 41995 : find_cols_walker(Node *node, FindColsContext *context)
1419 : : {
7111 tgl@sss.pgh.pa.us 1420 [ + + ]: 41995 : if (node == NULL)
1421 : 7562 : return false;
1422 [ + + ]: 34433 : if (IsA(node, Var))
1423 : : {
1424 : 9485 : Var *var = (Var *) node;
1425 : :
1426 : : /* setrefs.c should have set the varno to OUTER_VAR */
5180 1427 [ - + ]: 9485 : Assert(var->varno == OUTER_VAR);
7111 1428 [ - + ]: 9485 : Assert(var->varlevelsup == 0);
1983 jdavis@postgresql.or 1429 [ + + ]: 9485 : if (context->is_aggref)
1430 : 3051 : context->aggregated = bms_add_member(context->aggregated,
1431 : 3051 : var->varattno);
1432 : : else
1433 : 6434 : context->unaggregated = bms_add_member(context->unaggregated,
1434 : 6434 : var->varattno);
7111 tgl@sss.pgh.pa.us 1435 : 9485 : return false;
1436 : : }
1983 jdavis@postgresql.or 1437 [ + + ]: 24948 : if (IsA(node, Aggref))
1438 : : {
1439 [ - + ]: 4294 : Assert(!context->is_aggref);
1440 : 4294 : context->is_aggref = true;
383 peter@eisentraut.org 1441 : 4294 : expression_tree_walker(node, find_cols_walker, context);
1983 jdavis@postgresql.or 1442 : 4294 : context->is_aggref = false;
7111 tgl@sss.pgh.pa.us 1443 : 4294 : return false;
1444 : : }
383 peter@eisentraut.org 1445 : 20654 : return expression_tree_walker(node, find_cols_walker, context);
1446 : : }
1447 : :
1448 : : /*
1449 : : * (Re-)initialize the hash table(s) to empty.
1450 : : *
1451 : : * To implement hashed aggregation, we need a hashtable that stores a
1452 : : * representative tuple and an array of AggStatePerGroup structs for each
1453 : : * distinct set of GROUP BY column values. We compute the hash key from the
1454 : : * GROUP BY columns. The per-group data is allocated in initialize_hash_entry()
1455 : : * for each entry.
1456 : : *
1457 : : * We have a separate hashtable and associated perhash data structure for each
1458 : : * grouping set for which we're doing hashing.
1459 : : *
1460 : : * The contents of the hash tables live in the aggstate's hash_tuplescxt
1461 : : * memory context (there is only one of these for all tables together, since
1462 : : * they are all reset at the same time).
1463 : : */
1464 : : static void
2127 jdavis@postgresql.or 1465 : 8918 : build_hash_tables(AggState *aggstate)
1466 : : {
1467 : : int setno;
1468 : :
1469 [ + + ]: 18008 : for (setno = 0; setno < aggstate->num_hashes; ++setno)
1470 : : {
1471 : 9090 : AggStatePerHash perhash = &aggstate->perhash[setno];
1472 : : double nbuckets;
1473 : : Size memory;
1474 : :
2099 1475 [ + + ]: 9090 : if (perhash->hashtable != NULL)
1476 : : {
1477 : 6359 : ResetTupleHashTable(perhash->hashtable);
1478 : 6359 : continue;
1479 : : }
1480 : :
1481 : 2731 : memory = aggstate->hash_mem_limit / aggstate->num_hashes;
1482 : :
1483 : : /* choose reasonable number of buckets per hashtable */
2042 tgl@sss.pgh.pa.us 1484 : 2731 : nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
1485 : 2731 : perhash->aggnode->numGroups,
1486 : : memory);
1487 : :
1488 : : #ifdef USE_INJECTION_POINTS
1489 : : if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-oversize-table"))
1490 : : {
1491 : : nbuckets = memory / TupleHashEntrySize();
1492 : : INJECTION_POINT_CACHED("hash-aggregate-oversize-table", NULL);
1493 : : }
1494 : : #endif
1495 : :
2099 jdavis@postgresql.or 1496 : 2731 : build_hash_table(aggstate, setno, nbuckets);
1497 : : }
1498 : :
1499 : 8918 : aggstate->hash_ngroups_current = 0;
6270 neilc@samurai.com 1500 : 8918 : }
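/*
 * Editorial note: on rescan, an existing table is merely reset with
 * ResetTupleHashTable() and reused; build_hash_table() runs only the first
 * time through, with a bucket count derived from the grouping set's
 * estimated number of groups clamped by this set's share of hash_mem (see
 * hash_choose_num_buckets() below).
 */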
1501 : :
1502 : : /*
1503 : : * Build a single hashtable for this grouping set.
1504 : : */
1505 : : static void
44 tgl@sss.pgh.pa.us 1506 :GNC 2731 : build_hash_table(AggState *aggstate, int setno, double nbuckets)
1507 : : {
2127 jdavis@postgresql.or 1508 :CBC 2731 : AggStatePerHash perhash = &aggstate->perhash[setno];
2042 tgl@sss.pgh.pa.us 1509 : 2731 : MemoryContext metacxt = aggstate->hash_metacxt;
47 tgl@sss.pgh.pa.us 1510 :GNC 2731 : MemoryContext tuplescxt = aggstate->hash_tuplescxt;
2042 tgl@sss.pgh.pa.us 1511 :CBC 2731 : MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
1512 : : Size additionalsize;
1513 : :
2127 jdavis@postgresql.or 1514 [ + + - + ]: 2731 : Assert(aggstate->aggstrategy == AGG_HASHED ||
1515 : : aggstate->aggstrategy == AGG_MIXED);
1516 : :
1517 : : /*
1518 : : * Used to make sure initial hash table allocation does not exceed
1519 : : * hash_mem. Note that the estimate does not include space for
1520 : : * pass-by-reference transition data values, nor for the representative
1521 : : * tuple of each group.
1522 : : */
1523 : 2731 : additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
1524 : :
362 tgl@sss.pgh.pa.us 1525 : 5462 : perhash->hashtable = BuildTupleHashTable(&aggstate->ss.ps,
1526 : 2731 : perhash->hashslot->tts_tupleDescriptor,
1527 : 2731 : perhash->hashslot->tts_ops,
1528 : : perhash->numCols,
1529 : : perhash->hashGrpColIdxHash,
1530 : 2731 : perhash->eqfuncoids,
1531 : : perhash->hashfunctions,
1532 : 2731 : perhash->aggnode->grpCollations,
1533 : : nbuckets,
1534 : : additionalsize,
1535 : : metacxt,
1536 : : tuplescxt,
1537 : : tmpcxt,
1538 : 2731 : DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
2127 jdavis@postgresql.or 1539 : 2731 : }
1540 : :
1541 : : /*
1542 : : * Compute columns that actually need to be stored in hashtable entries. The
1543 : : * incoming tuples from the child plan node will contain grouping columns,
1544 : : * other columns referenced in our targetlist and qual, columns used to
1545 : : * compute the aggregate functions, and perhaps just junk columns we don't use
1546 : : * at all. Only columns of the first two types need to be stored in the
1547 : : * hashtable, and getting rid of the others can make the table entries
1548 : : * significantly smaller. The hashtable only contains the relevant columns,
1549 : : * and is packed/unpacked in lookup_hash_entries() / agg_retrieve_hash_table()
1550 : : * into the format of the normal input descriptor.
1551 : : *
1552 : : * Besides the grouping columns themselves, additional columns come from two
1553 : : * sources: first, functionally dependent columns that we don't need to group
1554 : : * by; and second, ctids for row marks.
1555 : : *
1556 : : * To eliminate duplicates, we build a bitmapset of the needed columns, and
1557 : : * then build an array of the columns included in the hashtable. We might
1558 : : * still have duplicates if the passed-in grpColIdx has them, which can happen
1559 : : * in edge cases from semijoins/distinct; these can't always be removed,
1560 : : * because it's not certain that the duplicate cols will be using the same
1561 : : * hash function.
1562 : : *
1563 : : * Note that the array is preserved over ExecReScanAgg, so we allocate it in
1564 : : * the per-query context (unlike the hash table itself).
1565 : : */
1566 : : static void
6270 neilc@samurai.com 1567 : 3518 : find_hash_columns(AggState *aggstate)
1568 : : {
1569 : : Bitmapset *base_colnos;
1570 : : Bitmapset *aggregated_colnos;
1983 jdavis@postgresql.or 1571 : 3518 : TupleDesc scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
3303 andres@anarazel.de 1572 : 3518 : List *outerTlist = outerPlanState(aggstate)->plan->targetlist;
3186 rhodiumtoad@postgres 1573 : 3518 : int numHashes = aggstate->num_hashes;
2861 andres@anarazel.de 1574 : 3518 : EState *estate = aggstate->ss.ps.state;
1575 : : int j;
1576 : :
1577 : : /* Find Vars that will be needed in tlist and qual */
1983 jdavis@postgresql.or 1578 : 3518 : find_cols(aggstate, &aggregated_colnos, &base_colnos);
1579 : 3518 : aggstate->colnos_needed = bms_union(base_colnos, aggregated_colnos);
1580 : 3518 : aggstate->max_colno_needed = 0;
1581 : 3518 : aggstate->all_cols_needed = true;
1582 : :
1583 [ + + ]: 14706 : for (int i = 0; i < scanDesc->natts; i++)
1584 : : {
1679 tgl@sss.pgh.pa.us 1585 : 11188 : int colno = i + 1;
1586 : :
1983 jdavis@postgresql.or 1587 [ + + ]: 11188 : if (bms_is_member(colno, aggstate->colnos_needed))
1588 : 8144 : aggstate->max_colno_needed = colno;
1589 : : else
1590 : 3044 : aggstate->all_cols_needed = false;
1591 : : }
1592 : :
3186 rhodiumtoad@postgres 1593 [ + + ]: 7293 : for (j = 0; j < numHashes; ++j)
1594 : : {
1595 : 3775 : AggStatePerHash perhash = &aggstate->perhash[j];
1596 : 3775 : Bitmapset *colnos = bms_copy(base_colnos);
1597 : 3775 : AttrNumber *grpColIdx = perhash->aggnode->grpColIdx;
1598 : 3775 : List *hashTlist = NIL;
1599 : : TupleDesc hashDesc;
1600 : : int maxCols;
1601 : : int i;
1602 : :
1603 : 3775 : perhash->largestGrpColIdx = 0;
1604 : :
1605 : : /*
1606 : : * If we're doing grouping sets, then some Vars might be referenced in
1607 : : * tlist/qual for the benefit of other grouping sets, but not needed
1608 : : * when hashing; i.e. prepare_projection_slot will null them out, so
1609 : : * there'd be no point storing them. Use prepare_projection_slot's
1610 : : * logic to determine which.
1611 : : */
1612 [ + - ]: 3775 : if (aggstate->phases[0].grouped_cols)
1613 : : {
1614 : 3775 : Bitmapset *grouped_cols = aggstate->phases[0].grouped_cols[j];
1615 : : ListCell *lc;
1616 : :
1617 [ + - + + : 10194 : foreach(lc, aggstate->all_grouped_cols)
+ + ]
1618 : : {
1619 : 6419 : int attnum = lfirst_int(lc);
1620 : :
1621 [ + + ]: 6419 : if (!bms_is_member(attnum, grouped_cols))
1622 : 672 : colnos = bms_del_member(colnos, attnum);
1623 : : }
1624 : : }
1625 : :
1626 : : /*
1627 : : * Compute maximum number of input columns accounting for possible
1628 : : * duplications in the grpColIdx array, which can happen in some edge
1629 : : * cases where HashAggregate was generated as part of a semijoin or a
1630 : : * DISTINCT.
1631 : : */
2399 1632 : 3775 : maxCols = bms_num_members(colnos) + perhash->numCols;
1633 : :
3186 1634 : 3775 : perhash->hashGrpColIdxInput =
2399 1635 : 3775 : palloc(maxCols * sizeof(AttrNumber));
3186 1636 : 3775 : perhash->hashGrpColIdxHash =
1637 : 3775 : palloc(perhash->numCols * sizeof(AttrNumber));
1638 : :
1639 : : /* Add all the grouping columns to colnos */
2399 1640 [ + + ]: 9522 : for (i = 0; i < perhash->numCols; i++)
1641 : 5747 : colnos = bms_add_member(colnos, grpColIdx[i]);
1642 : :
1643 : : /*
1644 : : * First build mapping for columns directly hashed. These are the
1645 : : * first, because they'll be accessed when computing hash values and
1646 : : * comparing tuples for exact matches. We also build simple mapping
1647 : : * for execGrouping, so it knows where to find the to-be-hashed /
1648 : : * compared columns in the input.
1649 : : */
3186 1650 [ + + ]: 9522 : for (i = 0; i < perhash->numCols; i++)
1651 : : {
1652 : 5747 : perhash->hashGrpColIdxInput[i] = grpColIdx[i];
1653 : 5747 : perhash->hashGrpColIdxHash[i] = i + 1;
1654 : 5747 : perhash->numhashGrpCols++;
1655 : : /* delete already mapped columns */
1020 tgl@sss.pgh.pa.us 1656 : 5747 : colnos = bms_del_member(colnos, grpColIdx[i]);
1657 : : }
1658 : :
1659 : : /* and add the remaining columns */
1660 : 3775 : i = -1;
1661 [ + + ]: 4411 : while ((i = bms_next_member(colnos, i)) >= 0)
1662 : : {
3186 rhodiumtoad@postgres 1663 : 636 : perhash->hashGrpColIdxInput[perhash->numhashGrpCols] = i;
1664 : 636 : perhash->numhashGrpCols++;
1665 : : }
1666 : :
1667 : : /* and build a tuple descriptor for the hashtable */
1668 [ + + ]: 10158 : for (i = 0; i < perhash->numhashGrpCols; i++)
1669 : : {
1670 : 6383 : int varNumber = perhash->hashGrpColIdxInput[i] - 1;
1671 : :
1672 : 6383 : hashTlist = lappend(hashTlist, list_nth(outerTlist, varNumber));
1673 : 6383 : perhash->largestGrpColIdx =
1674 : 6383 : Max(varNumber + 1, perhash->largestGrpColIdx);
1675 : : }
1676 : :
2583 andres@anarazel.de 1677 : 3775 : hashDesc = ExecTypeFromTL(hashTlist);
1678 : :
2861 1679 : 3775 : execTuplesHashPrepare(perhash->numCols,
1680 : 3775 : perhash->aggnode->grpOperators,
1681 : : &perhash->eqfuncoids,
1682 : : &perhash->hashfunctions);
2860 1683 : 3775 : perhash->hashslot =
2588 1684 : 3775 : ExecAllocTableSlot(&estate->es_tupleTable, hashDesc,
1685 : : &TTSOpsMinimalTuple);
1686 : :
3186 rhodiumtoad@postgres 1687 : 3775 : list_free(hashTlist);
1688 : 3775 : bms_free(colnos);
1689 : : }
1690 : :
1691 : 3518 : bms_free(base_colnos);
8441 tgl@sss.pgh.pa.us 1692 : 3518 : }
1693 : :
1694 : : /*
1695 : : * Estimate per-hash-table-entry overhead.
1696 : : */
1697 : : Size
2083 jdavis@postgresql.or 1698 : 21089 : hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
1699 : : {
1700 : : Size tupleChunkSize;
1701 : : Size pergroupChunkSize;
1702 : : Size transitionChunkSize;
2042 tgl@sss.pgh.pa.us 1703 : 21089 : Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
1704 : : tupleWidth);
1705 : 21089 : Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
1706 : :
1707 : : /*
1708 : : * Entries use the Bump allocator, so the chunk sizes are the same as the
1709 : : * requested sizes.
1710 : : */
267 jdavis@postgresql.or 1711 : 21089 : tupleChunkSize = MAXALIGN(tupleSize);
1712 : 21089 : pergroupChunkSize = pergroupSize;
1713 : :
1714 : : /*
1715 : : * Transition values use AllocSet, which has a chunk header and also uses
1716 : : * power-of-two allocations.
1717 : : */
2083 1718 [ + + ]: 21089 : if (transitionSpace > 0)
267 1719 : 2699 : transitionChunkSize = CHUNKHDRSZ + pg_nextpower2_size_t(transitionSpace);
1720 : : else
2083 1721 : 18390 : transitionChunkSize = 0;
1722 : :
1723 : : return
267 1724 : 21089 : TupleHashEntrySize() +
2083 1725 : 21089 : tupleChunkSize +
1726 : 21089 : pergroupChunkSize +
1727 : : transitionChunkSize;
1728 : : }
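/*
 * Worked example (editorial; exact byte counts depend on MAXALIGN and on
 * TupleHashEntrySize() for the platform): with numTrans = 2, tupleWidth = 24
 * and transitionSpace = 0, the estimate is
 *
 *     TupleHashEntrySize()
 *   + MAXALIGN(MAXALIGN(SizeofMinimalTupleHeader) + 24)   (bump chunk, no header)
 *   + 2 * sizeof(AggStatePerGroupData)                    (bump chunk, no header)
 *   + 0                                                   (no by-ref trans state)
 *
 * With a nonzero transitionSpace the last term becomes
 * CHUNKHDRSZ + pg_nextpower2_size_t(transitionSpace), reflecting AllocSet's
 * chunk header and power-of-two rounding.
 */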
1729 : :
1730 : : /*
1731 : : * hashagg_recompile_expressions()
1732 : : *
1733 : : * Identifies the right phase, compiles the right expression given the
1734 : : * arguments, and then sets phase->evalfunc to that expression.
1735 : : *
1736 : : * Different versions of the compiled expression are needed depending on
1737 : : * whether hash aggregation has spilled or not, and whether it's reading from
1738 : : * the outer plan or a tape. Before spilling to disk, the expression reads
1739 : : * from the outer plan and does not need to perform a NULL check. After
1740 : : * HashAgg begins to spill, new groups will not be created in the hash table,
1741 : : * and the AggStatePerGroup array may be NULL; therefore we need to add a null
1742 : : * pointer check to the expression. Then, when reading spilled data from a
1743 : : * tape, we change the outer slot type to be a fixed minimal tuple slot.
1744 : : *
1745 : : * It would be wasteful to recompile every time, so cache the compiled
1746 : : * expressions in the AggStatePerPhase, and reuse when appropriate.
1747 : : */
1748 : : static void
2099 1749 : 33020 : hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
1750 : : {
1751 : : AggStatePerPhase phase;
2042 tgl@sss.pgh.pa.us 1752 : 33020 : int i = minslot ? 1 : 0;
1753 : 33020 : int j = nullcheck ? 1 : 0;
1754 : :
2099 jdavis@postgresql.or 1755 [ + + - + ]: 33020 : Assert(aggstate->aggstrategy == AGG_HASHED ||
1756 : : aggstate->aggstrategy == AGG_MIXED);
1757 : :
1758 [ + + ]: 33020 : if (aggstate->aggstrategy == AGG_HASHED)
1759 : 6734 : phase = &aggstate->phases[0];
1760 : : else /* AGG_MIXED */
1761 : 26286 : phase = &aggstate->phases[1];
1762 : :
1763 [ + + ]: 33020 : if (phase->evaltrans_cache[i][j] == NULL)
1764 : : {
2042 tgl@sss.pgh.pa.us 1765 : 42 : const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
1766 : 42 : bool outerfixed = aggstate->ss.ps.outeropsfixed;
1767 : 42 : bool dohash = true;
1816 jdavis@postgresql.or 1768 : 42 : bool dosort = false;
1769 : :
1770 : : /*
1771 : : * If minslot is true, that means we are processing a spilled batch
1772 : : * (inside agg_refill_hash_table()), and we must not advance the
1773 : : * sorted grouping sets.
1774 : : */
1775 [ + + + + ]: 42 : if (aggstate->aggstrategy == AGG_MIXED && !minslot)
1776 : 6 : dosort = true;
1777 : :
1778 : : /* temporarily change the outerops while compiling the expression */
2099 1779 [ + + ]: 42 : if (minslot)
1780 : : {
1781 : 21 : aggstate->ss.ps.outerops = &TTSOpsMinimalTuple;
1782 : 21 : aggstate->ss.ps.outeropsfixed = true;
1783 : : }
1784 : :
2042 tgl@sss.pgh.pa.us 1785 : 42 : phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
1786 : : dosort, dohash,
1787 : : nullcheck);
1788 : :
1789 : : /* change back */
2099 jdavis@postgresql.or 1790 : 42 : aggstate->ss.ps.outerops = outerops;
1791 : 42 : aggstate->ss.ps.outeropsfixed = outerfixed;
1792 : : }
1793 : :
1794 : 33020 : phase->evaltrans = phase->evaltrans_cache[i][j];
1795 : 33020 : }
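/*
 * Editorial note: evaltrans_cache[][] holds at most four compiled variants
 * per phase, indexed by [minslot][nullcheck].  Once a given combination has
 * been compiled, later calls here only swap the evaltrans pointer, so
 * alternating between reading the outer plan and reading spilled batches
 * stays cheap.
 */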
1796 : :
1797 : : /*
1798 : : * Set limits that trigger spilling to avoid exceeding hash_mem. Consider the
1799 : : * number of partitions we expect to create (if we do spill).
1800 : : *
1801 : : * There are two limits: a memory limit, and also an ngroups limit. The
1802 : : * ngroups limit becomes important when we expect transition values to grow
1803 : : * substantially larger than the initial value.
1804 : : */
1805 : : void
1967 1806 : 33333 : hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits,
1807 : : Size *mem_limit, uint64 *ngroups_limit,
1808 : : int *num_partitions)
1809 : : {
1810 : : int npartitions;
1811 : : Size partition_mem;
1605 tgl@sss.pgh.pa.us 1812 : 33333 : Size hash_mem_limit = get_hash_memory_limit();
1813 : :
1814 : : /* if not expected to spill, use all of hash_mem */
1815 [ + + ]: 33333 : if (input_groups * hashentrysize <= hash_mem_limit)
1816 : : {
2089 jdavis@postgresql.or 1817 [ + + ]: 32118 : if (num_partitions != NULL)
1818 : 19818 : *num_partitions = 0;
1605 tgl@sss.pgh.pa.us 1819 : 32118 : *mem_limit = hash_mem_limit;
1820 : 32118 : *ngroups_limit = hash_mem_limit / hashentrysize;
2099 jdavis@postgresql.or 1821 : 32118 : return;
1822 : : }
1823 : :
1824 : : /*
1825 : : * Calculate expected memory requirements for spilling, which is the size
1826 : : * of the buffers needed for all the tapes that need to be open at once.
1827 : : * Then, subtract that from the memory available for holding hash tables.
1828 : : */
1829 : 1215 : npartitions = hash_choose_num_partitions(input_groups,
1830 : : hashentrysize,
1831 : : used_bits,
1832 : : NULL);
1833 [ + + ]: 1215 : if (num_partitions != NULL)
1834 : 48 : *num_partitions = npartitions;
1835 : :
1836 : 1215 : partition_mem =
1837 : 1215 : HASHAGG_READ_BUFFER_SIZE +
1838 : : HASHAGG_WRITE_BUFFER_SIZE * npartitions;
1839 : :
1840 : : /*
1841 : : * Don't set the limit below 3/4 of hash_mem. In that case, we are at the
1842 : : * minimum number of partitions, so we aren't going to dramatically exceed
1843 : : * work mem anyway.
1844 : : */
1605 tgl@sss.pgh.pa.us 1845 [ - + ]: 1215 : if (hash_mem_limit > 4 * partition_mem)
1605 tgl@sss.pgh.pa.us 1846 :UBC 0 : *mem_limit = hash_mem_limit - partition_mem;
1847 : : else
1605 tgl@sss.pgh.pa.us 1848 :CBC 1215 : *mem_limit = hash_mem_limit * 0.75;
1849 : :
2099 jdavis@postgresql.or 1850 [ + - ]: 1215 : if (*mem_limit > hashentrysize)
1851 : 1215 : *ngroups_limit = *mem_limit / hashentrysize;
1852 : : else
2099 jdavis@postgresql.or 1853 :UBC 0 : *ngroups_limit = 1;
1854 : : }
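/*
 * Usage sketch (editorial): hash_agg_set_limits() is what callers consult to
 * learn how much of hash_mem may be used before spilling, how many groups
 * may be created, and how many spill partitions to plan for.  The helper
 * below is invented for illustration only (hence NOT_USED); it assumes the
 * usual nodeAgg.c includes and relies solely on the signature shown above.
 */
#ifdef NOT_USED
static void
report_hash_limits(double hashentrysize, double input_groups)
{
	Size		mem_limit;
	uint64		ngroups_limit;
	int			num_partitions;

	hash_agg_set_limits(hashentrysize, input_groups, 0,
						&mem_limit, &ngroups_limit, &num_partitions);

	elog(DEBUG1, "hashagg: mem limit %zu, group limit " UINT64_FORMAT ", %d partitions",
		 mem_limit, ngroups_limit, num_partitions);
}
#endif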
1855 : :
1856 : : /*
1857 : : * hash_agg_check_limits
1858 : : *
1859 : : * After adding a new group to the hash table, check whether we need to enter
1860 : : * spill mode. Allocations may happen without adding new groups (for instance,
1861 : : * if the transition state size grows), so this check is imperfect.
1862 : : */
1863 : : static void
2099 jdavis@postgresql.or 1864 :CBC 259643 : hash_agg_check_limits(AggState *aggstate)
1865 : : {
2042 tgl@sss.pgh.pa.us 1866 : 259643 : uint64 ngroups = aggstate->hash_ngroups_current;
1867 : 259643 : Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
1868 : : true);
47 tgl@sss.pgh.pa.us 1869 :GNC 259643 : Size entry_mem = MemoryContextMemAllocated(aggstate->hash_tuplescxt,
1870 : : true);
267 jdavis@postgresql.or 1871 :CBC 259643 : Size tval_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
1872 : : true);
1873 : 259643 : Size total_mem = meta_mem + entry_mem + tval_mem;
308 1874 : 259643 : bool do_spill = false;
1875 : :
1876 : : #ifdef USE_INJECTION_POINTS
1877 : : if (ngroups >= 1000)
1878 : : {
1879 : : if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-spill-1000"))
1880 : : {
1881 : : do_spill = true;
1882 : : INJECTION_POINT_CACHED("hash-aggregate-spill-1000", NULL);
1883 : : }
1884 : : }
1885 : : #endif
1886 : :
1887 : : /*
1888 : : * Don't spill unless there's at least one group in the hash table so we
1889 : : * can be sure to make progress even in edge cases.
1890 : : */
2099 1891 [ + - ]: 259643 : if (aggstate->hash_ngroups_current > 0 &&
267 1892 [ + + ]: 259643 : (total_mem > aggstate->hash_mem_limit ||
2099 1893 [ + + ]: 246446 : ngroups > aggstate->hash_ngroups_limit))
1894 : : {
308 1895 : 13224 : do_spill = true;
1896 : : }
1897 : :
1898 [ + + ]: 259643 : if (do_spill)
1899 : 13224 : hash_agg_enter_spill_mode(aggstate);
2099 1900 : 259643 : }
1901 : :
1902 : : /*
1903 : : * Enter "spill mode", meaning that no new groups are added to any of the hash
1904 : : * tables. Tuples that would create a new group are instead spilled, and
1905 : : * processed later.
1906 : : */
1907 : : static void
1908 : 13224 : hash_agg_enter_spill_mode(AggState *aggstate)
1909 : : {
1910 : : INJECTION_POINT("hash-aggregate-enter-spill-mode", NULL);
1911 : 13224 : aggstate->hash_spill_mode = true;
1912 : 13224 : hashagg_recompile_expressions(aggstate, aggstate->table_filled, true);
1913 : :
1914 [ + + ]: 13224 : if (!aggstate->hash_ever_spilled)
1915 : : {
1520 heikki.linnakangas@i 1916 [ - + ]: 30 : Assert(aggstate->hash_tapeset == NULL);
2099 jdavis@postgresql.or 1917 [ - + ]: 30 : Assert(aggstate->hash_spills == NULL);
1918 : :
1919 : 30 : aggstate->hash_ever_spilled = true;
1920 : :
1520 heikki.linnakangas@i 1921 : 30 : aggstate->hash_tapeset = LogicalTapeSetCreate(true, NULL, -1);
1922 : :
6 michael@paquier.xyz 1923 :GNC 30 : aggstate->hash_spills = palloc_array(HashAggSpill, aggstate->num_hashes);
1924 : :
2099 jdavis@postgresql.or 1925 [ + + ]:CBC 90 : for (int setno = 0; setno < aggstate->num_hashes; setno++)
1926 : : {
2042 tgl@sss.pgh.pa.us 1927 : 60 : AggStatePerHash perhash = &aggstate->perhash[setno];
1928 : 60 : HashAggSpill *spill = &aggstate->hash_spills[setno];
1929 : :
1520 heikki.linnakangas@i 1930 : 60 : hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
2099 jdavis@postgresql.or 1931 : 60 : perhash->aggnode->numGroups,
1932 : : aggstate->hashentrysize);
1933 : : }
1934 : : }
1935 : 13224 : }
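/*
 * Editorial cross-reference: once hash_spill_mode is set, lookup_hash_entries()
 * passes a NULL "isnew" pointer to LookupTupleHashEntry(), so no new groups
 * are created; tuples that don't match an existing group are instead written
 * out via hashagg_spill_tuple() to the grouping set's HashAggSpill and are
 * reprocessed later when the spilled batches are loaded back into the table.
 */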
1936 : :
1937 : : /*
1938 : : * Update metrics after filling the hash table.
1939 : : *
1940 : : * If reading from the outer plan, from_tape should be false; if reading from
1941 : : * another tape, from_tape should be true.
1942 : : */
1943 : : static void
1944 : 22261 : hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
1945 : : {
1946 : : Size meta_mem;
1947 : : Size entry_mem;
1948 : : Size hashkey_mem;
1949 : : Size buffer_mem;
1950 : : Size total_mem;
1951 : :
1952 [ + + ]: 22261 : if (aggstate->aggstrategy != AGG_MIXED &&
1953 [ - + ]: 9058 : aggstate->aggstrategy != AGG_HASHED)
2099 jdavis@postgresql.or 1954 :UBC 0 : return;
1955 : :
1956 : : /* memory for the hash table itself */
2099 jdavis@postgresql.or 1957 :CBC 22261 : meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
1958 : :
1959 : : /* memory for hash entries */
47 tgl@sss.pgh.pa.us 1960 :GNC 22261 : entry_mem = MemoryContextMemAllocated(aggstate->hash_tuplescxt, true);
1961 : :
1962 : : /* memory for byref transition states */
1978 pg@bowt.ie 1963 :CBC 22261 : hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
1964 : :
1965 : : /* memory for read/write tape buffers, if spilled */
2099 jdavis@postgresql.or 1966 : 22261 : buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
1967 [ + + ]: 22261 : if (from_tape)
1968 : 13467 : buffer_mem += HASHAGG_READ_BUFFER_SIZE;
1969 : :
1970 : : /* update peak mem */
267 1971 : 22261 : total_mem = meta_mem + entry_mem + hashkey_mem + buffer_mem;
2099 1972 [ + + ]: 22261 : if (total_mem > aggstate->hash_mem_peak)
1973 : 2489 : aggstate->hash_mem_peak = total_mem;
1974 : :
1975 : : /* update disk usage */
1520 heikki.linnakangas@i 1976 [ + + ]: 22261 : if (aggstate->hash_tapeset != NULL)
1977 : : {
1978 : 13497 : uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeset) * (BLCKSZ / 1024);
1979 : :
2099 jdavis@postgresql.or 1980 [ + + ]: 13497 : if (aggstate->hash_disk_used < disk_used)
1981 : 24 : aggstate->hash_disk_used = disk_used;
1982 : : }
1983 : :
1984 : : /* update hashentrysize estimate based on contents */
1985 [ + + ]: 22261 : if (aggstate->hash_ngroups_current > 0)
1986 : : {
1987 : 22039 : aggstate->hashentrysize =
267 1988 : 22039 : TupleHashEntrySize() +
1978 pg@bowt.ie 1989 : 22039 : (hashkey_mem / (double) aggstate->hash_ngroups_current);
1990 : : }
1991 : : }
1992 : :
1993 : : /*
1994 : : * Create memory contexts used for hash aggregation.
1995 : : */
1996 : : static void
267 jdavis@postgresql.or 1997 : 3518 : hash_create_memory(AggState *aggstate)
1998 : : {
1999 : 3518 : Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
2000 : :
2001 : : /*
2002 : : * The hashcontext's per-tuple memory will be used for byref transition
2003 : : * values and returned by AggCheckCallContext().
2004 : : */
2005 : 3518 : aggstate->hashcontext = CreateWorkExprContext(aggstate->ss.ps.state);
2006 : :
2007 : : /*
2008 : : * The meta context will be used for the bucket array of
2009 : : * TupleHashEntryData (or arrays, in the case of grouping sets). As the
2010 : : * hash table grows, the bucket array will double in size and the old one
2011 : : * will be freed, so an AllocSet is appropriate. For large bucket arrays,
2012 : : * the large allocation path will be used, so it's not worth worrying
2013 : : * about wasting space due to power-of-two allocations.
2014 : : */
2015 : 3518 : aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
2016 : : "HashAgg meta context",
2017 : : ALLOCSET_DEFAULT_SIZES);
2018 : :
2019 : : /*
2020 : : * The hash entries themselves, which include the grouping key
2021 : : * (firstTuple) and pergroup data, are stored in the table context. The
2022 : : * bump allocator can be used because the entries are not freed until the
2023 : : * entire hash table is reset. The bump allocator is faster for
2024 : : * allocations and avoids wasting space on the chunk header or
2025 : : * power-of-two allocations.
2026 : : *
2027 : : * Like CreateWorkExprContext(), use smaller sizings for smaller work_mem,
2028 : : * to avoid large jumps in memory usage.
2029 : : */
2030 : :
2035 : 3518 : maxBlockSize = pg_prevpower2_size_t(work_mem * (Size) 1024 / 16);
2036 : :
2037 : : /* But no bigger than ALLOCSET_DEFAULT_MAXSIZE */
2038 : 3518 : maxBlockSize = Min(maxBlockSize, ALLOCSET_DEFAULT_MAXSIZE);
2039 : :
2040 : : /* and no smaller than ALLOCSET_DEFAULT_INITSIZE */
2041 : 3518 : maxBlockSize = Max(maxBlockSize, ALLOCSET_DEFAULT_INITSIZE);
2042 : :
47 tgl@sss.pgh.pa.us 2043 :GNC 3518 : aggstate->hash_tuplescxt = BumpContextCreate(aggstate->ss.ps.state->es_query_cxt,
2044 : : "HashAgg hashed tuples",
2045 : : ALLOCSET_DEFAULT_MINSIZE,
2046 : : ALLOCSET_DEFAULT_INITSIZE,
2047 : : maxBlockSize);
2048 : :
267 jdavis@postgresql.or 2049 :CBC 3518 : }
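/*
 * Editorial summary of the three arenas created above:
 *   - hashcontext (per-tuple memory): by-reference transition values, also
 *     handed out via AggCheckCallContext();
 *   - hash_metacxt (AllocSet): bucket arrays/metadata, freed and reallocated
 *     as the table grows;
 *   - hash_tuplescxt (Bump): the entries themselves (representative tuple
 *     plus per-group states), freed only when the whole table is reset.
 */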
2050 : :
2051 : : /*
2052 : : * Choose a reasonable number of buckets for the initial hash table size.
2053 : : */
2054 : : static double
44 tgl@sss.pgh.pa.us 2055 :GNC 2731 : hash_choose_num_buckets(double hashentrysize, double ngroups, Size memory)
2056 : : {
2057 : : double max_nbuckets;
2058 : 2731 : double nbuckets = ngroups;
2059 : :
2099 jdavis@postgresql.or 2060 :CBC 2731 : max_nbuckets = memory / hashentrysize;
2061 : :
2062 : : /*
2063 : : * Underestimating is better than overestimating. Too many buckets crowd
2064 : : * out space for group keys and transition state values.
2065 : : */
44 tgl@sss.pgh.pa.us 2066 :GNC 2731 : max_nbuckets /= 2;
2067 : :
2099 jdavis@postgresql.or 2068 [ + + ]:CBC 2731 : if (nbuckets > max_nbuckets)
2069 : 36 : nbuckets = max_nbuckets;
2070 : :
2071 : : /*
2072 : : * BuildTupleHashTable will clamp any obviously-insane result, so we don't
2073 : : * need to be too careful here.
2074 : : */
44 tgl@sss.pgh.pa.us 2075 :GNC 2731 : return nbuckets;
2076 : : }
2077 : :
2078 : : /*
2079 : : * Determine the number of partitions to create when spilling, which will
2080 : : * always be a power of two. If log2_npartitions is non-NULL, set
2081 : : * *log2_npartitions to the log2() of the number of partitions.
2082 : : */
2083 : : static int
1967 jdavis@postgresql.or 2084 :CBC 7521 : hash_choose_num_partitions(double input_groups, double hashentrysize,
2085 : : int used_bits, int *log2_npartitions)
2086 : : {
1605 tgl@sss.pgh.pa.us 2087 : 7521 : Size hash_mem_limit = get_hash_memory_limit();
2088 : : double partition_limit;
2089 : : double mem_wanted;
2090 : : double dpartitions;
2091 : : int npartitions;
2092 : : int partition_bits;
2093 : :
2094 : : /*
2095 : : * Avoid creating so many partitions that the memory requirements of the
2096 : : * open partition files are greater than 1/4 of hash_mem.
2097 : : */
2099 jdavis@postgresql.or 2098 : 7521 : partition_limit =
1605 tgl@sss.pgh.pa.us 2099 : 7521 : (hash_mem_limit * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
2100 : : HASHAGG_WRITE_BUFFER_SIZE;
2101 : :
2099 jdavis@postgresql.or 2102 : 7521 : mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize;
2103 : :
2104 : : /* make enough partitions so that each one is likely to fit in memory */
1605 tgl@sss.pgh.pa.us 2105 : 7521 : dpartitions = 1 + (mem_wanted / hash_mem_limit);
2106 : :
2107 [ + + ]: 7521 : if (dpartitions > partition_limit)
2108 : 7488 : dpartitions = partition_limit;
2109 : :
2110 [ + - ]: 7521 : if (dpartitions < HASHAGG_MIN_PARTITIONS)
2111 : 7521 : dpartitions = HASHAGG_MIN_PARTITIONS;
2112 [ - + ]: 7521 : if (dpartitions > HASHAGG_MAX_PARTITIONS)
1605 tgl@sss.pgh.pa.us 2113 :UBC 0 : dpartitions = HASHAGG_MAX_PARTITIONS;
2114 : :
2115 : : /* HASHAGG_MAX_PARTITIONS limit makes this safe */
1605 tgl@sss.pgh.pa.us 2116 :CBC 7521 : npartitions = (int) dpartitions;
2117 : :
2118 : : /* ceil(log2(npartitions)) */
97 michael@paquier.xyz 2119 :GNC 7521 : partition_bits = pg_ceil_log2_32(npartitions);
2120 : :
2121 : : /* make sure that we don't exhaust the hash bits */
2099 jdavis@postgresql.or 2122 [ - + ]:CBC 7521 : if (partition_bits + used_bits >= 32)
2099 jdavis@postgresql.or 2123 :UBC 0 : partition_bits = 32 - used_bits;
2124 : :
2099 jdavis@postgresql.or 2125 [ + + ]:CBC 7521 : if (log2_npartitions != NULL)
2126 : 6306 : *log2_npartitions = partition_bits;
2127 : :
2128 : : /* number of partitions will be a power of two */
1605 tgl@sss.pgh.pa.us 2129 : 7521 : npartitions = 1 << partition_bits;
2130 : :
2099 jdavis@postgresql.or 2131 : 7521 : return npartitions;
2132 : : }
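/*
 * Worked example (editorial; numbers chosen for illustration, taking
 * HASHAGG_PARTITION_FACTOR as 1.5 and assuming the partition_limit and
 * MIN/MAX clamps don't bind): with hash_mem_limit = 4MB, hashentrysize = 64
 * and input_groups = 350000,
 *
 *     mem_wanted  = 1.5 * 350000 * 64  = roughly 33.6MB
 *     dpartitions = 1 + 33.6MB / 4MB   = roughly 9.4
 *
 * so npartitions starts at 9, partition_bits = ceil(log2(9)) = 4, and the
 * final power-of-two answer is 16 partitions.
 */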
2133 : :
2134 : : /*
2135 : : * Initialize a freshly-created TupleHashEntry.
2136 : : */
2137 : : static void
1969 2138 : 259643 : initialize_hash_entry(AggState *aggstate, TupleHashTable hashtable,
2139 : : TupleHashEntry entry)
2140 : : {
2141 : : AggStatePerGroup pergroup;
2142 : : int transno;
2143 : :
2144 : 259643 : aggstate->hash_ngroups_current++;
2145 : 259643 : hash_agg_check_limits(aggstate);
2146 : :
2147 : : /* no need to allocate or initialize per-group state */
2148 [ + + ]: 259643 : if (aggstate->numtrans == 0)
2149 : 101738 : return;
2150 : :
267 2151 : 157905 : pergroup = (AggStatePerGroup) TupleHashEntryGetAdditional(hashtable, entry);
2152 : :
2153 : : /*
2154 : : * Initialize aggregates for the new tuple group; lookup_hash_entries()
2155 : : * has already selected the relevant grouping set.
2156 : : */
1969 2157 [ + + ]: 390811 : for (transno = 0; transno < aggstate->numtrans; transno++)
2158 : : {
2159 : 232906 : AggStatePerTrans pertrans = &aggstate->pertrans[transno];
2160 : 232906 : AggStatePerGroup pergroupstate = &pergroup[transno];
2161 : :
2162 : 232906 : initialize_aggregate(aggstate, pertrans, pergroupstate);
2163 : : }
2164 : : }
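/*
 * Editorial cross-reference: the per-group array initialized here lives in
 * the "additional" space of the hash entry, sized in build_hash_table() as
 * numtrans * sizeof(AggStatePerGroupData) and fetched with
 * TupleHashEntryGetAdditional(); that is why the numtrans == 0 case above
 * returns without allocating anything.
 */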
2165 : :
2166 : : /*
2167 : : * Look up hash entries for the current tuple in all hashed grouping sets.
2168 : : *
2169 : : * Some entries may be left NULL if we are in "spill mode". The same tuple
2170 : : * will belong to different groups for each grouping set, so may match a group
2171 : : * already in memory for one set and match a group not in memory for another
2172 : : * set. When in "spill mode", the tuple will be spilled for each grouping set
2173 : : * where it doesn't match a group in memory.
2174 : : *
2175 : : * NB: It's possible to spill the same tuple for several different grouping
2176 : : * sets. This may seem wasteful, but it's actually a trade-off: if we spill
2177 : : * the tuple multiple times for multiple grouping sets, it can be partitioned
2178 : : * for each grouping set, making the refilling of the hash table very
2179 : : * efficient.
2180 : : */
2181 : : static void
3186 rhodiumtoad@postgres 2182 : 3453699 : lookup_hash_entries(AggState *aggstate)
2183 : : {
2184 : 3453699 : AggStatePerGroup *pergroup = aggstate->hash_pergroup;
1969 jdavis@postgresql.or 2185 : 3453699 : TupleTableSlot *outerslot = aggstate->tmpcontext->ecxt_outertuple;
2186 : : int setno;
2187 : :
2099 2188 [ + + ]: 6974624 : for (setno = 0; setno < aggstate->num_hashes; setno++)
2189 : : {
2042 tgl@sss.pgh.pa.us 2190 : 3520925 : AggStatePerHash perhash = &aggstate->perhash[setno];
1969 jdavis@postgresql.or 2191 : 3520925 : TupleHashTable hashtable = perhash->hashtable;
2192 : 3520925 : TupleTableSlot *hashslot = perhash->hashslot;
2193 : : TupleHashEntry entry;
2194 : : uint32 hash;
2195 : 3520925 : bool isnew = false;
2196 : : bool *p_isnew;
2197 : :
2198 : : /* if hash table already spilled, don't create new entries */
2199 [ + + ]: 3520925 : p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
2200 : :
3186 rhodiumtoad@postgres 2201 : 3520925 : select_current_set(aggstate, setno, true);
1969 jdavis@postgresql.or 2202 : 3520925 : prepare_hash_slot(perhash,
2203 : : outerslot,
2204 : : hashslot);
2205 : :
2206 : 3520925 : entry = LookupTupleHashEntry(hashtable, hashslot,
2207 : : p_isnew, &hash);
2208 : :
2209 [ + + ]: 3520925 : if (entry != NULL)
2210 : : {
2211 [ + + ]: 3140459 : if (isnew)
2212 : 185441 : initialize_hash_entry(aggstate, hashtable, entry);
267 2213 : 3140459 : pergroup[setno] = TupleHashEntryGetAdditional(hashtable, entry);
2214 : : }
2215 : : else
2216 : : {
2042 tgl@sss.pgh.pa.us 2217 : 380466 : HashAggSpill *spill = &aggstate->hash_spills[setno];
2218 : 380466 : TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
2219 : :
2099 jdavis@postgresql.or 2220 [ - + ]: 380466 : if (spill->partitions == NULL)
1520 heikki.linnakangas@i 2221 :UBC 0 : hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
2099 jdavis@postgresql.or 2222 : 0 : perhash->aggnode->numGroups,
2223 : : aggstate->hashentrysize);
2224 : :
1983 jdavis@postgresql.or 2225 :CBC 380466 : hashagg_spill_tuple(aggstate, spill, slot, hash);
1969 2226 : 380466 : pergroup[setno] = NULL;
2227 : : }
2228 : : }
3186 rhodiumtoad@postgres 2229 : 3453699 : }
2230 : :
2231 : : /*
2232 : : * ExecAgg -
2233 : : *
2234 : : * ExecAgg receives tuples from its outer subplan and aggregates over
2235 : : * the appropriate attribute for each aggregate function use (Aggref
2236 : : * node) appearing in the targetlist or qual of the node. The number
2237 : : * of tuples to aggregate over depends on whether grouped or plain
2238 : : * aggregation is selected. In grouped aggregation, we produce a result
2239 : : * row for each group; in plain aggregation there's a single result row
2240 : : * for the whole query. In either case, the value of each aggregate is
2241 : : * stored in the expression context to be used when ExecProject evaluates
2242 : : * the result tuple.
2243 : : */
2244 : : static TupleTableSlot *
3074 andres@anarazel.de 2245 : 411723 : ExecAgg(PlanState *pstate)
2246 : : {
2247 : 411723 : AggState *node = castNode(AggState, pstate);
3186 rhodiumtoad@postgres 2248 : 411723 : TupleTableSlot *result = NULL;
2249 : :
3066 andres@anarazel.de 2250 [ + + ]: 411723 : CHECK_FOR_INTERRUPTS();
2251 : :
3867 2252 [ + + ]: 411723 : if (!node->agg_done)
2253 : : {
2254 : : /* Dispatch based on strategy */
3186 rhodiumtoad@postgres 2255 [ + + + - ]: 377070 : switch (node->phase->aggstrategy)
2256 : : {
3867 andres@anarazel.de 2257 : 238171 : case AGG_HASHED:
2258 [ + + ]: 238171 : if (!node->table_filled)
2259 : 8722 : agg_fill_hash_table(node);
2260 : : /* FALLTHROUGH */
2261 : : case AGG_MIXED:
2262 : 251852 : result = agg_retrieve_hash_table(node);
2263 : 251852 : break;
3186 rhodiumtoad@postgres 2264 : 125218 : case AGG_PLAIN:
2265 : : case AGG_SORTED:
3867 andres@anarazel.de 2266 : 125218 : result = agg_retrieve_direct(node);
2267 : 125127 : break;
2268 : : }
2269 : :
2270 [ + + + - ]: 376979 : if (!TupIsNull(result))
2271 : 367434 : return result;
2272 : : }
2273 : :
2274 : 44198 : return NULL;
2275 : : }
2276 : :
2277 : : /*
2278 : : * ExecAgg for non-hashed case
2279 : : */
2280 : : static TupleTableSlot *
8412 tgl@sss.pgh.pa.us 2281 : 125218 : agg_retrieve_direct(AggState *aggstate)
2282 : : {
3867 andres@anarazel.de 2283 : 125218 : Agg *node = aggstate->phase->aggnode;
2284 : : ExprContext *econtext;
2285 : : ExprContext *tmpcontext;
2286 : : AggStatePerAgg peragg;
2287 : : AggStatePerGroup *pergroups;
2288 : : TupleTableSlot *outerslot;
2289 : : TupleTableSlot *firstSlot;
2290 : : TupleTableSlot *result;
2291 : 125218 : bool hasGroupingSets = aggstate->phase->numsets > 0;
2292 : 125218 : int numGroupingSets = Max(aggstate->phase->numsets, 1);
2293 : : int currentSet;
2294 : : int nextSetSize;
2295 : : int numReset;
2296 : : int i;
2297 : :
2298 : : /*
2299 : : * get state info from node
2300 : : *
2301 : : * econtext is the per-output-tuple expression context
2302 : : *
2303 : : * tmpcontext is the per-input-tuple expression context
2304 : : */
8412 tgl@sss.pgh.pa.us 2305 : 125218 : econtext = aggstate->ss.ps.ps_ExprContext;
8441 2306 : 125218 : tmpcontext = aggstate->tmpcontext;
2307 : :
9578 2308 : 125218 : peragg = aggstate->peragg;
2905 andres@anarazel.de 2309 : 125218 : pergroups = aggstate->pergroups;
8412 tgl@sss.pgh.pa.us 2310 : 125218 : firstSlot = aggstate->ss.ss_ScanTupleSlot;
2311 : :
2312 : : /*
2313 : : * We loop retrieving groups until we find one matching
2314 : : * aggstate->ss.ps.qual
2315 : : *
2316 : : * For grouping sets, we have the invariant that aggstate->projected_set
2317 : : * is either -1 (initial call) or the index (starting from 0) in
2318 : : * gset_lengths for the group we just completed (either by projecting a
2319 : : * row or by discarding it in the qual).
2320 : : */
7829 2321 [ + + ]: 160634 : while (!aggstate->agg_done)
2322 : : {
2323 : : /*
2324 : : * Clear the per-output-tuple context for each group, as well as
2325 : : * aggcontext (which contains any pass-by-ref transvalues of the old
2326 : : * group). Some aggregate functions store working state in child
2327 : : * contexts; those now get reset automatically without us needing to
2328 : : * do anything special.
2329 : : *
2330 : : * We use ReScanExprContext not just ResetExprContext because we want
2331 : : * any registered shutdown callbacks to be called. That allows
2332 : : * aggregate functions to ensure they've cleaned up any non-memory
2333 : : * resources.
2334 : : */
3867 andres@anarazel.de 2335 : 160529 : ReScanExprContext(econtext);
2336 : :
2337 : : /*
2338 : : * Determine how many grouping sets need to be reset at this boundary.
2339 : : */
2340 [ + + ]: 160529 : if (aggstate->projected_set >= 0 &&
2341 [ + + ]: 123915 : aggstate->projected_set < numGroupingSets)
2342 : 123906 : numReset = aggstate->projected_set + 1;
2343 : : else
2344 : 36623 : numReset = numGroupingSets;
2345 : :
2346 : : /*
2347 : : * numReset can change on a phase boundary, but that's OK; we want to
2348 : : * reset the contexts used in _this_ phase, and later, after possibly
2349 : : * changing phase, initialize the right number of aggregates for the
2350 : : * _new_ phase.
2351 : : */
2352 : :
2353 [ + + ]: 332197 : for (i = 0; i < numReset; i++)
2354 : : {
2355 : 171668 : ReScanExprContext(aggstate->aggcontexts[i]);
2356 : : }
2357 : :
2358 : : /*
2359 : : * Check if input is complete and there are no more groups to project
2360 : : * in this phase; move to next phase or mark as done.
2361 : : */
2362 [ + + ]: 160529 : if (aggstate->input_done == true &&
2363 [ + + ]: 807 : aggstate->projected_set >= (numGroupingSets - 1))
2364 : : {
2365 [ + + ]: 399 : if (aggstate->current_phase < aggstate->numphases - 1)
2366 : : {
2367 : 102 : initialize_phase(aggstate, aggstate->current_phase + 1);
2368 : 102 : aggstate->input_done = false;
2369 : 102 : aggstate->projected_set = -1;
2370 : 102 : numGroupingSets = Max(aggstate->phase->numsets, 1);
2371 : 102 : node = aggstate->phase->aggnode;
2372 : 102 : numReset = numGroupingSets;
2373 : : }
3186 rhodiumtoad@postgres 2374 [ + + ]: 297 : else if (aggstate->aggstrategy == AGG_MIXED)
2375 : : {
2376 : : /*
2377 : : * Mixed mode; we've output all the grouped stuff and have
2378 : : * full hashtables, so switch to outputting those.
2379 : : */
2380 : 78 : initialize_phase(aggstate, 0);
2381 : 78 : aggstate->table_filled = true;
2382 : 78 : ResetTupleHashIterator(aggstate->perhash[0].hashtable,
2383 : : &aggstate->perhash[0].hashiter);
2384 : 78 : select_current_set(aggstate, 0, true);
2385 : 78 : return agg_retrieve_hash_table(aggstate);
2386 : : }
2387 : : else
2388 : : {
8441 tgl@sss.pgh.pa.us 2389 : 219 : aggstate->agg_done = true;
3867 andres@anarazel.de 2390 : 219 : break;
2391 : : }
2392 : : }
2393 : :
2394 : : /*
2395 : : * Get the number of columns in the next grouping set after the last
2396 : : * projected one (if any). This is the number of columns to compare to
2397 : : * see if we reached the boundary of that set too.
2398 : : */
2399 [ + + ]: 160232 : if (aggstate->projected_set >= 0 &&
2400 [ + + ]: 123516 : aggstate->projected_set < (numGroupingSets - 1))
2401 : 13647 : nextSetSize = aggstate->phase->gset_lengths[aggstate->projected_set + 1];
2402 : : else
2403 : 146585 : nextSetSize = 0;
2404 : :
2405 : : /*----------
2406 : : * If a subgroup for the current grouping set is present, project it.
2407 : : *
2408 : : * We have a new group if:
2409 : : * - we're out of input but haven't projected all grouping sets
2410 : : * (checked above)
2411 : : * OR
2412 : : * - we already projected a row that wasn't from the last grouping
2413 : : * set
2414 : : * AND
2415 : : * - the next grouping set has at least one grouping column (since
2416 : : * empty grouping sets project only once input is exhausted)
2417 : : * AND
2418 : : * - the previous and pending rows differ on the grouping columns
2419 : : * of the next grouping set
2420 : : *----------
2421 : : */
2861 2422 : 160232 : tmpcontext->ecxt_innertuple = econtext->ecxt_outertuple;
3867 2423 [ + + ]: 160232 : if (aggstate->input_done ||
3186 rhodiumtoad@postgres 2424 [ + + ]: 159824 : (node->aggstrategy != AGG_PLAIN &&
3867 andres@anarazel.de 2425 [ + + ]: 124089 : aggstate->projected_set != -1 &&
2426 [ + + + + ]: 123108 : aggstate->projected_set < (numGroupingSets - 1) &&
2427 : 9973 : nextSetSize > 0 &&
2861 2428 [ + + ]: 9973 : !ExecQualAndReset(aggstate->phase->eqfunctions[nextSetSize - 1],
2429 : : tmpcontext)))
2430 : : {
3867 2431 : 7075 : aggstate->projected_set += 1;
2432 : :
2433 [ - + ]: 7075 : Assert(aggstate->projected_set < numGroupingSets);
2434 [ + + - + ]: 7075 : Assert(nextSetSize > 0 || aggstate->input_done);
2435 : : }
2436 : : else
2437 : : {
2438 : : /*
2439 : : * We no longer care what group we just projected; the next
2440 : : * projection will always be the first (or only) grouping set
2441 : : * (unless the input proves to be empty).
2442 : : */
2443 : 153157 : aggstate->projected_set = 0;
2444 : :
2445 : : /*
2446 : : * If we don't already have the first tuple of the new group,
2447 : : * fetch it from the outer plan.
2448 : : */
2449 [ + + ]: 153157 : if (aggstate->grp_firstTuple == NULL)
2450 : : {
2451 : 36716 : outerslot = fetch_input_tuple(aggstate);
2452 [ + + + + ]: 36686 : if (!TupIsNull(outerslot))
2453 : : {
2454 : : /*
2455 : : * Make a copy of the first input tuple; we will use this
2456 : : * for comparisons (in group mode) and for projection.
2457 : : */
2587 2458 : 30142 : aggstate->grp_firstTuple = ExecCopySlotHeapTuple(outerslot);
2459 : : }
2460 : : else
2461 : : {
2462 : : /* outer plan produced no tuples at all */
3867 2463 [ + + ]: 6544 : if (hasGroupingSets)
2464 : : {
2465 : : /*
2466 : : * If there was no input at all, we need to project
2467 : : * rows only if there are grouping sets of size 0.
2468 : : * Note that this implies that there can't be any
2469 : : * references to ungrouped Vars, which would otherwise
2470 : : * cause issues with the empty output slot.
2471 : : *
2472 : : * XXX: This is no longer true; we currently deal with
2473 : : * this in finalize_aggregates().
2474 : : */
2475 : 39 : aggstate->input_done = true;
2476 : :
2477 [ + + ]: 54 : while (aggstate->phase->gset_lengths[aggstate->projected_set] > 0)
2478 : : {
2479 : 24 : aggstate->projected_set += 1;
2480 [ + + ]: 24 : if (aggstate->projected_set >= numGroupingSets)
2481 : : {
2482 : : /*
2483 : : * We can't set agg_done here because we might
2484 : : * have more phases to do, even though the
2485 : : * input is empty. So we need to restart the
2486 : : * whole outer loop.
2487 : : */
2488 : 9 : break;
2489 : : }
2490 : : }
2491 : :
2492 [ + + ]: 39 : if (aggstate->projected_set >= numGroupingSets)
2493 : 9 : continue;
2494 : : }
2495 : : else
2496 : : {
2497 : 6505 : aggstate->agg_done = true;
2498 : : /* If we are grouping, we should produce no tuples too */
2499 [ + + ]: 6505 : if (node->aggstrategy != AGG_PLAIN)
2500 : 30 : return NULL;
2501 : : }
2502 : : }
2503 : : }
2504 : :
2505 : : /*
2506 : : * Initialize working state for a new input tuple group.
2507 : : */
2905 2508 : 153088 : initialize_aggregates(aggstate, pergroups, numReset);
2509 : :
3867 2510 [ + + ]: 153088 : if (aggstate->grp_firstTuple != NULL)
2511 : : {
2512 : : /*
2513 : : * Store the copied first input tuple in the tuple table slot
2514 : : * reserved for it. The tuple will be deleted when it is
2515 : : * cleared from the slot.
2516 : : */
2587 2517 : 146583 : ExecForceStoreHeapTuple(aggstate->grp_firstTuple,
2518 : : firstSlot, true);
3100 tgl@sss.pgh.pa.us 2519 : 146583 : aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
2520 : :
2521 : : /* set up for first advance_aggregates call */
3867 andres@anarazel.de 2522 : 146583 : tmpcontext->ecxt_outertuple = firstSlot;
2523 : :
2524 : : /*
2525 : : * Process each outer-plan tuple, and then fetch the next one,
2526 : : * until we exhaust the outer plan or cross a group boundary.
2527 : : */
2528 : : for (;;)
2529 : : {
2530 : : /*
2531 : : * During phase 1 only of a mixed agg, we need to update
2532 : : * hashtables as well in advance_aggregates.
2533 : : */
3186 rhodiumtoad@postgres 2534 [ + + ]: 11104857 : if (aggstate->aggstrategy == AGG_MIXED &&
2535 [ + - ]: 19031 : aggstate->current_phase == 1)
2536 : : {
2898 andres@anarazel.de 2537 : 19031 : lookup_hash_entries(aggstate);
2538 : : }
2539 : :
2540 : : /* Advance the aggregates (or combine functions) */
2541 : 11104857 : advance_aggregates(aggstate);
2542 : :
2543 : : /* Reset per-input-tuple context after each tuple */
3867 2544 : 11104818 : ResetExprContext(tmpcontext);
2545 : :
2546 : 11104818 : outerslot = fetch_input_tuple(aggstate);
2547 [ + + + + ]: 11104808 : if (TupIsNull(outerslot))
2548 : : {
2549 : : /* no more outer-plan tuples available */
2550 : :
2551 : : /* if we built hash tables, finalize any spills */
2099 jdavis@postgresql.or 2552 [ + + ]: 30081 : if (aggstate->aggstrategy == AGG_MIXED &&
2553 [ + - ]: 72 : aggstate->current_phase == 1)
2554 : 72 : hashagg_finish_initial_spills(aggstate);
2555 : :
3867 andres@anarazel.de 2556 [ + + ]: 30081 : if (hasGroupingSets)
2557 : : {
2558 : 360 : aggstate->input_done = true;
2559 : 360 : break;
2560 : : }
2561 : : else
2562 : : {
2563 : 29721 : aggstate->agg_done = true;
2564 : 29721 : break;
2565 : : }
2566 : : }
2567 : : /* set up for next advance_aggregates call */
2568 : 11074727 : tmpcontext->ecxt_outertuple = outerslot;
2569 : :
2570 : : /*
2571 : : * If we are grouping, check whether we've crossed a group
2572 : : * boundary.
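                 :                 :                          * The copied first tuple of the current group is in firstSlot;
                 :                 :                          * if the newly fetched tuple differs on any grouping column,
                 :                 :                          * stash it as the first tuple of the next group and leave this
                 :                 :                          * loop.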
2573 : : */
1063 tgl@sss.pgh.pa.us 2574 [ + + + + ]: 11074727 : if (node->aggstrategy != AGG_PLAIN && node->numCols > 0)
2575 : : {
2861 andres@anarazel.de 2576 : 1236654 : tmpcontext->ecxt_innertuple = firstSlot;
2577 [ + + ]: 1236654 : if (!ExecQual(aggstate->phase->eqfunctions[node->numCols - 1],
2578 : : tmpcontext))
2579 : : {
2587 2580 : 116453 : aggstate->grp_firstTuple = ExecCopySlotHeapTuple(outerslot);
3867 2581 : 116453 : break;
2582 : : }
2583 : : }
2584 : : }
2585 : : }
2586 : :
2587 : : /*
2588 : : * Use the representative input tuple for any references to
2589 : : * non-aggregated input columns in aggregate direct args, the node
2590 : : * qual, and the tlist. (If we are not grouping, and there are no
2591 : : * input rows at all, we will come here with an empty firstSlot
2592 : : * ... but if not grouping, there can't be any references to
2593 : : * non-aggregated input columns, so no problem.)
2594 : : */
2595 : 153039 : econtext->ecxt_outertuple = firstSlot;
2596 : : }
2597 : :
2598 [ - + ]: 160114 : Assert(aggstate->projected_set >= 0);
2599 : :
2600 : 160114 : currentSet = aggstate->projected_set;
2601 : :
2602 : 160114 : prepare_projection_slot(aggstate, econtext->ecxt_outertuple, currentSet);
2603 : :
3186 rhodiumtoad@postgres 2604 : 160114 : select_current_set(aggstate, currentSet, false);
2605 : :
2606 : 160114 : finalize_aggregates(aggstate,
2607 : : peragg,
2905 andres@anarazel.de 2608 : 160114 : pergroups[currentSet]);
2609 : :
2610 : : /*
2611 : : * If there's no row to project right now, we must continue rather
2612 : : * than returning a null since there might be more groups.
2613 : : */
3867 2614 : 160108 : result = project_aggregates(aggstate);
2615 [ + + ]: 160102 : if (result)
2616 : 124695 : return result;
2617 : : }
2618 : :
2619 : : /* No more groups */
7829 tgl@sss.pgh.pa.us 2620 : 324 : return NULL;
2621 : : }
2622 : :
2623 : : /*
2624 : : * ExecAgg for hashed case: read input and build hash table
2625 : : */
2626 : : static void
8412 2627 : 8722 : agg_fill_hash_table(AggState *aggstate)
2628 : : {
2629 : : TupleTableSlot *outerslot;
3186 rhodiumtoad@postgres 2630 : 8722 : ExprContext *tmpcontext = aggstate->tmpcontext;
2631 : :
2632 : : /*
2633 : : * Process each outer-plan tuple, and then fetch the next one, until we
2634 : : * exhaust the outer plan.
2635 : : */
2636 : : for (;;)
2637 : : {
3867 andres@anarazel.de 2638 : 3443390 : outerslot = fetch_input_tuple(aggstate);
8441 tgl@sss.pgh.pa.us 2639 [ + + + + ]: 3443390 : if (TupIsNull(outerslot))
2640 : : break;
2641 : :
2642 : : /* set up for lookup_hash_entries and advance_aggregates */
6872 2643 : 3434668 : tmpcontext->ecxt_outertuple = outerslot;
2644 : :
2645 : : /* Find or build hashtable entries */
2898 andres@anarazel.de 2646 : 3434668 : lookup_hash_entries(aggstate);
2647 : :
2648 : : /* Advance the aggregates (or combine functions) */
2649 : 3434668 : advance_aggregates(aggstate);
2650 : :
2651 : : /*
2652 : : * Reset per-input-tuple context after each tuple, but note that the
2653 : : * hash lookups do this too
2654 : : */
3186 rhodiumtoad@postgres 2655 : 3434668 : ResetExprContext(aggstate->tmpcontext);
2656 : : }
2657 : :
2658 : : /* finalize spills, if any */
2099 jdavis@postgresql.or 2659 : 8722 : hashagg_finish_initial_spills(aggstate);
2660 : :
8441 tgl@sss.pgh.pa.us 2661 : 8722 : aggstate->table_filled = true;
2662 : : /* Initialize to walk the first hash table */
3186 rhodiumtoad@postgres 2663 : 8722 : select_current_set(aggstate, 0, true);
2664 : 8722 : ResetTupleHashIterator(aggstate->perhash[0].hashtable,
2665 : : &aggstate->perhash[0].hashiter);
8441 tgl@sss.pgh.pa.us 2666 : 8722 : }
2667 : :
2668 : : /*
2669 : : * If any data was spilled during hash aggregation, reset the hash table and
2670 : : * reprocess one batch of spilled data. After reprocessing a batch, the hash
2671 : : * table will again contain data, ready to be consumed by
2672 : : * agg_retrieve_hash_table_in_memory().
2673 : : *
2674                 :                 :  * Should only be called after all in-memory hash table entries have been
2675 : : * finalized and emitted.
2676 : : *
2677 : : * Return false when input is exhausted and there's no more work to be done;
2678 : : * otherwise return true.
2679 : : */
2680 : : static bool
2099 jdavis@postgresql.or 2681 : 22658 : agg_refill_hash_table(AggState *aggstate)
2682 : : {
2683 : : HashAggBatch *batch;
2684 : : AggStatePerHash perhash;
2685 : : HashAggSpill spill;
1520 heikki.linnakangas@i 2686 : 22658 : LogicalTapeSet *tapeset = aggstate->hash_tapeset;
2042 tgl@sss.pgh.pa.us 2687 : 22658 : bool spill_initialized = false;
2688 : :
2099 jdavis@postgresql.or 2689 [ + + ]: 22658 : if (aggstate->hash_batches == NIL)
2690 : 9191 : return false;
2691 : :
2692 : : /* hash_batches is a stack, with the top item at the end of the list */
1506 tgl@sss.pgh.pa.us 2693 : 13467 : batch = llast(aggstate->hash_batches);
2694 : 13467 : aggstate->hash_batches = list_delete_last(aggstate->hash_batches);
2695 : :
1967 jdavis@postgresql.or 2696 : 13467 : hash_agg_set_limits(aggstate->hashentrysize, batch->input_card,
2697 : : batch->used_bits, &aggstate->hash_mem_limit,
2698 : : &aggstate->hash_ngroups_limit, NULL);
2699 : :
2700 : : /*
2701 : : * Each batch only processes one grouping set; set the rest to NULL so
2702 : : * that advance_aggregates() knows to ignore them. We don't touch
2703 : : * pergroups for sorted grouping sets here, because they will be needed if
2704 : : * we rescan later. The expressions for sorted grouping sets will not be
2705 : : * evaluated after we recompile anyway.
2706 : : */
1816 2707 [ + - + - : 103704 : MemSet(aggstate->hash_pergroup, 0,
+ - + - +
+ ]
2708 : : sizeof(AggStatePerGroup) * aggstate->num_hashes);
2709 : :
2710 : : /* free memory and reset hash tables */
2099 2711 : 13467 : ReScanExprContext(aggstate->hashcontext);
2712 [ + + ]: 103704 : for (int setno = 0; setno < aggstate->num_hashes; setno++)
2713 : 90237 : ResetTupleHashTable(aggstate->perhash[setno].hashtable);
2714 : :
2715 : 13467 : aggstate->hash_ngroups_current = 0;
2716 : :
2717 : : /*
2718 : : * In AGG_MIXED mode, hash aggregation happens in phase 1 and the output
2719 : : * happens in phase 0. So, we switch to phase 1 when processing a batch,
2720 : : * and back to phase 0 after the batch is done.
2721 : : */
2722 [ - + ]: 13467 : Assert(aggstate->current_phase == 0);
2723 [ + + ]: 13467 : if (aggstate->phase->aggstrategy == AGG_MIXED)
2724 : : {
2725 : 13131 : aggstate->current_phase = 1;
2726 : 13131 : aggstate->phase = &aggstate->phases[aggstate->current_phase];
2727 : : }
2728 : :
2729 : 13467 : select_current_set(aggstate, batch->setno, true);
2730 : :
1969 2731 : 13467 : perhash = &aggstate->perhash[aggstate->current_set];
2732 : :
2733 : : /*
2734 : : * Spilled tuples are always read back as MinimalTuples, which may be
2735 : : * different from the outer plan, so recompile the aggregate expressions.
2736 : : *
2737 : : * We still need the NULL check, because we are only processing one
2738 : : * grouping set at a time and the rest will be NULL.
2739 : : */
2099 2740 : 13467 : hashagg_recompile_expressions(aggstate, true, true);
2741 : :
2742 : : INJECTION_POINT("hash-aggregate-process-batch", NULL);
2743 : : for (;;)
2042 tgl@sss.pgh.pa.us 2744 : 592812 : {
1969 jdavis@postgresql.or 2745 : 606279 : TupleTableSlot *spillslot = aggstate->hash_spill_rslot;
2746 : 606279 : TupleTableSlot *hashslot = perhash->hashslot;
267 2747 : 606279 : TupleHashTable hashtable = perhash->hashtable;
2748 : : TupleHashEntry entry;
2749 : : MinimalTuple tuple;
2750 : : uint32 hash;
1969 2751 : 606279 : bool isnew = false;
2752 [ + + ]: 606279 : bool *p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
2753 : :
2099 2754 [ - + ]: 606279 : CHECK_FOR_INTERRUPTS();
2755 : :
2756 : 606279 : tuple = hashagg_batch_read(batch, &hash);
2757 [ + + ]: 606279 : if (tuple == NULL)
2758 : 13467 : break;
2759 : :
1969 2760 : 592812 : ExecStoreMinimalTuple(tuple, spillslot, true);
2761 : 592812 : aggstate->tmpcontext->ecxt_outertuple = spillslot;
2762 : :
2763 : 592812 : prepare_hash_slot(perhash,
2764 : 592812 : aggstate->tmpcontext->ecxt_outertuple,
2765 : : hashslot);
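                 :                 :                 /*
                 :                 :                  * In spill mode p_isnew is NULL, so the lookup will not create
                 :                 :                  * new entries; a NULL result routes the tuple to the spill path
                 :                 :                  * below instead of growing the hash table.
                 :                 :                  */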
267 2766 : 592812 : entry = LookupTupleHashEntryHash(hashtable, hashslot,
2767 : : p_isnew, hash);
2768 : :
1969 2769 [ + + ]: 592812 : if (entry != NULL)
2770 : : {
2771 [ + + ]: 380466 : if (isnew)
267 2772 : 74202 : initialize_hash_entry(aggstate, hashtable, entry);
2773 : 380466 : aggstate->hash_pergroup[batch->setno] = TupleHashEntryGetAdditional(hashtable, entry);
2099 2774 : 380466 : advance_aggregates(aggstate);
2775 : : }
2776 : : else
2777 : : {
2778 [ + + ]: 212346 : if (!spill_initialized)
2779 : : {
2780 : : /*
2781 : : * Avoid initializing the spill until we actually need it so
2782 : : * that we don't assign tapes that will never be used.
2783 : : */
2784 : 6246 : spill_initialized = true;
1520 heikki.linnakangas@i 2785 : 6246 : hashagg_spill_init(&spill, tapeset, batch->used_bits,
2786 : : batch->input_card, aggstate->hashentrysize);
2787 : : }
2788 : : /* no memory for a new group, spill */
1969 jdavis@postgresql.or 2789 : 212346 : hashagg_spill_tuple(aggstate, &spill, spillslot, hash);
2790 : :
2791 : 212346 : aggstate->hash_pergroup[batch->setno] = NULL;
2792 : : }
2793 : :
2794 : : /*
2795 : : * Reset per-input-tuple context after each tuple, but note that the
2796 : : * hash lookups do this too
2797 : : */
2099 2798 : 592812 : ResetExprContext(aggstate->tmpcontext);
2799 : : }
2800 : :
1520 heikki.linnakangas@i 2801 : 13467 : LogicalTapeClose(batch->input_tape);
2802 : :
2803 : : /* change back to phase 0 */
2099 jdavis@postgresql.or 2804 : 13467 : aggstate->current_phase = 0;
2805 : 13467 : aggstate->phase = &aggstate->phases[aggstate->current_phase];
2806 : :
2807 [ + + ]: 13467 : if (spill_initialized)
2808 : : {
2809 : 6246 : hashagg_spill_finish(aggstate, &spill, batch->setno);
1918 2810 : 6246 : hash_agg_update_metrics(aggstate, true, spill.npartitions);
2811 : : }
2812 : : else
2099 2813 : 7221 : hash_agg_update_metrics(aggstate, true, 0);
2814 : :
2815 : 13467 : aggstate->hash_spill_mode = false;
2816 : :
2817 : : /* prepare to walk the first hash table */
2818 : 13467 : select_current_set(aggstate, batch->setno, true);
2819 : 13467 : ResetTupleHashIterator(aggstate->perhash[batch->setno].hashtable,
2820 : : &aggstate->perhash[batch->setno].hashiter);
2821 : :
2822 : 13467 : pfree(batch);
2823 : :
2824 : 13467 : return true;
2825 : : }
2826 : :
2827 : : /*
2828 : : * ExecAgg for hashed case: retrieving groups from hash table
2829 : : *
2830 : : * After exhausting in-memory tuples, also try refilling the hash table using
2831 : : * previously-spilled tuples. Only returns NULL after all in-memory and
2832 : : * spilled tuples are exhausted.
2833 : : */
2834 : : static TupleTableSlot *
8412 tgl@sss.pgh.pa.us 2835 : 251930 : agg_retrieve_hash_table(AggState *aggstate)
2836 : : {
2099 jdavis@postgresql.or 2837 : 251930 : TupleTableSlot *result = NULL;
2838 : :
2839 [ + + ]: 508136 : while (result == NULL)
2840 : : {
2841 : 265397 : result = agg_retrieve_hash_table_in_memory(aggstate);
2842 [ + + ]: 265397 : if (result == NULL)
2843 : : {
2844 [ + + ]: 22658 : if (!agg_refill_hash_table(aggstate))
2845 : : {
2846 : 9191 : aggstate->agg_done = true;
2847 : 9191 : break;
2848 : : }
2849 : : }
2850 : : }
2851 : :
2852 : 251930 : return result;
2853 : : }
2854 : :
2855 : : /*
2856 : : * Retrieve the groups from the in-memory hash tables without considering any
2857 : : * spilled tuples.
2858 : : */
2859 : : static TupleTableSlot *
2860 : 265397 : agg_retrieve_hash_table_in_memory(AggState *aggstate)
2861 : : {
2862 : : ExprContext *econtext;
2863 : : AggStatePerAgg peragg;
2864 : : AggStatePerGroup pergroup;
2865 : : TupleHashEntry entry;
2866 : : TupleTableSlot *firstSlot;
2867 : : TupleTableSlot *result;
2868 : : AggStatePerHash perhash;
2869 : :
2870 : : /*
2871 : : * get state info from node.
2872 : : *
2873 : : * econtext is the per-output-tuple expression context.
2874 : : */
8412 tgl@sss.pgh.pa.us 2875 : 265397 : econtext = aggstate->ss.ps.ps_ExprContext;
8441 2876 : 265397 : peragg = aggstate->peragg;
8412 2877 : 265397 : firstSlot = aggstate->ss.ss_ScanTupleSlot;
2878 : :
2879 : : /*
2880 : : * Note that perhash (and therefore anything accessed through it) can
2881 : : * change inside the loop, as we change between grouping sets.
2882 : : */
3186 rhodiumtoad@postgres 2883 : 265397 : perhash = &aggstate->perhash[aggstate->current_set];
2884 : :
2885 : : /*
2886 : : * We loop retrieving groups until we find one satisfying
2887 : : * aggstate->ss.ps.qual
2888 : : */
2889 : : for (;;)
8441 tgl@sss.pgh.pa.us 2890 : 67977 : {
3186 rhodiumtoad@postgres 2891 : 333374 : TupleTableSlot *hashslot = perhash->hashslot;
267 jdavis@postgresql.or 2892 : 333374 : TupleHashTable hashtable = perhash->hashtable;
2893 : : int i;
2894 : :
3066 andres@anarazel.de 2895 [ - + ]: 333374 : CHECK_FOR_INTERRUPTS();
2896 : :
2897 : : /*
2898 : : * Find the next entry in the hash table
2899 : : */
267 jdavis@postgresql.or 2900 : 333374 : entry = ScanTupleHashTable(hashtable, &perhash->hashiter);
8376 tgl@sss.pgh.pa.us 2901 [ + + ]: 333374 : if (entry == NULL)
2902 : : {
3186 rhodiumtoad@postgres 2903 : 72804 : int nextset = aggstate->current_set + 1;
2904 : :
2905 [ + + ]: 72804 : if (nextset < aggstate->num_hashes)
2906 : : {
2907 : : /*
2908 : : * Switch to next grouping set, reinitialize, and restart the
2909 : : * loop.
2910 : : */
2911 : 50146 : select_current_set(aggstate, nextset, true);
2912 : :
2913 : 50146 : perhash = &aggstate->perhash[aggstate->current_set];
2914 : :
59 drowley@postgresql.o 2915 : 50146 : ResetTupleHashIterator(perhash->hashtable, &perhash->hashiter);
2916 : :
3186 rhodiumtoad@postgres 2917 : 50146 : continue;
2918 : : }
2919 : : else
2920 : : {
2921 : 22658 : return NULL;
2922 : : }
2923 : : }
2924 : :
2925 : : /*
2926 : : * Clear the per-output-tuple context for each group
2927 : : *
2928 : : * We intentionally don't use ReScanExprContext here; if any aggs have
2929 : : * registered shutdown callbacks, they mustn't be called yet, since we
2930 : : * might not be done with that agg.
2931 : : */
8441 tgl@sss.pgh.pa.us 2932 : 260570 : ResetExprContext(econtext);
2933 : :
2934 : : /*
2935 : : * Transform representative tuple back into one with the right
2936 : : * columns.
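                 :                 :                  * The stored representative tuple holds only the columns chosen
                 :                 :                  * by find_hash_columns(), so scatter them back to their original
                 :                 :                  * attribute positions in firstSlot and leave everything else NULL.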
2937 : : */
267 jdavis@postgresql.or 2938 : 260570 : ExecStoreMinimalTuple(TupleHashEntryGetTuple(entry), hashslot, false);
3303 andres@anarazel.de 2939 : 260570 : slot_getallattrs(hashslot);
2940 : :
2941 : 260570 : ExecClearTuple(firstSlot);
2942 : 260570 : memset(firstSlot->tts_isnull, true,
2943 : 260570 : firstSlot->tts_tupleDescriptor->natts * sizeof(bool));
2944 : :
3186 rhodiumtoad@postgres 2945 [ + + ]: 686950 : for (i = 0; i < perhash->numhashGrpCols; i++)
2946 : : {
2947 : 426380 : int varNumber = perhash->hashGrpColIdxInput[i] - 1;
2948 : :
3303 andres@anarazel.de 2949 : 426380 : firstSlot->tts_values[varNumber] = hashslot->tts_values[i];
2950 : 426380 : firstSlot->tts_isnull[varNumber] = hashslot->tts_isnull[i];
2951 : : }
2952 : 260570 : ExecStoreVirtualTuple(firstSlot);
2953 : :
267 jdavis@postgresql.or 2954 : 260570 : pergroup = (AggStatePerGroup) TupleHashEntryGetAdditional(hashtable, entry);
2955 : :
2956 : : /*
2957 : : * Use the representative input tuple for any references to
2958 : : * non-aggregated input columns in the qual and tlist.
2959 : : */
6872 tgl@sss.pgh.pa.us 2960 : 260570 : econtext->ecxt_outertuple = firstSlot;
2961 : :
3186 rhodiumtoad@postgres 2962 : 260570 : prepare_projection_slot(aggstate,
2963 : : econtext->ecxt_outertuple,
2964 : : aggstate->current_set);
2965 : :
2966 : 260570 : finalize_aggregates(aggstate, peragg, pergroup);
2967 : :
3867 andres@anarazel.de 2968 : 260570 : result = project_aggregates(aggstate);
2969 [ + + ]: 260570 : if (result)
2970 : 242739 : return result;
2971 : : }
2972 : :
2973 : : /* No more groups */
2974 : : return NULL;
2975 : : }
2976 : :
2977 : : /*
2978 : : * hashagg_spill_init
2979 : : *
2980 : : * Called after we determined that spilling is necessary. Chooses the number
2981 : : * of partitions to create, and initializes them.
2982 : : */
2983 : : static void
1520 heikki.linnakangas@i 2984 : 6306 : hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset, int used_bits,
2985 : : double input_groups, double hashentrysize)
2986 : : {
2987 : : int npartitions;
2988 : : int partition_bits;
2989 : :
2042 tgl@sss.pgh.pa.us 2990 : 6306 : npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
2991 : : used_bits, &partition_bits);
2992 : :
2993 : : #ifdef USE_INJECTION_POINTS
2994 : : if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-single-partition"))
2995 : : {
2996 : : npartitions = 1;
2997 : : partition_bits = 0;
2998 : : INJECTION_POINT_CACHED("hash-aggregate-single-partition", NULL);
2999 : : }
3000 : : #endif
3001 : :
6 michael@paquier.xyz 3002 :GNC 6306 : spill->partitions = palloc0_array(LogicalTape *, npartitions);
3003 : 6306 : spill->ntuples = palloc0_array(int64, npartitions);
3004 : 6306 : spill->hll_card = palloc0_array(hyperLogLogState, npartitions);
3005 : :
1520 heikki.linnakangas@i 3006 [ + + ]:CBC 31530 : for (int i = 0; i < npartitions; i++)
3007 : 25224 : spill->partitions[i] = LogicalTapeCreate(tapeset);
3008 : :
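                 :                 :         /*
                 :                 :          * The top used_bits of the hash were already consumed by whatever
                 :                 :          * spilled this data; the next partition_bits bits below them select
                 :                 :          * the partition, via (hash & mask) >> shift in hashagg_spill_tuple().
                 :                 :          */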
2099 jdavis@postgresql.or 3009 : 6306 : spill->shift = 32 - used_bits - partition_bits;
308 3010 [ + - ]: 6306 : if (spill->shift < 32)
3011 : 6306 : spill->mask = (npartitions - 1) << spill->shift;
3012 : : else
308 jdavis@postgresql.or 3013 :UBC 0 : spill->mask = 0;
2099 jdavis@postgresql.or 3014 :CBC 6306 : spill->npartitions = npartitions;
3015 : :
1967 3016 [ + + ]: 31530 : for (int i = 0; i < npartitions; i++)
3017 : 25224 : initHyperLogLog(&spill->hll_card[i], HASHAGG_HLL_BIT_WIDTH);
2099 3018 : 6306 : }
3019 : :
3020 : : /*
3021 : : * hashagg_spill_tuple
3022 : : *
3023 : : * No room for new groups in the hash table. Save for later in the appropriate
3024 : : * partition.
3025 : : */
3026 : : static Size
1983 3027 : 592812 : hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
3028 : : TupleTableSlot *inputslot, uint32 hash)
3029 : : {
3030 : : TupleTableSlot *spillslot;
3031 : : int partition;
3032 : : MinimalTuple tuple;
3033 : : LogicalTape *tape;
2042 tgl@sss.pgh.pa.us 3034 : 592812 : int total_written = 0;
3035 : : bool shouldFree;
3036 : :
2099 jdavis@postgresql.or 3037 [ - + ]: 592812 : Assert(spill->partitions != NULL);
3038 : :
3039 : : /* spill only attributes that we actually need */
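                 :                 :         /* (columns not in colnos_needed are written as NULLs, preserving the slot's shape) */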
1983 3040 [ + + ]: 592812 : if (!aggstate->all_cols_needed)
3041 : : {
3042 : 1938 : spillslot = aggstate->hash_spill_wslot;
3043 : 1938 : slot_getsomeattrs(inputslot, aggstate->max_colno_needed);
3044 : 1938 : ExecClearTuple(spillslot);
3045 [ + + ]: 5814 : for (int i = 0; i < spillslot->tts_tupleDescriptor->natts; i++)
3046 : : {
3047 [ + + ]: 3876 : if (bms_is_member(i + 1, aggstate->colnos_needed))
3048 : : {
3049 : 1938 : spillslot->tts_values[i] = inputslot->tts_values[i];
3050 : 1938 : spillslot->tts_isnull[i] = inputslot->tts_isnull[i];
3051 : : }
3052 : : else
3053 : 1938 : spillslot->tts_isnull[i] = true;
3054 : : }
3055 : 1938 : ExecStoreVirtualTuple(spillslot);
3056 : : }
3057 : : else
3058 : 590874 : spillslot = inputslot;
3059 : :
3060 : 592812 : tuple = ExecFetchSlotMinimalTuple(spillslot, &shouldFree);
3061 : :
308 3062 [ + - ]: 592812 : if (spill->shift < 32)
3063 : 592812 : partition = (hash & spill->mask) >> spill->shift;
3064 : : else
308 jdavis@postgresql.or 3065 :UBC 0 : partition = 0;
3066 : :
2099 jdavis@postgresql.or 3067 :CBC 592812 : spill->ntuples[partition]++;
3068 : :
3069 : : /*
3070 : : * All hash values destined for a given partition have some bits in
3071 : : * common, which causes bad HLL cardinality estimates. Hash the hash to
3072 : : * get a more uniform distribution.
3073 : : */
1967 3074 : 592812 : addHyperLogLog(&spill->hll_card[partition], hash_bytes_uint32(hash));
3075 : :
1520 heikki.linnakangas@i 3076 : 592812 : tape = spill->partitions[partition];
3077 : :
1082 peter@eisentraut.org 3078 : 592812 : LogicalTapeWrite(tape, &hash, sizeof(uint32));
2099 jdavis@postgresql.or 3079 : 592812 : total_written += sizeof(uint32);
3080 : :
1082 peter@eisentraut.org 3081 : 592812 : LogicalTapeWrite(tape, tuple, tuple->t_len);
2099 jdavis@postgresql.or 3082 : 592812 : total_written += tuple->t_len;
3083 : :
3084 [ + + ]: 592812 : if (shouldFree)
3085 : 380466 : pfree(tuple);
3086 : :
3087 : 592812 : return total_written;
3088 : : }
3089 : :
3090 : : /*
3091 : : * hashagg_batch_new
3092 : : *
3093 : : * Construct a HashAggBatch item, which represents one iteration of HashAgg to
3094 : : * be done.
3095 : : */
3096 : : static HashAggBatch *
1520 heikki.linnakangas@i 3097 : 13467 : hashagg_batch_new(LogicalTape *input_tape, int setno,
3098 : : int64 input_tuples, double input_card, int used_bits)
3099 : : {
6 michael@paquier.xyz 3100 :GNC 13467 : HashAggBatch *batch = palloc0_object(HashAggBatch);
3101 : :
2099 jdavis@postgresql.or 3102 :CBC 13467 : batch->setno = setno;
3103 : 13467 : batch->used_bits = used_bits;
1520 heikki.linnakangas@i 3104 : 13467 : batch->input_tape = input_tape;
2099 jdavis@postgresql.or 3105 : 13467 : batch->input_tuples = input_tuples;
1967 3106 : 13467 : batch->input_card = input_card;
3107 : :
2099 3108 : 13467 : return batch;
3109 : : }
3110 : :
3111 : : /*
3112 : : * hashagg_batch_read
3113 : : * read the next tuple from a batch's tape. Return NULL if no more.
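                 :                 :  *
                 :                 :  * The on-tape record layout matches hashagg_spill_tuple(): a uint32 hash
                 :                 :  * followed by the MinimalTuple, whose leading t_len word doubles as the
                 :                 :  * stored length, so only t_len - sizeof(uint32) further bytes remain to be
                 :                 :  * read after it.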
3114 : : */
3115 : : static MinimalTuple
3116 : 606279 : hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
3117 : : {
1520 heikki.linnakangas@i 3118 : 606279 : LogicalTape *tape = batch->input_tape;
3119 : : MinimalTuple tuple;
3120 : : uint32 t_len;
3121 : : size_t nread;
3122 : : uint32 hash;
3123 : :
3124 : 606279 : nread = LogicalTapeRead(tape, &hash, sizeof(uint32));
2099 jdavis@postgresql.or 3125 [ + + ]: 606279 : if (nread == 0)
3126 : 13467 : return NULL;
3127 [ - + ]: 592812 : if (nread != sizeof(uint32))
2099 jdavis@postgresql.or 3128 [ # # ]:UBC 0 : ereport(ERROR,
3129 : : (errcode_for_file_access(),
3130 : : errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
3131 : : tape, sizeof(uint32), nread)));
2099 jdavis@postgresql.or 3132 [ + - ]:CBC 592812 : if (hashp != NULL)
3133 : 592812 : *hashp = hash;
3134 : :
1520 heikki.linnakangas@i 3135 : 592812 : nread = LogicalTapeRead(tape, &t_len, sizeof(t_len));
2099 jdavis@postgresql.or 3136 [ - + ]: 592812 : if (nread != sizeof(uint32))
2099 jdavis@postgresql.or 3137 [ # # ]:UBC 0 : ereport(ERROR,
3138 : : (errcode_for_file_access(),
3139 : : errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
3140 : : tape, sizeof(uint32), nread)));
3141 : :
2099 jdavis@postgresql.or 3142 :CBC 592812 : tuple = (MinimalTuple) palloc(t_len);
3143 : 592812 : tuple->t_len = t_len;
3144 : :
1520 heikki.linnakangas@i 3145 : 592812 : nread = LogicalTapeRead(tape,
3146 : : (char *) tuple + sizeof(uint32),
3147 : : t_len - sizeof(uint32));
2099 jdavis@postgresql.or 3148 [ - + ]: 592812 : if (nread != t_len - sizeof(uint32))
2099 jdavis@postgresql.or 3149 [ # # ]:UBC 0 : ereport(ERROR,
3150 : : (errcode_for_file_access(),
3151 : : errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
3152 : : tape, t_len - sizeof(uint32), nread)));
3153 : :
2099 jdavis@postgresql.or 3154 :CBC 592812 : return tuple;
3155 : : }
3156 : :
3157 : : /*
3158 : : * hashagg_finish_initial_spills
3159 : : *
3160                 :                 :  * After the initial pass over the input has been processed, it may have
3161                 :                 :  * spilled tuples to disk. If so, turn the spilled partitions into new
3162                 :                 :  * batches that must later be executed.
3163 : : */
3164 : : static void
3165 : 8794 : hashagg_finish_initial_spills(AggState *aggstate)
3166 : : {
3167 : : int setno;
2042 tgl@sss.pgh.pa.us 3168 : 8794 : int total_npartitions = 0;
3169 : :
2099 jdavis@postgresql.or 3170 [ + + ]: 8794 : if (aggstate->hash_spills != NULL)
3171 : : {
3172 [ + + ]: 90 : for (setno = 0; setno < aggstate->num_hashes; setno++)
3173 : : {
3174 : 60 : HashAggSpill *spill = &aggstate->hash_spills[setno];
3175 : :
3176 : 60 : total_npartitions += spill->npartitions;
3177 : 60 : hashagg_spill_finish(aggstate, spill, setno);
3178 : : }
3179 : :
3180 : : /*
3181                 :                 :                  * We're not processing tuples from the outer plan any more; only
3182 : : * processing batches of spilled tuples. The initial spill structures
3183 : : * are no longer needed.
3184 : : */
3185 : 30 : pfree(aggstate->hash_spills);
3186 : 30 : aggstate->hash_spills = NULL;
3187 : : }
3188 : :
3189 : 8794 : hash_agg_update_metrics(aggstate, false, total_npartitions);
3190 : 8794 : aggstate->hash_spill_mode = false;
3191 : 8794 : }
3192 : :
3193 : : /*
3194 : : * hashagg_spill_finish
3195 : : *
3196 : : * Transform spill partitions into new batches.
3197 : : */
3198 : : static void
3199 : 6306 : hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
3200 : : {
3201 : : int i;
2042 tgl@sss.pgh.pa.us 3202 : 6306 : int used_bits = 32 - spill->shift;
3203 : :
2099 jdavis@postgresql.or 3204 [ - + ]: 6306 : if (spill->npartitions == 0)
2042 tgl@sss.pgh.pa.us 3205 :UBC 0 : return; /* didn't spill */
3206 : :
2099 jdavis@postgresql.or 3207 [ + + ]:CBC 31530 : for (i = 0; i < spill->npartitions; i++)
3208 : : {
1520 heikki.linnakangas@i 3209 : 25224 : LogicalTape *tape = spill->partitions[i];
3210 : : HashAggBatch *new_batch;
3211 : : double cardinality;
3212 : :
3213 : : /* if the partition is empty, don't create a new batch of work */
2099 jdavis@postgresql.or 3214 [ + + ]: 25224 : if (spill->ntuples[i] == 0)
3215 : 11757 : continue;
3216 : :
1967 3217 : 13467 : cardinality = estimateHyperLogLog(&spill->hll_card[i]);
3218 : 13467 : freeHyperLogLog(&spill->hll_card[i]);
3219 : :
3220 : : /* rewinding frees the buffer while not in use */
1520 heikki.linnakangas@i 3221 : 13467 : LogicalTapeRewindForRead(tape, HASHAGG_READ_BUFFER_SIZE);
3222 : :
3223 : 13467 : new_batch = hashagg_batch_new(tape, setno,
1918 jdavis@postgresql.or 3224 : 13467 : spill->ntuples[i], cardinality,
3225 : : used_bits);
1506 tgl@sss.pgh.pa.us 3226 : 13467 : aggstate->hash_batches = lappend(aggstate->hash_batches, new_batch);
2099 jdavis@postgresql.or 3227 : 13467 : aggstate->hash_batches_used++;
3228 : : }
3229 : :
3230 : 6306 : pfree(spill->ntuples);
1967 3231 : 6306 : pfree(spill->hll_card);
2099 3232 : 6306 : pfree(spill->partitions);
3233 : : }
3234 : :
3235 : : /*
3236 : : * Free resources related to a spilled HashAgg.
3237 : : */
3238 : : static void
3239 : 32234 : hashagg_reset_spill_state(AggState *aggstate)
3240 : : {
3241 : : /* free spills from initial pass */
3242 [ - + ]: 32234 : if (aggstate->hash_spills != NULL)
3243 : : {
3244 : : int setno;
3245 : :
2099 jdavis@postgresql.or 3246 [ # # ]:UBC 0 : for (setno = 0; setno < aggstate->num_hashes; setno++)
3247 : : {
3248 : 0 : HashAggSpill *spill = &aggstate->hash_spills[setno];
3249 : :
3250 : 0 : pfree(spill->ntuples);
3251 : 0 : pfree(spill->partitions);
3252 : : }
3253 : 0 : pfree(aggstate->hash_spills);
3254 : 0 : aggstate->hash_spills = NULL;
3255 : : }
3256 : :
3257 : : /* free batches */
1506 tgl@sss.pgh.pa.us 3258 :CBC 32234 : list_free_deep(aggstate->hash_batches);
2099 jdavis@postgresql.or 3259 : 32234 : aggstate->hash_batches = NIL;
3260 : :
3261 : : /* close tape set */
1520 heikki.linnakangas@i 3262 [ + + ]: 32234 : if (aggstate->hash_tapeset != NULL)
3263 : : {
3264 : 30 : LogicalTapeSetClose(aggstate->hash_tapeset);
3265 : 30 : aggstate->hash_tapeset = NULL;
3266 : : }
2099 jdavis@postgresql.or 3267 : 32234 : }
3268 : :
3269 : :
3270 : : /* -----------------
3271 : : * ExecInitAgg
3272 : : *
3273 : : * Creates the run-time information for the agg node produced by the
3274 : : * planner and initializes its outer subtree.
3275 : : *
3276 : : * -----------------
3277 : : */
3278 : : AggState *
7231 tgl@sss.pgh.pa.us 3279 : 26004 : ExecInitAgg(Agg *node, EState *estate, int eflags)
3280 : : {
3281 : : AggState *aggstate;
3282 : : AggStatePerAgg peraggs;
3283 : : AggStatePerTrans pertransstates;
3284 : : AggStatePerGroup *pergroups;
3285 : : Plan *outerPlan;
3286 : : ExprContext *econtext;
3287 : : TupleDesc scanDesc;
3288 : : int max_aggno;
3289 : : int max_transno;
3290 : : int numaggrefs;
3291 : : int numaggs;
3292 : : int numtrans;
3293 : : int phase;
3294 : : int phaseidx;
3295 : : ListCell *l;
3867 andres@anarazel.de 3296 : 26004 : Bitmapset *all_grouped_cols = NULL;
3297 : 26004 : int numGroupingSets = 1;
3298 : : int numPhases;
3299 : : int numHashes;
3300 : 26004 : int i = 0;
3301 : 26004 : int j = 0;
3186 rhodiumtoad@postgres 3302 [ + + ]: 48609 : bool use_hashing = (node->aggstrategy == AGG_HASHED ||
3303 [ + + ]: 22605 : node->aggstrategy == AGG_MIXED);
3304 : :
3305 : : /* check for unsupported flags */
7231 tgl@sss.pgh.pa.us 3306 [ - + ]: 26004 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
3307 : :
3308 : : /*
3309 : : * create state structure
3310 : : */
10327 bruce@momjian.us 3311 : 26004 : aggstate = makeNode(AggState);
8412 tgl@sss.pgh.pa.us 3312 : 26004 : aggstate->ss.ps.plan = (Plan *) node;
3313 : 26004 : aggstate->ss.ps.state = estate;
3074 andres@anarazel.de 3314 : 26004 : aggstate->ss.ps.ExecProcNode = ExecAgg;
3315 : :
8412 tgl@sss.pgh.pa.us 3316 : 26004 : aggstate->aggs = NIL;
3317 : 26004 : aggstate->numaggs = 0;
3787 heikki.linnakangas@i 3318 : 26004 : aggstate->numtrans = 0;
3186 rhodiumtoad@postgres 3319 : 26004 : aggstate->aggstrategy = node->aggstrategy;
3460 tgl@sss.pgh.pa.us 3320 : 26004 : aggstate->aggsplit = node->aggsplit;
3867 andres@anarazel.de 3321 : 26004 : aggstate->maxsets = 0;
3322 : 26004 : aggstate->projected_set = -1;
3323 : 26004 : aggstate->current_set = 0;
8441 tgl@sss.pgh.pa.us 3324 : 26004 : aggstate->peragg = NULL;
3787 heikki.linnakangas@i 3325 : 26004 : aggstate->pertrans = NULL;
2987 tgl@sss.pgh.pa.us 3326 : 26004 : aggstate->curperagg = NULL;
3787 heikki.linnakangas@i 3327 : 26004 : aggstate->curpertrans = NULL;
3867 andres@anarazel.de 3328 : 26004 : aggstate->input_done = false;
3460 tgl@sss.pgh.pa.us 3329 : 26004 : aggstate->agg_done = false;
2905 andres@anarazel.de 3330 : 26004 : aggstate->pergroups = NULL;
8441 tgl@sss.pgh.pa.us 3331 : 26004 : aggstate->grp_firstTuple = NULL;
3867 andres@anarazel.de 3332 : 26004 : aggstate->sort_in = NULL;
3333 : 26004 : aggstate->sort_out = NULL;
3334 : :
3335 : : /*
3336 : : * phases[0] always exists, but is dummy in sorted/plain mode
3337 : : */
3186 rhodiumtoad@postgres 3338 [ + + ]: 26004 : numPhases = (use_hashing ? 1 : 2);
3339 : 26004 : numHashes = (use_hashing ? 1 : 0);
3340 : :
3341 : : /*
3342 : : * Calculate the maximum number of grouping sets in any phase; this
3343 : : * determines the size of some allocations. Also calculate the number of
3344 : : * phases, since all hashed/mixed nodes contribute to only a single phase.
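                 :                 :      * (For example, an AGG_MIXED node whose chain holds two more AGG_HASHED
                 :                 :      * Aggs and one AGG_SORTED Agg ends up with numHashes = 3 but only
                 :                 :      * numPhases = 2: phase 0 covering all the hashed sets plus one sorted
                 :                 :      * phase.)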
3345 : : */
3867 andres@anarazel.de 3346 [ + + ]: 26004 : if (node->groupingSets)
3347 : : {
3348 : 460 : numGroupingSets = list_length(node->groupingSets);
3349 : :
3350 [ + + + + : 962 : foreach(l, node->chain)
+ + ]
3351 : : {
3860 bruce@momjian.us 3352 : 502 : Agg *agg = lfirst(l);
3353 : :
3867 andres@anarazel.de 3354 [ + + ]: 502 : numGroupingSets = Max(numGroupingSets,
3355 : : list_length(agg->groupingSets));
3356 : :
3357 : : /*
3358 : : * additional AGG_HASHED aggs become part of phase 0, but all
3359 : : * others add an extra phase.
3360 : : */
3186 rhodiumtoad@postgres 3361 [ + + ]: 502 : if (agg->aggstrategy != AGG_HASHED)
3362 : 245 : ++numPhases;
3363 : : else
3364 : 257 : ++numHashes;
3365 : : }
3366 : : }
3367 : :
3867 andres@anarazel.de 3368 : 26004 : aggstate->maxsets = numGroupingSets;
3186 rhodiumtoad@postgres 3369 : 26004 : aggstate->numphases = numPhases;
3370 : :
6 michael@paquier.xyz 3371 :GNC 26004 : aggstate->aggcontexts = palloc0_array(ExprContext *, numGroupingSets);
3372 : :
3373 : : /*
3374 : : * Create expression contexts. We need three or more, one for
3375 : : * per-input-tuple processing, one for per-output-tuple processing, one
3376 : : * for all the hashtables, and one for each grouping set. The per-tuple
3377 : : * memory context of the per-grouping-set ExprContexts (aggcontexts)
3378 : : * replaces the standalone memory context formerly used to hold transition
3379 : : * values. We cheat a little by using ExecAssignExprContext() to build
3380 : : * all of them.
3381 : : *
3382 : : * NOTE: the details of what is stored in aggcontexts and what is stored
3383 : : * in the regular per-query memory context are driven by a simple
3384 : : * decision: we want to reset the aggcontext at group boundaries (if not
3385 : : * hashing) and in ExecReScanAgg to recover no-longer-wanted space.
3386 : : */
3867 andres@anarazel.de 3387 :CBC 26004 : ExecAssignExprContext(estate, &aggstate->ss.ps);
3388 : 26004 : aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
3389 : :
3390 [ + + ]: 52440 : for (i = 0; i < numGroupingSets; ++i)
3391 : : {
3392 : 26436 : ExecAssignExprContext(estate, &aggstate->ss.ps);
3393 : 26436 : aggstate->aggcontexts[i] = aggstate->ss.ps.ps_ExprContext;
3394 : : }
3395 : :
3186 rhodiumtoad@postgres 3396 [ + + ]: 26004 : if (use_hashing)
267 jdavis@postgresql.or 3397 : 3518 : hash_create_memory(aggstate);
3398 : :
3867 andres@anarazel.de 3399 : 26004 : ExecAssignExprContext(estate, &aggstate->ss.ps);
3400 : :
3401 : : /*
3402 : : * Initialize child nodes.
3403 : : *
3404 : : * If we are doing a hashed aggregation then the child plan does not need
3405 : : * to handle REWIND efficiently; see ExecReScanAgg.
3406 : : */
7231 tgl@sss.pgh.pa.us 3407 [ + + ]: 26004 : if (node->aggstrategy == AGG_HASHED)
3408 : 3399 : eflags &= ~EXEC_FLAG_REWIND;
8412 3409 : 26004 : outerPlan = outerPlan(node);
7231 3410 : 26004 : outerPlanState(aggstate) = ExecInitNode(outerPlan, estate, eflags);
3411 : :
3412 : : /*
3413 : : * initialize source tuple type.
3414 : : */
2336 andres@anarazel.de 3415 : 26004 : aggstate->ss.ps.outerops =
3416 : 26004 : ExecGetResultSlotOps(outerPlanState(&aggstate->ss),
3417 : : &aggstate->ss.ps.outeropsfixed);
3418 : 26004 : aggstate->ss.ps.outeropsset = true;
3419 : :
2588 3420 : 26004 : ExecCreateScanSlotFromOuterPlan(estate, &aggstate->ss,
3421 : : aggstate->ss.ps.outerops);
2861 3422 : 26004 : scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
3423 : :
3424 : : /*
3425 : : * If there are more than two phases (including a potential dummy phase
3426 : : * 0), input will be resorted using tuplesort. Need a slot for that.
3427 : : */
2336 3428 [ + + ]: 26004 : if (numPhases > 2)
3429 : : {
2588 3430 : 105 : aggstate->sort_slot = ExecInitExtraTupleSlot(estate, scanDesc,
3431 : : &TTSOpsMinimalTuple);
3432 : :
3433 : : /*
3434 : : * The output of the tuplesort, and the output from the outer child
3435 : : * might not use the same type of slot. In most cases the child will
3436 : : * be a Sort, and thus return a TTSOpsMinimalTuple type slot - but the
3437                 :                 :                  * input can also be presorted due to an index, in which case it could be
3438 : : * a different type of slot.
3439 : : *
3440 : : * XXX: For efficiency it would be good to instead/additionally
3441 : : * generate expressions with corresponding settings of outerops* for
3442 : : * the individual phases - deforming is often a bottleneck for
3443 : : * aggregations with lots of rows per group. If there's multiple
3444 : : * sorts, we know that all but the first use TTSOpsMinimalTuple (via
3445 : : * the nodeAgg.c internal tuplesort).
3446 : : */
2336 3447 [ + - ]: 105 : if (aggstate->ss.ps.outeropsfixed &&
3448 [ + + ]: 105 : aggstate->ss.ps.outerops != &TTSOpsMinimalTuple)
3449 : 6 : aggstate->ss.ps.outeropsfixed = false;
3450 : : }
3451 : :
3452 : : /*
3453 : : * Initialize result type, slot and projection.
3454 : : */
2588 3455 : 26004 : ExecInitResultTupleSlotTL(&aggstate->ss.ps, &TTSOpsVirtual);
6892 tgl@sss.pgh.pa.us 3456 : 26004 : ExecAssignProjectionInfo(&aggstate->ss.ps, NULL);
3457 : :
3458 : : /*
3459 : : * initialize child expressions
3460 : : *
3461 : : * We expect the parser to have checked that no aggs contain other agg
3462 : : * calls in their arguments (and just to be sure, we verify it again while
3463 : : * initializing the plan node). This would make no sense under SQL
3464 : : * semantics, and it's forbidden by the spec. Because it is true, we
3465 : : * don't need to worry about evaluating the aggs in any particular order.
3466 : : *
3467 : : * Note: execExpr.c finds Aggrefs for us, and adds them to aggstate->aggs.
3468 : : * Aggrefs in the qual are found here; Aggrefs in the targetlist are found
3469 : : * during ExecAssignProjectionInfo, above.
3470 : : */
2860 andres@anarazel.de 3471 : 26004 : aggstate->ss.ps.qual =
3472 : 26004 : ExecInitQual(node->plan.qual, (PlanState *) aggstate);
3473 : :
3474 : : /*
3475 : : * We should now have found all Aggrefs in the targetlist and quals.
3476 : : */
1848 heikki.linnakangas@i 3477 : 26004 : numaggrefs = list_length(aggstate->aggs);
3478 : 26004 : max_aggno = -1;
3479 : 26004 : max_transno = -1;
3480 [ + + + + : 55032 : foreach(l, aggstate->aggs)
+ + ]
3481 : : {
3482 : 29028 : Aggref *aggref = (Aggref *) lfirst(l);
3483 : :
3484 : 29028 : max_aggno = Max(max_aggno, aggref->aggno);
3485 : 29028 : max_transno = Max(max_transno, aggref->aggtransno);
3486 : : }
343 jdavis@postgresql.or 3487 : 26004 : aggstate->numaggs = numaggs = max_aggno + 1;
3488 : 26004 : aggstate->numtrans = numtrans = max_transno + 1;
3489 : :
3490 : : /*
3491 : : * For each phase, prepare grouping set data and fmgr lookup data for
3492 : : * compare functions. Accumulate all_grouped_cols in passing.
3493 : : */
6 michael@paquier.xyz 3494 :GNC 26004 : aggstate->phases = palloc0_array(AggStatePerPhaseData, numPhases);
3495 : :
3186 rhodiumtoad@postgres 3496 :CBC 26004 : aggstate->num_hashes = numHashes;
3497 [ + + ]: 26004 : if (numHashes)
3498 : : {
6 michael@paquier.xyz 3499 :GNC 3518 : aggstate->perhash = palloc0_array(AggStatePerHashData, numHashes);
3186 rhodiumtoad@postgres 3500 :CBC 3518 : aggstate->phases[0].numsets = 0;
6 michael@paquier.xyz 3501 :GNC 3518 : aggstate->phases[0].gset_lengths = palloc_array(int, numHashes);
3502 : 3518 : aggstate->phases[0].grouped_cols = palloc_array(Bitmapset *, numHashes);
3503 : : }
3504 : :
3186 rhodiumtoad@postgres 3505 :CBC 26004 : phase = 0;
3506 [ + + ]: 52510 : for (phaseidx = 0; phaseidx <= list_length(node->chain); ++phaseidx)
3507 : : {
3508 : : Agg *aggnode;
3509 : : Sort *sortnode;
3510 : :
3511 [ + + ]: 26506 : if (phaseidx > 0)
3512 : : {
3172 tgl@sss.pgh.pa.us 3513 : 502 : aggnode = list_nth_node(Agg, node->chain, phaseidx - 1);
1258 3514 : 502 : sortnode = castNode(Sort, outerPlan(aggnode));
3515 : : }
3516 : : else
3517 : : {
3867 andres@anarazel.de 3518 : 26004 : aggnode = node;
3519 : 26004 : sortnode = NULL;
3520 : : }
3521 : :
3186 rhodiumtoad@postgres 3522 [ + + - + ]: 26506 : Assert(phase <= 1 || sortnode);
3523 : :
3524 [ + + ]: 26506 : if (aggnode->aggstrategy == AGG_HASHED
3525 [ + + ]: 22850 : || aggnode->aggstrategy == AGG_MIXED)
3867 andres@anarazel.de 3526 : 3775 : {
3186 rhodiumtoad@postgres 3527 : 3775 : AggStatePerPhase phasedata = &aggstate->phases[0];
3528 : : AggStatePerHash perhash;
3529 : 3775 : Bitmapset *cols = NULL;
3530 : :
3531 [ - + ]: 3775 : Assert(phase == 0);
3532 : 3775 : i = phasedata->numsets++;
3533 : 3775 : perhash = &aggstate->perhash[i];
3534 : :
3535 : : /* phase 0 always points to the "real" Agg in the hash case */
3536 : 3775 : phasedata->aggnode = node;
3537 : 3775 : phasedata->aggstrategy = node->aggstrategy;
3538 : :
3539 : : /* but the actual Agg node representing this hash is saved here */
3540 : 3775 : perhash->aggnode = aggnode;
3541 : :
3542 : 3775 : phasedata->gset_lengths[i] = perhash->numCols = aggnode->numCols;
3543 : :
3544 [ + + ]: 9522 : for (j = 0; j < aggnode->numCols; ++j)
3545 : 5747 : cols = bms_add_member(cols, aggnode->grpColIdx[j]);
3546 : :
3547 : 3775 : phasedata->grouped_cols[i] = cols;
3548 : :
3549 : 3775 : all_grouped_cols = bms_add_members(all_grouped_cols, cols);
3550 : 3775 : continue;
3551 : : }
3552 : : else
3553 : : {
3554 : 22731 : AggStatePerPhase phasedata = &aggstate->phases[++phase];
3555 : : int num_sets;
3556 : :
3557 : 22731 : phasedata->numsets = num_sets = list_length(aggnode->groupingSets);
3558 : :
3559 [ + + ]: 22731 : if (num_sets)
3560 : : {
3561 : 503 : phasedata->gset_lengths = palloc(num_sets * sizeof(int));
3562 : 503 : phasedata->grouped_cols = palloc(num_sets * sizeof(Bitmapset *));
3563 : :
3564 : 503 : i = 0;
3565 [ + - + + : 1474 : foreach(l, aggnode->groupingSets)
+ + ]
3566 : : {
3567 : 971 : int current_length = list_length(lfirst(l));
3568 : 971 : Bitmapset *cols = NULL;
3569 : :
3570 : : /* planner forces this to be correct */
3571 [ + + ]: 1905 : for (j = 0; j < current_length; ++j)
3572 : 934 : cols = bms_add_member(cols, aggnode->grpColIdx[j]);
3573 : :
3574 : 971 : phasedata->grouped_cols[i] = cols;
3575 : 971 : phasedata->gset_lengths[i] = current_length;
3576 : :
3577 : 971 : ++i;
3578 : : }
3579 : :
3580 : 503 : all_grouped_cols = bms_add_members(all_grouped_cols,
3100 tgl@sss.pgh.pa.us 3581 : 503 : phasedata->grouped_cols[0]);
3582 : : }
3583 : : else
3584 : : {
3186 rhodiumtoad@postgres 3585 [ - + ]: 22228 : Assert(phaseidx == 0);
3586 : :
3587 : 22228 : phasedata->gset_lengths = NULL;
3588 : 22228 : phasedata->grouped_cols = NULL;
3589 : : }
3590 : :
3591 : : /*
3592 : : * If we are grouping, precompute fmgr lookup data for inner loop.
3593 : : */
3594 [ + + ]: 22731 : if (aggnode->aggstrategy == AGG_SORTED)
3595 : : {
3596 : : /*
3597 : : * Build a separate function for each subset of columns that
3598 : : * need to be compared.
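                 :                 :                          * (For instance, grouping sets of lengths {2, 1, 0} build only
                 :                 :                          * eqfunctions[1] and eqfunctions[0]: the empty set needs no
                 :                 :                          * comparison, and sets of equal length share one ExprState.)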
3599 : : */
6 michael@paquier.xyz 3600 :GNC 1327 : phasedata->eqfunctions = palloc0_array(ExprState *, aggnode->numCols);
3601 : :
3602 : : /* for each grouping set */
1168 drowley@postgresql.o 3603 [ + + ]:CBC 2147 : for (int k = 0; k < phasedata->numsets; k++)
3604 : : {
3605 : 820 : int length = phasedata->gset_lengths[k];
3606 : :
3607 : : /* nothing to do for empty grouping set */
1079 tgl@sss.pgh.pa.us 3608 [ + + ]: 820 : if (length == 0)
3609 : 169 : continue;
3610 : :
3611 : : /* if we already had one of this length, it'll do */
2861 andres@anarazel.de 3612 [ + + ]: 651 : if (phasedata->eqfunctions[length - 1] != NULL)
3613 : 69 : continue;
3614 : :
3615 : 582 : phasedata->eqfunctions[length - 1] =
3616 : 582 : execTuplesMatchPrepare(scanDesc,
3617 : : length,
3618 : 582 : aggnode->grpColIdx,
3619 : 582 : aggnode->grpOperators,
2461 peter@eisentraut.org 3620 : 582 : aggnode->grpCollations,
3621 : : (PlanState *) aggstate);
3622 : : }
3623 : :
3624 : : /* and for all grouped columns, unless already computed */
1063 tgl@sss.pgh.pa.us 3625 [ + + ]: 1327 : if (aggnode->numCols > 0 &&
3626 [ + + ]: 1280 : phasedata->eqfunctions[aggnode->numCols - 1] == NULL)
3627 : : {
2861 andres@anarazel.de 3628 : 892 : phasedata->eqfunctions[aggnode->numCols - 1] =
3629 : 892 : execTuplesMatchPrepare(scanDesc,
3630 : : aggnode->numCols,
3631 : 892 : aggnode->grpColIdx,
3632 : 892 : aggnode->grpOperators,
2461 peter@eisentraut.org 3633 : 892 : aggnode->grpCollations,
3634 : : (PlanState *) aggstate);
3635 : : }
3636 : : }
3637 : :
3186 rhodiumtoad@postgres 3638 : 22731 : phasedata->aggnode = aggnode;
3639 : 22731 : phasedata->aggstrategy = aggnode->aggstrategy;
3640 : 22731 : phasedata->sortnode = sortnode;
3641 : : }
3642 : : }
3643 : :
3644 : : /*
3645 : : * Convert all_grouped_cols to a descending-order list.
3646 : : */
3867 andres@anarazel.de 3647 : 26004 : i = -1;
3648 [ + + ]: 32061 : while ((i = bms_next_member(all_grouped_cols, i)) >= 0)
3649 : 6057 : aggstate->all_grouped_cols = lcons_int(i, aggstate->all_grouped_cols);
3650 : :
3651 : : /*
3652 : : * Set up aggregate-result storage in the output expr context, and also
3653 : : * allocate my private per-agg working storage
3654 : : */
8412 tgl@sss.pgh.pa.us 3655 : 26004 : econtext = aggstate->ss.ps.ps_ExprContext;
6 michael@paquier.xyz 3656 :GNC 26004 : econtext->ecxt_aggvalues = palloc0_array(Datum, numaggs);
3657 : 26004 : econtext->ecxt_aggnulls = palloc0_array(bool, numaggs);
3658 : :
3659 : 26004 : peraggs = palloc0_array(AggStatePerAggData, numaggs);
3660 : 26004 : pertransstates = palloc0_array(AggStatePerTransData, numtrans);
3661 : :
3787 heikki.linnakangas@i 3662 :CBC 26004 : aggstate->peragg = peraggs;
3663 : 26004 : aggstate->pertrans = pertransstates;
3664 : :
3665 : :
6 michael@paquier.xyz 3666 :GNC 26004 : aggstate->all_pergroups = palloc0_array(AggStatePerGroup, numGroupingSets + numHashes);
2898 andres@anarazel.de 3667 :CBC 26004 : pergroups = aggstate->all_pergroups;
3668 : :
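                 :                 :     /*
                 :                 :      * all_pergroups holds the numGroupingSets per-group states for
                 :                 :      * sorted/plain grouping first, followed by numHashes entries that
                 :                 :      * hash_pergroup is pointed at further down.
                 :                 :      */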
3669 [ + + ]: 26004 : if (node->aggstrategy != AGG_HASHED)
3670 : : {
3671 [ + + ]: 45642 : for (i = 0; i < numGroupingSets; i++)
3672 : : {
6 michael@paquier.xyz 3673 :GNC 23037 : pergroups[i] = palloc0_array(AggStatePerGroupData, numaggs);
3674 : : }
3675 : :
2898 andres@anarazel.de 3676 :CBC 22605 : aggstate->pergroups = pergroups;
3677 : 22605 : pergroups += numGroupingSets;
3678 : : }
3679 : :
3680 : : /*
3681 : : * Hashing can only appear in the initial phase.
3682 : : */
3186 rhodiumtoad@postgres 3683 [ + + ]: 26004 : if (use_hashing)
3684 : : {
2042 tgl@sss.pgh.pa.us 3685 : 3518 : Plan *outerplan = outerPlan(node);
44 tgl@sss.pgh.pa.us 3686 :GNC 3518 : double totalGroups = 0;
3687 : :
1983 jdavis@postgresql.or 3688 :CBC 3518 : aggstate->hash_spill_rslot = ExecInitExtraTupleSlot(estate, scanDesc,
3689 : : &TTSOpsMinimalTuple);
3690 : 3518 : aggstate->hash_spill_wslot = ExecInitExtraTupleSlot(estate, scanDesc,
3691 : : &TTSOpsVirtual);
3692 : :
3693 : : /* this is an array of pointers, not structures */
2898 andres@anarazel.de 3694 : 3518 : aggstate->hash_pergroup = pergroups;
3695 : :
2042 tgl@sss.pgh.pa.us 3696 : 7036 : aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
3697 : 3518 : outerplan->plan_width,
3698 : : node->transitionSpace);
3699 : :
3700 : : /*
3701 : : * Consider all of the grouping sets together when setting the limits
3702 : : * and estimating the number of partitions. This can be inaccurate
3703 : : * when there is more than one grouping set, but should still be
3704 : : * reasonable.
3705 : : */
1168 drowley@postgresql.o 3706 [ + + ]: 7293 : for (int k = 0; k < aggstate->num_hashes; k++)
3707 : 3775 : totalGroups += aggstate->perhash[k].aggnode->numGroups;
3708 : :
2099 jdavis@postgresql.or 3709 : 3518 : hash_agg_set_limits(aggstate->hashentrysize, totalGroups, 0,
3710 : : &aggstate->hash_mem_limit,
3711 : : &aggstate->hash_ngroups_limit,
3712 : : &aggstate->hash_planned_partitions);
3186 rhodiumtoad@postgres 3713 : 3518 : find_hash_columns(aggstate);
3714 : :
3715 : : /* Skip massive memory allocation if we are just doing EXPLAIN */
1854 heikki.linnakangas@i 3716 [ + + ]: 3518 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
3717 : 2589 : build_hash_tables(aggstate);
3718 : :
8441 tgl@sss.pgh.pa.us 3719 : 3518 : aggstate->table_filled = false;
3720 : :
3721 : : /* Initialize this to 1, meaning nothing spilled, yet */
1966 drowley@postgresql.o 3722 : 3518 : aggstate->hash_batches_used = 1;
3723 : : }
3724 : :
3725 : : /*
3726 : : * Initialize current phase-dependent values to initial phase. The initial
3727 : : * phase is 1 (first sort pass) for all strategies that use sorting (if
3728 : : * hashing is being done too, then phase 0 is processed last); but if only
3729 : : * hashing is being done, then phase 0 is all there is.
3730 : : */
3186 rhodiumtoad@postgres 3731 [ + + ]: 26004 : if (node->aggstrategy == AGG_HASHED)
3732 : : {
3733 : 3399 : aggstate->current_phase = 0;
3734 : 3399 : initialize_phase(aggstate, 0);
3735 : 3399 : select_current_set(aggstate, 0, true);
3736 : : }
3737 : : else
3738 : : {
3739 : 22605 : aggstate->current_phase = 1;
3740 : 22605 : initialize_phase(aggstate, 1);
3741 : 22605 : select_current_set(aggstate, 0, false);
3742 : : }
3743 : :
3744 : : /*
3745 : : * Perform lookups of aggregate function info, and initialize the
3746 : : * unchanging fields of the per-agg and per-trans data.
3747 : : */
7874 neilc@samurai.com 3748 [ + + + + : 55029 : foreach(l, aggstate->aggs)
+ + ]
3749 : : {
1848 heikki.linnakangas@i 3750 : 29028 : Aggref *aggref = lfirst(l);
3751 : : AggStatePerAgg peragg;
3752 : : AggStatePerTrans pertrans;
3753 : : Oid aggTransFnInputTypes[FUNC_MAX_ARGS];
3754 : : int numAggTransFnArgs;
3755 : : int numDirectArgs;
3756 : : HeapTuple aggTuple;
3757 : : Form_pg_aggregate aggform;
3758 : : AclResult aclresult;
3759 : : Oid finalfn_oid;
3760 : : Oid serialfn_oid,
3761 : : deserialfn_oid;
3762 : : Oid aggOwner;
3763 : : Expr *finalfnexpr;
3764 : : Oid aggtranstype;
3765 : :
3766 : : /* Planner should have assigned aggregate to correct level */
8229 tgl@sss.pgh.pa.us 3767 [ - + ]: 29028 : Assert(aggref->agglevelsup == 0);
3768 : : /* ... and the split mode should match */
3460 3769 [ - + ]: 29028 : Assert(aggref->aggsplit == aggstate->aggsplit);
3770 : :
1848 heikki.linnakangas@i 3771 : 29028 : peragg = &peraggs[aggref->aggno];
3772 : :
3773 : : /* Check if we initialized the state for this aggregate already. */
3774 [ + + ]: 29028 : if (peragg->aggref != NULL)
8351 tgl@sss.pgh.pa.us 3775 : 244 : continue;
3776 : :
3787 heikki.linnakangas@i 3777 : 28784 : peragg->aggref = aggref;
1848 3778 : 28784 : peragg->transno = aggref->aggtransno;
3779 : :
3780 : : /* Fetch the pg_aggregate row */
5784 rhaas@postgresql.org 3781 : 28784 : aggTuple = SearchSysCache1(AGGFNOID,
3782 : : ObjectIdGetDatum(aggref->aggfnoid));
9578 tgl@sss.pgh.pa.us 3783 [ - + ]: 28784 : if (!HeapTupleIsValid(aggTuple))
8184 tgl@sss.pgh.pa.us 3784 [ # # ]:UBC 0 : elog(ERROR, "cache lookup failed for aggregate %u",
3785 : : aggref->aggfnoid);
9578 tgl@sss.pgh.pa.us 3786 :CBC 28784 : aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
3787 : :
3788 : : /* Check permission to call aggregate function */
1129 peter@eisentraut.org 3789 : 28784 : aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(),
3790 : : ACL_EXECUTE);
8632 tgl@sss.pgh.pa.us 3791 [ + + ]: 28784 : if (aclresult != ACLCHECK_OK)
2936 peter_e@gmx.net 3792 : 3 : aclcheck_error(aclresult, OBJECT_AGGREGATE,
8173 tgl@sss.pgh.pa.us 3793 : 3 : get_func_name(aggref->aggfnoid));
4631 rhaas@postgresql.org 3794 [ - + ]: 28781 : InvokeFunctionExecuteHook(aggref->aggfnoid);
3795 : :
3796 : : /* planner recorded transition state type in the Aggref itself */
3469 tgl@sss.pgh.pa.us 3797 : 28781 : aggtranstype = aggref->aggtranstype;
3798 [ - + ]: 28781 : Assert(OidIsValid(aggtranstype));
3799 : :
3800 : : /* Final function only required if we're finalizing the aggregates */
3460 3801 [ + + ]: 28781 : if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
3618 rhaas@postgresql.org 3802 : 2781 : peragg->finalfn_oid = finalfn_oid = InvalidOid;
3803 : : else
3460 tgl@sss.pgh.pa.us 3804 : 26000 : peragg->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
3805 : :
3549 rhaas@postgresql.org 3806 : 28781 : serialfn_oid = InvalidOid;
3807 : 28781 : deserialfn_oid = InvalidOid;
3808 : :
3809 : : /*
3810 : : * Check if serialization/deserialization is required. We only do it
3811 : : * for aggregates that have transtype INTERNAL.
3812 : : */
3460 tgl@sss.pgh.pa.us 3813 [ + + ]: 28781 : if (aggtranstype == INTERNALOID)
3814 : : {
3815 : : /*
3816 : : * The planner should only have generated a serialize agg node if
3817 : : * every aggregate with an INTERNAL state has a serialization
3818 : : * function. Verify that.
3819 : : */
3820 [ + + ]: 10451 : if (DO_AGGSPLIT_SERIALIZE(aggstate->aggsplit))
3821 : : {
3822 : : /* serialization only valid when not running finalfn */
3823 [ - + ]: 168 : Assert(DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
3824 : :
3825 [ - + ]: 168 : if (!OidIsValid(aggform->aggserialfn))
3460 tgl@sss.pgh.pa.us 3826 [ # # ]:UBC 0 : elog(ERROR, "serialfunc not provided for serialization aggregation");
3549 rhaas@postgresql.org 3827 :CBC 168 : serialfn_oid = aggform->aggserialfn;
3828 : : }
3829 : :
3830 : : /* Likewise for deserialization functions */
3460 tgl@sss.pgh.pa.us 3831 [ + + ]: 10451 : if (DO_AGGSPLIT_DESERIALIZE(aggstate->aggsplit))
3832 : : {
3833 : : /* deserialization only valid when combining states */
3834 [ - + ]: 60 : Assert(DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
3835 : :
3836 [ - + ]: 60 : if (!OidIsValid(aggform->aggdeserialfn))
3460 tgl@sss.pgh.pa.us 3837 [ # # ]:UBC 0 : elog(ERROR, "deserialfunc not provided for deserialization aggregation");
3549 rhaas@postgresql.org 3838 :CBC 60 : deserialfn_oid = aggform->aggdeserialfn;
3839 : : }
3840 : : }
3841 : :
3842 : : /* Check that aggregate owner has permission to call component fns */
3843 : : {
3844 : : HeapTuple procTuple;
3845 : :
5784 3846 : 28781 : procTuple = SearchSysCache1(PROCOID,
3847 : : ObjectIdGetDatum(aggref->aggfnoid));
7628 tgl@sss.pgh.pa.us 3848 [ - + ]: 28781 : if (!HeapTupleIsValid(procTuple))
7628 tgl@sss.pgh.pa.us 3849 [ # # ]:UBC 0 : elog(ERROR, "cache lookup failed for function %u",
3850 : : aggref->aggfnoid);
7628 tgl@sss.pgh.pa.us 3851 :CBC 28781 : aggOwner = ((Form_pg_proc) GETSTRUCT(procTuple))->proowner;
3852 : 28781 : ReleaseSysCache(procTuple);
3853 : :
3854 [ + + ]: 28781 : if (OidIsValid(finalfn_oid))
3855 : : {
1129 peter@eisentraut.org 3856 : 11262 : aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
3857 : : ACL_EXECUTE);
7628 tgl@sss.pgh.pa.us 3858 [ - + ]: 11262 : if (aclresult != ACLCHECK_OK)
2936 peter_e@gmx.net 3859 :UBC 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
7628 tgl@sss.pgh.pa.us 3860 : 0 : get_func_name(finalfn_oid));
4631 rhaas@postgresql.org 3861 [ - + ]:CBC 11262 : InvokeFunctionExecuteHook(finalfn_oid);
3862 : : }
3549 3863 [ + + ]: 28781 : if (OidIsValid(serialfn_oid))
3864 : : {
1129 peter@eisentraut.org 3865 : 168 : aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner,
3866 : : ACL_EXECUTE);
3549 rhaas@postgresql.org 3867 [ - + ]: 168 : if (aclresult != ACLCHECK_OK)
2936 peter_e@gmx.net 3868 :UBC 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
3549 rhaas@postgresql.org 3869 : 0 : get_func_name(serialfn_oid));
3549 rhaas@postgresql.org 3870 [ - + ]:CBC 168 : InvokeFunctionExecuteHook(serialfn_oid);
3871 : : }
3872 [ + + ]: 28781 : if (OidIsValid(deserialfn_oid))
3873 : : {
1129 peter@eisentraut.org 3874 : 60 : aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner,
3875 : : ACL_EXECUTE);
3549 rhaas@postgresql.org 3876 [ - + ]: 60 : if (aclresult != ACLCHECK_OK)
2936 peter_e@gmx.net 3877 :UBC 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
3549 rhaas@postgresql.org 3878 : 0 : get_func_name(deserialfn_oid));
3549 rhaas@postgresql.org 3879 [ - + ]:CBC 60 : InvokeFunctionExecuteHook(deserialfn_oid);
3880 : : }
3881 : : }
3882 : :
3883 : : /*
3884 : : * Get actual datatypes of the (nominal) aggregate inputs. These
3885 : : * could be different from the agg's declared input types, when the
3886 : : * agg accepts ANY or a polymorphic type.
3887 : : */
1626 drowley@postgresql.o 3888 : 28781 : numAggTransFnArgs = get_aggregate_argtypes(aggref,
3889 : : aggTransFnInputTypes);
3890 : :
3891 : : /* Count the "direct" arguments, if any */
4376 tgl@sss.pgh.pa.us 3892 : 28781 : numDirectArgs = list_length(aggref->aggdirectargs);
3893 : :
3894 : : /* Detect how many arguments to pass to the finalfn */
3787 heikki.linnakangas@i 3895 [ + + ]: 28781 : if (aggform->aggfinalextra)
1626 drowley@postgresql.o 3896 : 7217 : peragg->numFinalArgs = numAggTransFnArgs + 1;
3897 : : else
3787 heikki.linnakangas@i 3898 : 21564 : peragg->numFinalArgs = numDirectArgs + 1;
3899 : :
3900 : : /* Initialize any direct-argument expressions */
2983 tgl@sss.pgh.pa.us 3901 : 28781 : peragg->aggdirectargs = ExecInitExprList(aggref->aggdirectargs,
3902 : : (PlanState *) aggstate);
3903 : :
3904 : : /*
3905 : : * build expression trees using actual argument & result types for the
3906 : : * finalfn, if it exists and is required.
3907 : : */
8204 3908 [ + + ]: 28781 : if (OidIsValid(finalfn_oid))
3909 : : {
1626 drowley@postgresql.o 3910 : 11262 : build_aggregate_finalfn_expr(aggTransFnInputTypes,
3911 : : peragg->numFinalArgs,
3912 : : aggtranstype,
3913 : : aggref->aggtype,
3914 : : aggref->inputcollid,
3915 : : finalfn_oid,
3916 : : &finalfnexpr);
3787 heikki.linnakangas@i 3917 : 11262 : fmgr_info(finalfn_oid, &peragg->finalfn);
3918 : 11262 : fmgr_info_set_expr((Node *) finalfnexpr, &peragg->finalfn);
3919 : : }
3920 : :
3921 : : /* get info about the output value's datatype */
3460 tgl@sss.pgh.pa.us 3922 : 28781 : get_typlenbyval(aggref->aggtype,
3923 : : &peragg->resulttypeLen,
3924 : : &peragg->resulttypeByVal);
3925 : :
3926 : : /*
3927 : : * Build working state for invoking the transition function, if we
3928 : : * haven't done it already.
3929 : : */
1848 heikki.linnakangas@i 3930 : 28781 : pertrans = &pertransstates[aggref->aggtransno];
3931 [ + + ]: 28781 : if (pertrans->aggref == NULL)
3932 : : {
3933 : : Datum textInitVal;
3934 : : Datum initValue;
3935 : : bool initValueIsNull;
3936 : : Oid transfn_oid;
3937 : :
3938 : : /*
3939 : : * If this aggregation is performing state combines, then instead
3940 : : * of using the transition function, we'll use the combine
3941 : : * function.
3942 : : */
3943 [ + + ]: 28640 : if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
3944 : : {
3945 : 1121 : transfn_oid = aggform->aggcombinefn;
3946 : :
3947 : : /* If not set then the planner messed up */
3948 [ - + ]: 1121 : if (!OidIsValid(transfn_oid))
1848 heikki.linnakangas@i 3949 [ # # ]:UBC 0 : elog(ERROR, "combinefn not set for aggregate function");
3950 : : }
3951 : : else
1848 heikki.linnakangas@i 3952 :CBC 27519 : transfn_oid = aggform->aggtransfn;
3953 : :
1129 peter@eisentraut.org 3954 : 28640 : aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner, ACL_EXECUTE);
1848 heikki.linnakangas@i 3955 [ - + ]: 28640 : if (aclresult != ACLCHECK_OK)
1848 heikki.linnakangas@i 3956 :UBC 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
3957 : 0 : get_func_name(transfn_oid));
1848 heikki.linnakangas@i 3958 [ - + ]:CBC 28640 : InvokeFunctionExecuteHook(transfn_oid);
3959 : :
3960 : : /*
3961 : : * initval is potentially null, so don't try to access it as a
3962 : : * struct field. Must do it the hard way with SysCacheGetAttr.
3963 : : */
3964 : 28640 : textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
3965 : : Anum_pg_aggregate_agginitval,
3966 : : &initValueIsNull);
3967 [ + + ]: 28640 : if (initValueIsNull)
3968 : 15100 : initValue = (Datum) 0;
3969 : : else
3970 : 13540 : initValue = GetAggInitVal(textInitVal, aggtranstype);
3971 : :
1626 drowley@postgresql.o 3972 [ + + ]: 28640 : if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
3973 : : {
3974 : 1121 : Oid combineFnInputTypes[] = {aggtranstype,
3975 : : aggtranstype};
3976 : :
3977 : : /*
3978 : : * When combining, there is only one aggregated input: the
3979 : : * to-be-combined transition value from the child node. The
3980 : : * current transition value itself is not counted here.
3981 : : */
3982 : 1121 : pertrans->numTransInputs = 1;
3983 : :
3984 : : /* aggcombinefn always has two arguments of aggtranstype */
3985 : 1121 : build_pertrans_for_aggref(pertrans, aggstate, estate,
3986 : : aggref, transfn_oid, aggtranstype,
3987 : : serialfn_oid, deserialfn_oid,
3988 : : initValue, initValueIsNull,
3989 : : combineFnInputTypes, 2);
3990 : :
3991 : : /*
3992 : : * Ensure that a combine function to combine INTERNAL states
3993 : : * is not strict. This should have been checked during CREATE
3994 : : * AGGREGATE, but the strict property could have been changed
3995 : : * since then.
3996 : : */
3997 [ + + - + ]: 1121 : if (pertrans->transfn.fn_strict && aggtranstype == INTERNALOID)
1626 drowley@postgresql.o 3998 [ # # ]:UBC 0 : ereport(ERROR,
3999 : : (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
4000 : : errmsg("combine function with transition type %s must not be declared STRICT",
4001 : : format_type_be(aggtranstype))));
4002 : : }
4003 : : else
4004 : : {
4005 : : /* Detect how many arguments to pass to the transfn */
1626 drowley@postgresql.o 4006 [ + + ]:CBC 27519 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
4007 : 126 : pertrans->numTransInputs = list_length(aggref->args);
4008 : : else
4009 : 27393 : pertrans->numTransInputs = numAggTransFnArgs;
4010 : :
4011 : 27519 : build_pertrans_for_aggref(pertrans, aggstate, estate,
4012 : : aggref, transfn_oid, aggtranstype,
4013 : : serialfn_oid, deserialfn_oid,
4014 : : initValue, initValueIsNull,
4015 : : aggTransFnInputTypes,
4016 : : numAggTransFnArgs);
4017 : :
4018 : : /*
4019 : : * If the transfn is strict and the initval is NULL, make sure
4020 : : * input type and transtype are the same (or at least
4021 : : * binary-compatible), so that it's OK to use the first
4022 : : * aggregated input value as the initial transValue. This
4023 : : * should have been checked at agg definition time, but we
4024 : : * must check again in case the transfn's strictness property
4025 : : * has been changed.
4026 : : */
4027 [ + + + + ]: 27519 : if (pertrans->transfn.fn_strict && pertrans->initValueIsNull)
4028 : : {
4029 [ + - ]: 2569 : if (numAggTransFnArgs <= numDirectArgs ||
4030 [ - + ]: 2569 : !IsBinaryCoercible(aggTransFnInputTypes[numDirectArgs],
4031 : : aggtranstype))
1626 drowley@postgresql.o 4032 [ # # ]:UBC 0 : ereport(ERROR,
4033 : : (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
4034 : : errmsg("aggregate %u needs to have compatible input type and transition type",
4035 : : aggref->aggfnoid)));
4036 : : }
4037 : : }
4038 : : }
4039 : : else
1848 heikki.linnakangas@i 4040 :CBC 141 : pertrans->aggshared = true;
3787 4041 : 28781 : ReleaseSysCache(aggTuple);
4042 : : }
4043 : :
4044 : : /*
4045 : : * Last, check whether any more aggregates got added onto the node while
4046 : : * we processed the expressions for the aggregate arguments (including not
4047 : : * only the regular arguments and FILTER expressions handled immediately
4048 : : * above, but any direct arguments we might've handled earlier). If so,
4049 : : * we have nested aggregate functions, which is semantically nonsensical,
4050 : : * so complain. (This should have been caught by the parser, so we don't
4051 : : * need to work hard on a helpful error message; but we defend against it
4052 : : * here anyway, just to be sure.)
4053 : : */
1848 4054 [ - + ]: 26001 : if (numaggrefs != list_length(aggstate->aggs))
2898 andres@anarazel.de 4055 [ # # ]:UBC 0 : ereport(ERROR,
4056 : : (errcode(ERRCODE_GROUPING_ERROR),
4057 : : errmsg("aggregate function calls cannot be nested")));
4058 : :
4059 : : /*
4060 : : * Build expressions doing all the transition work at once. We build a
4061 : : * different one for each phase, as the number of transition function
4062 : : * invocations can differ between phases. Note this'll work both for
4063 : : * transition and combination functions (although there'll only be one
4064 : : * phase in the latter case).
4065 : : */
2898 andres@anarazel.de 4066 [ + + ]:CBC 74730 : for (phaseidx = 0; phaseidx < aggstate->numphases; phaseidx++)
4067 : : {
4068 : 48729 : AggStatePerPhase phase = &aggstate->phases[phaseidx];
4069 : 48729 : bool dohash = false;
4070 : 48729 : bool dosort = false;
4071 : :
4072 : : /* phase 0 doesn't necessarily exist */
4073 [ + + ]: 48729 : if (!phase->aggnode)
4074 : 22483 : continue;
4075 : :
4076 [ + + + + ]: 26246 : if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 1)
4077 : : {
4078 : : /*
4079 : : * Phase one, and only phase one, in a mixed agg performs both
4080 : : * sorting and aggregation.
4081 : : */
4082 : 119 : dohash = true;
4083 : 119 : dosort = true;
4084 : : }
4085 [ + + + + ]: 26127 : else if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 0)
4086 : : {
4087 : : /*
4088 : : * No need to compute a transition function for an AGG_MIXED phase
4089 : : * 0 - the contents of the hashtables will have been computed
4090 : : * during phase 1.
4091 : : */
4092 : 119 : continue;
4093 : : }
4094 [ + + ]: 26008 : else if (phase->aggstrategy == AGG_PLAIN ||
4095 [ + + ]: 4695 : phase->aggstrategy == AGG_SORTED)
4096 : : {
4097 : 22609 : dohash = false;
4098 : 22609 : dosort = true;
4099 : : }
4100 [ + - ]: 3399 : else if (phase->aggstrategy == AGG_HASHED)
4101 : : {
4102 : 3399 : dohash = true;
4103 : 3399 : dosort = false;
4104 : : }
4105 : : else
2898 andres@anarazel.de 4106 :UBC 0 : Assert(false);
4107 : :
2113 jdavis@postgresql.or 4108 :CBC 26127 : phase->evaltrans = ExecBuildAggTrans(aggstate, phase, dosort, dohash,
4109 : : false);
4110 : :
4111 : : /* cache compiled expression for outer slot without NULL check */
2099 4112 : 26127 : phase->evaltrans_cache[0][0] = phase->evaltrans;
4113 : : }
4114 : :
3787 heikki.linnakangas@i 4115 : 26001 : return aggstate;
4116 : : }
4117 : :
4118 : : /*
4119 : : * Build the state needed to calculate a state value for an aggregate.
4120 : : *
4121 : : * This initializes all the fields in 'pertrans'. 'aggref' is the aggregate
4122 : : * to initialize the state for. 'transfn_oid', 'aggtranstype', and the rest
4123 : : * of the arguments could be calculated from 'aggref', but the caller has
4124 : : * calculated them already, so might as well pass them.
4125 : : *
4126 : : * 'transfn_oid' may be either the Oid of the aggtransfn or the aggcombinefn.
4127 : : */
4128 : : static void
4129 : 28640 : build_pertrans_for_aggref(AggStatePerTrans pertrans,
4130 : : AggState *aggstate, EState *estate,
4131 : : Aggref *aggref,
4132 : : Oid transfn_oid, Oid aggtranstype,
4133 : : Oid aggserialfn, Oid aggdeserialfn,
4134 : : Datum initValue, bool initValueIsNull,
4135 : : Oid *inputTypes, int numArguments)
4136 : : {
4137 : 28640 : int numGroupingSets = Max(aggstate->maxsets, 1);
4138 : : Expr *transfnexpr;
4139 : : int numTransArgs;
3549 rhaas@postgresql.org 4140 : 28640 : Expr *serialfnexpr = NULL;
4141 : 28640 : Expr *deserialfnexpr = NULL;
4142 : : ListCell *lc;
4143 : : int numInputs;
4144 : : int numDirectArgs;
4145 : : List *sortlist;
4146 : : int numSortCols;
4147 : : int numDistinctCols;
4148 : : int i;
4149 : :
4150 : : /* Begin filling in the pertrans data */
3787 heikki.linnakangas@i 4151 : 28640 : pertrans->aggref = aggref;
2983 tgl@sss.pgh.pa.us 4152 : 28640 : pertrans->aggshared = false;
3787 heikki.linnakangas@i 4153 : 28640 : pertrans->aggCollation = aggref->inputcollid;
1626 drowley@postgresql.o 4154 : 28640 : pertrans->transfn_oid = transfn_oid;
3549 rhaas@postgresql.org 4155 : 28640 : pertrans->serialfn_oid = aggserialfn;
4156 : 28640 : pertrans->deserialfn_oid = aggdeserialfn;
3787 heikki.linnakangas@i 4157 : 28640 : pertrans->initValue = initValue;
4158 : 28640 : pertrans->initValueIsNull = initValueIsNull;
4159 : :
4160 : : /* Count the "direct" arguments, if any */
4161 : 28640 : numDirectArgs = list_length(aggref->aggdirectargs);
4162 : :
4163 : : /* Count the number of aggregated input columns */
4164 : 28640 : pertrans->numInputs = numInputs = list_length(aggref->args);
4165 : :
4166 : 28640 : pertrans->aggtranstype = aggtranstype;
4167 : :
4168 : : /* account for the current transition state */
1626 drowley@postgresql.o 4169 : 28640 : numTransArgs = pertrans->numTransInputs + 1;
4170 : :
4171 : : /*
4172 : : * Set up infrastructure for calling the transfn. Note that invtransfn is
4173 : : * not needed here.
4174 : : */
4175 : 28640 : build_aggregate_transfn_expr(inputTypes,
4176 : : numArguments,
4177 : : numDirectArgs,
4178 : 28640 : aggref->aggvariadic,
4179 : : aggtranstype,
4180 : : aggref->inputcollid,
4181 : : transfn_oid,
4182 : : InvalidOid,
4183 : : &transfnexpr,
4184 : : NULL);
4185 : :
4186 : 28640 : fmgr_info(transfn_oid, &pertrans->transfn);
4187 : 28640 : fmgr_info_set_expr((Node *) transfnexpr, &pertrans->transfn);
4188 : :
4189 : 28640 : pertrans->transfn_fcinfo =
4190 : 28640 : (FunctionCallInfo) palloc(SizeForFunctionCallInfo(numTransArgs));
4191 : 28640 : InitFunctionCallInfoData(*pertrans->transfn_fcinfo,
4192 : : &pertrans->transfn,
4193 : : numTransArgs,
4194 : : pertrans->aggCollation,
4195 : : (Node *) aggstate, NULL);
4196 : :
4197 : : /* get info about the state value's datatype */
3787 heikki.linnakangas@i 4198 : 28640 : get_typlenbyval(aggtranstype,
4199 : : &pertrans->transtypeLen,
4200 : : &pertrans->transtypeByVal);
4201 : :
3549 rhaas@postgresql.org 4202 [ + + ]: 28640 : if (OidIsValid(aggserialfn))
4203 : : {
3464 tgl@sss.pgh.pa.us 4204 : 168 : build_aggregate_serialfn_expr(aggserialfn,
4205 : : &serialfnexpr);
3549 rhaas@postgresql.org 4206 : 168 : fmgr_info(aggserialfn, &pertrans->serialfn);
4207 : 168 : fmgr_info_set_expr((Node *) serialfnexpr, &pertrans->serialfn);
4208 : :
2516 andres@anarazel.de 4209 : 168 : pertrans->serialfn_fcinfo =
4210 : 168 : (FunctionCallInfo) palloc(SizeForFunctionCallInfo(1));
4211 : 168 : InitFunctionCallInfoData(*pertrans->serialfn_fcinfo,
4212 : : &pertrans->serialfn,
4213 : : 1,
4214 : : InvalidOid,
4215 : : (Node *) aggstate, NULL);
4216 : : }
4217 : :
3549 rhaas@postgresql.org 4218 [ + + ]: 28640 : if (OidIsValid(aggdeserialfn))
4219 : : {
3464 tgl@sss.pgh.pa.us 4220 : 60 : build_aggregate_deserialfn_expr(aggdeserialfn,
4221 : : &deserialfnexpr);
3549 rhaas@postgresql.org 4222 : 60 : fmgr_info(aggdeserialfn, &pertrans->deserialfn);
4223 : 60 : fmgr_info_set_expr((Node *) deserialfnexpr, &pertrans->deserialfn);
4224 : :
2516 andres@anarazel.de 4225 : 60 : pertrans->deserialfn_fcinfo =
4226 : 60 : (FunctionCallInfo) palloc(SizeForFunctionCallInfo(2));
4227 : 60 : InitFunctionCallInfoData(*pertrans->deserialfn_fcinfo,
4228 : : &pertrans->deserialfn,
4229 : : 2,
4230 : : InvalidOid,
4231 : : (Node *) aggstate, NULL);
4232 : : }
4233 : :
4234 : : /*
4235 : : * If we're doing either DISTINCT or ORDER BY for a plain agg, then we
4236 : : * have a list of SortGroupClause nodes; fish out the data in them and
4237 : : * stick them into arrays. We ignore ORDER BY for an ordered-set agg,
4238 : : * however; the agg's transfn and finalfn are responsible for that.
4239 : : *
4240 : : * When the planner has set the aggpresorted flag, the input to the
4241 : : * aggregate is already correctly sorted. For ORDER BY aggregates we can
4242 : : * simply treat these as normal aggregates. For presorted DISTINCT
4243 : : * aggregates an extra step must be added to remove duplicate consecutive
4244 : : * inputs.
4245 : : *
4246 : : * Note that by construction, if there is a DISTINCT clause then the ORDER
4247 : : * BY clause is a prefix of it (see transformDistinctClause).
4248 : : */
3787 heikki.linnakangas@i 4249 [ + + ]: 28640 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
4250 : : {
4251 : 126 : sortlist = NIL;
4252 : 126 : numSortCols = numDistinctCols = 0;
1232 drowley@postgresql.o 4253 : 126 : pertrans->aggsortrequired = false;
4254 : : }
4255 [ + + + + ]: 28514 : else if (aggref->aggpresorted && aggref->aggdistinct == NIL)
4256 : : {
4257 : 1065 : sortlist = NIL;
4258 : 1065 : numSortCols = numDistinctCols = 0;
4259 : 1065 : pertrans->aggsortrequired = false;
4260 : : }
3787 heikki.linnakangas@i 4261 [ + + ]: 27449 : else if (aggref->aggdistinct)
4262 : : {
4263 : 294 : sortlist = aggref->aggdistinct;
4264 : 294 : numSortCols = numDistinctCols = list_length(sortlist);
4265 [ - + ]: 294 : Assert(numSortCols >= list_length(aggref->aggorder));
1232 drowley@postgresql.o 4266 : 294 : pertrans->aggsortrequired = !aggref->aggpresorted;
4267 : : }
4268 : : else
4269 : : {
3787 heikki.linnakangas@i 4270 : 27155 : sortlist = aggref->aggorder;
4271 : 27155 : numSortCols = list_length(sortlist);
4272 : 27155 : numDistinctCols = 0;
1232 drowley@postgresql.o 4273 : 27155 : pertrans->aggsortrequired = (numSortCols > 0);
4274 : : }
4275 : :
3787 heikki.linnakangas@i 4276 : 28640 : pertrans->numSortCols = numSortCols;
4277 : 28640 : pertrans->numDistinctCols = numDistinctCols;
4278 : :
4279 : : /*
4280 : : * If we have either sorting or filtering to do, create a tupledesc and
4281 : : * slot corresponding to the aggregated inputs (including sort
4282 : : * expressions) of the agg.
4283 : : */
2983 tgl@sss.pgh.pa.us 4284 [ + + + + ]: 28640 : if (numSortCols > 0 || aggref->aggfilter)
4285 : : {
2583 andres@anarazel.de 4286 : 718 : pertrans->sortdesc = ExecTypeFromTL(aggref->args);
2860 4287 : 718 : pertrans->sortslot =
2588 4288 : 718 : ExecInitExtraTupleSlot(estate, pertrans->sortdesc,
4289 : : &TTSOpsMinimalTuple);
4290 : : }
4291 : :
2983 tgl@sss.pgh.pa.us 4292 [ + + ]: 28640 : if (numSortCols > 0)
4293 : : {
4294 : : /*
4295 : : * We don't implement DISTINCT or ORDER BY aggs in the HASHED case
4296 : : * (yet)
4297 : : */
3186 rhodiumtoad@postgres 4298 [ + - - + ]: 363 : Assert(aggstate->aggstrategy != AGG_HASHED && aggstate->aggstrategy != AGG_MIXED);
4299 : :
4300 : : /* ORDER BY aggregates are not supported with partial aggregation */
1626 drowley@postgresql.o 4301 [ - + ]: 363 : Assert(!DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
4302 : :
4303 : : /* If we have only one input, we need its len/byval info. */
3787 heikki.linnakangas@i 4304 [ + + ]: 363 : if (numInputs == 1)
4305 : : {
4306 : 288 : get_typlenbyval(inputTypes[numDirectArgs],
4307 : : &pertrans->inputtypeLen,
4308 : : &pertrans->inputtypeByVal);
4309 : : }
4310 [ + + ]: 75 : else if (numDistinctCols > 0)
4311 : : {
4312 : : /* we will need an extra slot to store prior values */
2860 andres@anarazel.de 4313 : 54 : pertrans->uniqslot =
2588 4314 : 54 : ExecInitExtraTupleSlot(estate, pertrans->sortdesc,
4315 : : &TTSOpsMinimalTuple);
4316 : : }
4317 : :
4318 : : /* Extract the sort information for use later */
3787 heikki.linnakangas@i 4319 : 363 : pertrans->sortColIdx =
4320 : 363 : (AttrNumber *) palloc(numSortCols * sizeof(AttrNumber));
4321 : 363 : pertrans->sortOperators =
4322 : 363 : (Oid *) palloc(numSortCols * sizeof(Oid));
4323 : 363 : pertrans->sortCollations =
4324 : 363 : (Oid *) palloc(numSortCols * sizeof(Oid));
4325 : 363 : pertrans->sortNullsFirst =
4326 : 363 : (bool *) palloc(numSortCols * sizeof(bool));
4327 : :
4328 : 363 : i = 0;
4329 [ + - + + : 825 : foreach(lc, sortlist)
+ + ]
4330 : : {
4331 : 462 : SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc);
4332 : 462 : TargetEntry *tle = get_sortgroupclause_tle(sortcl, aggref->args);
4333 : :
4334 : : /* the parser should have made sure of this */
4335 [ - + ]: 462 : Assert(OidIsValid(sortcl->sortop));
4336 : :
4337 : 462 : pertrans->sortColIdx[i] = tle->resno;
4338 : 462 : pertrans->sortOperators[i] = sortcl->sortop;
4339 : 462 : pertrans->sortCollations[i] = exprCollation((Node *) tle->expr);
4340 : 462 : pertrans->sortNullsFirst[i] = sortcl->nulls_first;
4341 : 462 : i++;
4342 : : }
4343 [ - + ]: 363 : Assert(i == numSortCols);
4344 : : }
4345 : :
4346 [ + + ]: 28640 : if (aggref->aggdistinct)
4347 : : {
4348 : : Oid *ops;
4349 : :
4350 [ - + ]: 294 : Assert(numArguments > 0);
2861 andres@anarazel.de 4351 [ - + ]: 294 : Assert(list_length(aggref->aggdistinct) == numDistinctCols);
4352 : :
4353 : 294 : ops = palloc(numDistinctCols * sizeof(Oid));
4354 : :
3787 heikki.linnakangas@i 4355 : 294 : i = 0;
4356 [ + - + + : 678 : foreach(lc, aggref->aggdistinct)
+ + ]
2861 andres@anarazel.de 4357 : 384 : ops[i++] = ((SortGroupClause *) lfirst(lc))->eqop;
4358 : :
4359 : : /* lookup / build the necessary comparators */
4360 [ + + ]: 294 : if (numDistinctCols == 1)
4361 : 240 : fmgr_info(get_opcode(ops[0]), &pertrans->equalfnOne);
4362 : : else
4363 : 54 : pertrans->equalfnMulti =
4364 : 54 : execTuplesMatchPrepare(pertrans->sortdesc,
4365 : : numDistinctCols,
4366 : 54 : pertrans->sortColIdx,
4367 : : ops,
2461 peter@eisentraut.org 4368 : 54 : pertrans->sortCollations,
4369 : : &aggstate->ss.ps);
2861 andres@anarazel.de 4370 : 294 : pfree(ops);
4371 : : }
4372 : :
6 michael@paquier.xyz 4373 :GNC 28640 : pertrans->sortstates = palloc0_array(Tuplesortstate *, numGroupingSets);
10752 scrappy@hub.org 4374 :CBC 28640 : }
4375 : :
4376 : :
4377 : : static Datum
8650 tgl@sss.pgh.pa.us 4378 : 13540 : GetAggInitVal(Datum textInitVal, Oid transtype)
4379 : : {
4380 : : Oid typinput,
4381 : : typioparam;
4382 : : char *strInitVal;
4383 : : Datum initVal;
4384 : :
7863 4385 : 13540 : getTypeInputInfo(transtype, &typinput, &typioparam);
6475 4386 : 13540 : strInitVal = TextDatumGetCString(textInitVal);
7196 4387 : 13540 : initVal = OidInputFunctionCall(typinput, strInitVal,
4388 : : typioparam, -1);
8650 4389 : 13540 : pfree(strInitVal);
4390 : 13540 : return initVal;
4391 : : }
4392 : :
4393 : : void
8412 4394 : 25905 : ExecEndAgg(AggState *node)
4395 : : {
4396 : : PlanState *outerPlan;
4397 : : int transno;
3867 andres@anarazel.de 4398 : 25905 : int numGroupingSets = Max(node->maxsets, 1);
4399 : : int setno;
4400 : :
4401 : : /*
4402 : : * When ending a parallel worker, copy the statistics gathered by the
4403 : : * worker back into shared memory so that they can be picked up by the main
4404 : : * process to report in EXPLAIN ANALYZE.
4405 : : */
2006 drowley@postgresql.o 4406 [ + + + + ]: 25905 : if (node->shared_info && IsParallelWorker())
4407 : : {
4408 : : AggregateInstrumentation *si;
4409 : :
4410 [ - + ]: 84 : Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
4411 : 84 : si = &node->shared_info->sinstrument[ParallelWorkerNumber];
4412 : 84 : si->hash_batches_used = node->hash_batches_used;
4413 : 84 : si->hash_disk_used = node->hash_disk_used;
4414 : 84 : si->hash_mem_peak = node->hash_mem_peak;
4415 : : }
4416 : :
4417 : : /* Make sure we have closed any open tuplesorts */
4418 : :
3867 andres@anarazel.de 4419 [ + + ]: 25905 : if (node->sort_in)
4420 : 81 : tuplesort_end(node->sort_in);
4421 [ + + ]: 25905 : if (node->sort_out)
4422 : 24 : tuplesort_end(node->sort_out);
4423 : :
2099 jdavis@postgresql.or 4424 : 25905 : hashagg_reset_spill_state(node);
4425 : :
4426 : : /* Release hash tables too */
4427 [ + + ]: 25905 : if (node->hash_metacxt != NULL)
4428 : : {
4429 : 3514 : MemoryContextDelete(node->hash_metacxt);
4430 : 3514 : node->hash_metacxt = NULL;
4431 : : }
47 tgl@sss.pgh.pa.us 4432 [ + + ]:GNC 25905 : if (node->hash_tuplescxt != NULL)
4433 : : {
4434 : 3514 : MemoryContextDelete(node->hash_tuplescxt);
4435 : 3514 : node->hash_tuplescxt = NULL;
4436 : : }
4437 : :
3787 heikki.linnakangas@i 4438 [ + + ]:CBC 54447 : for (transno = 0; transno < node->numtrans; transno++)
4439 : : {
4440 : 28542 : AggStatePerTrans pertrans = &node->pertrans[transno];
4441 : :
3867 andres@anarazel.de 4442 [ + + ]: 57603 : for (setno = 0; setno < numGroupingSets; setno++)
4443 : : {
3787 heikki.linnakangas@i 4444 [ - + ]: 29061 : if (pertrans->sortstates[setno])
3787 heikki.linnakangas@i 4445 :UBC 0 : tuplesort_end(pertrans->sortstates[setno]);
4446 : : }
4447 : : }
4448 : :
4449 : : /* And ensure any agg shutdown callbacks have been called */
3867 andres@anarazel.de 4450 [ + + ]:CBC 52242 : for (setno = 0; setno < numGroupingSets; setno++)
4451 : 26337 : ReScanExprContext(node->aggcontexts[setno]);
3186 rhodiumtoad@postgres 4452 [ + + ]: 25905 : if (node->hashcontext)
4453 : 3514 : ReScanExprContext(node->hashcontext);
4454 : :
8412 tgl@sss.pgh.pa.us 4455 : 25905 : outerPlan = outerPlanState(node);
4456 : 25905 : ExecEndNode(outerPlan);
10752 scrappy@hub.org 4457 : 25905 : }
4458 : :
4459 : : void
5636 tgl@sss.pgh.pa.us 4460 : 27346 : ExecReScanAgg(AggState *node)
4461 : : {
8412 4462 : 27346 : ExprContext *econtext = node->ss.ps.ps_ExprContext;
3860 bruce@momjian.us 4463 : 27346 : PlanState *outerPlan = outerPlanState(node);
3867 andres@anarazel.de 4464 : 27346 : Agg *aggnode = (Agg *) node->ss.ps.plan;
4465 : : int transno;
3860 bruce@momjian.us 4466 : 27346 : int numGroupingSets = Max(node->maxsets, 1);
4467 : : int setno;
4468 : :
8236 tgl@sss.pgh.pa.us 4469 : 27346 : node->agg_done = false;
4470 : :
3186 rhodiumtoad@postgres 4471 [ + + ]: 27346 : if (node->aggstrategy == AGG_HASHED)
4472 : : {
4473 : : /*
4474 : : * In the hashed case, if we haven't yet built the hash table then we
4475 : : * can just return; nothing done yet, so nothing to undo. If subnode's
4476 : : * chgParam is not NULL then it will be re-scanned by ExecProcNode,
4477 : : * else no reason to re-scan it at all.
4478 : : */
8236 tgl@sss.pgh.pa.us 4479 [ + + ]: 6823 : if (!node->table_filled)
4480 : 76 : return;
4481 : :
4482 : : /*
4483 : : * If we do have the hash table, and it never spilled, and the subplan
4484 : : * does not have any parameter changes, and none of our own parameter
4485 : : * changes affect input expressions of the aggregate functions, then
4486 : : * we can just rescan the existing hash table; no need to build it
4487 : : * again.
4488 : : */
2099 jdavis@postgresql.or 4489 [ + + + - ]: 6747 : if (outerPlan->chgParam == NULL && !node->hash_ever_spilled &&
3401 tgl@sss.pgh.pa.us 4490 [ + + ]: 445 : !bms_overlap(node->ss.ps.chgParam, aggnode->aggParams))
4491 : : {
3186 rhodiumtoad@postgres 4492 : 433 : ResetTupleHashIterator(node->perhash[0].hashtable,
4493 : : &node->perhash[0].hashiter);
4494 : 433 : select_current_set(node, 0, true);
8236 tgl@sss.pgh.pa.us 4495 : 433 : return;
4496 : : }
4497 : : }
4498 : :
4499 : : /* Make sure we have closed any open tuplesorts */
3787 heikki.linnakangas@i 4500 [ + + ]: 60914 : for (transno = 0; transno < node->numtrans; transno++)
4501 : : {
3867 andres@anarazel.de 4502 [ + + ]: 68172 : for (setno = 0; setno < numGroupingSets; setno++)
4503 : : {
3787 heikki.linnakangas@i 4504 : 34095 : AggStatePerTrans pertrans = &node->pertrans[transno];
4505 : :
4506 [ - + ]: 34095 : if (pertrans->sortstates[setno])
4507 : : {
3787 heikki.linnakangas@i 4508 :UBC 0 : tuplesort_end(pertrans->sortstates[setno]);
4509 : 0 : pertrans->sortstates[setno] = NULL;
4510 : : }
4511 : : }
4512 : : }
4513 : :
4514 : : /*
4515 : : * We don't need to ReScanExprContext the output tuple context here;
4516 : : * ExecReScan already did it. But we do need to reset our per-grouping-set
4517 : : * contexts, which may have transvalues stored in them. (We use rescan
4518 : : * rather than just reset because transfns may have registered callbacks
4519 : : * that need to be run now.) For the AGG_HASHED case, see below.
4520 : : */
4521 : :
3867 andres@anarazel.de 4522 [ + + ]:CBC 53692 : for (setno = 0; setno < numGroupingSets; setno++)
4523 : : {
4524 : 26855 : ReScanExprContext(node->aggcontexts[setno]);
4525 : : }
4526 : :
4527 : : /* Release first tuple of group, if we have made a copy */
8412 tgl@sss.pgh.pa.us 4528 [ - + ]: 26837 : if (node->grp_firstTuple != NULL)
4529 : : {
8412 tgl@sss.pgh.pa.us 4530 :UBC 0 : heap_freetuple(node->grp_firstTuple);
4531 : 0 : node->grp_firstTuple = NULL;
4532 : : }
3867 andres@anarazel.de 4533 :CBC 26837 : ExecClearTuple(node->ss.ss_ScanTupleSlot);
4534 : :
4535 : : /* Forget current agg values */
8412 tgl@sss.pgh.pa.us 4536 [ + - + - : 60914 : MemSet(econtext->ecxt_aggvalues, 0, sizeof(Datum) * node->numaggs);
+ - + - +
+ ]
4537 [ + - + + : 26837 : MemSet(econtext->ecxt_aggnulls, 0, sizeof(bool) * node->numaggs);
+ - + - -
+ ]
4538 : :
4539 : : /*
4540 : : * With AGG_HASHED/MIXED, the hash table is allocated in a sub-context of
4541 : : * the hashcontext. This used to be an issue, but now, resetting a context
4542 : : * automatically deletes sub-contexts too.
4543 : : */
3186 rhodiumtoad@postgres 4544 [ + + + + ]: 26837 : if (node->aggstrategy == AGG_HASHED || node->aggstrategy == AGG_MIXED)
4545 : : {
2099 jdavis@postgresql.or 4546 : 6329 : hashagg_reset_spill_state(node);
4547 : :
4548 : 6329 : node->hash_ever_spilled = false;
4549 : 6329 : node->hash_spill_mode = false;
4550 : 6329 : node->hash_ngroups_current = 0;
4551 : :
3186 rhodiumtoad@postgres 4552 : 6329 : ReScanExprContext(node->hashcontext);
4553 : : /* Rebuild empty hash table(s) */
2127 jdavis@postgresql.or 4554 : 6329 : build_hash_tables(node);
8412 tgl@sss.pgh.pa.us 4555 : 6329 : node->table_filled = false;
4556 : : /* iterator will be reset when the table is filled */
4557 : :
2099 jdavis@postgresql.or 4558 : 6329 : hashagg_recompile_expressions(node, false, false);
4559 : : }
4560 : :
3186 rhodiumtoad@postgres 4561 [ + + ]: 26837 : if (node->aggstrategy != AGG_HASHED)
4562 : : {
4563 : : /*
4564 : : * Reset the per-group state (in particular, mark transvalues null)
4565 : : */
2905 andres@anarazel.de 4566 [ + + ]: 41064 : for (setno = 0; setno < numGroupingSets; setno++)
4567 : : {
4568 [ + - + - : 88683 : MemSet(node->pergroups[setno], 0,
+ - + - +
+ ]
4569 : : sizeof(AggStatePerGroupData) * node->numaggs);
4570 : : }
4571 : :
4572 : : /* reset to phase 1 */
3186 rhodiumtoad@postgres 4573 : 20523 : initialize_phase(node, 1);
4574 : :
3867 andres@anarazel.de 4575 : 20523 : node->input_done = false;
4576 : 20523 : node->projected_set = -1;
4577 : : }
4578 : :
3879 rhaas@postgresql.org 4579 [ + + ]: 26837 : if (outerPlan->chgParam == NULL)
4580 : 94 : ExecReScan(outerPlan);
4581 : : }
4582 : :
4583 : :
4584 : : /***********************************************************************
4585 : : * API exposed to aggregate functions
4586 : : ***********************************************************************/
4587 : :
4588 : :
4589 : : /*
4590 : : * AggCheckCallContext - test if a SQL function is being called as an aggregate
4591 : : *
4592 : : * The transition and/or final functions of an aggregate may want to verify
4593 : : * that they are being called as aggregates, rather than as plain SQL
4594 : : * functions. They should use this function to do so. The return value
4595 : : * is nonzero if being called as an aggregate, or zero if not. (Specific
4596 : : * nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more
4597 : : * values could conceivably appear in future.)
4598 : : *
4599 : : * If aggcontext isn't NULL, the function also stores at *aggcontext the
4600 : : * identity of the memory context that aggregate transition values are being
4601 : : * stored in. Note that the same aggregate call site (flinfo) may be called
4602 : : * interleaved on different transition values in different contexts, so it's
4603 : : * not kosher to cache aggcontext under fn_extra. It is, however, kosher to
4604 : : * cache it in the transvalue itself (for internal-type transvalues).
4605 : : */
4606 : : int
5790 tgl@sss.pgh.pa.us 4607 : 2706439 : AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
4608 : : {
4609 [ + + + + ]: 2706439 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4610 : : {
4611 [ + + ]: 2700552 : if (aggcontext)
4612 : : {
3860 bruce@momjian.us 4613 : 1284488 : AggState *aggstate = ((AggState *) fcinfo->context);
3186 rhodiumtoad@postgres 4614 : 1284488 : ExprContext *cxt = aggstate->curaggcontext;
4615 : :
3867 andres@anarazel.de 4616 : 1284488 : *aggcontext = cxt->ecxt_per_tuple_memory;
4617 : : }
5790 tgl@sss.pgh.pa.us 4618 : 2700552 : return AGG_CONTEXT_AGGREGATE;
4619 : : }
4620 [ + + + - ]: 5887 : if (fcinfo->context && IsA(fcinfo->context, WindowAggState))
4621 : : {
4622 [ + + ]: 4950 : if (aggcontext)
4266 4623 : 415 : *aggcontext = ((WindowAggState *) fcinfo->context)->curaggcontext;
5790 4624 : 4950 : return AGG_CONTEXT_WINDOW;
4625 : : }
4626 : :
4627 : : /* this is just to prevent "uninitialized variable" warnings */
4628 [ + + ]: 937 : if (aggcontext)
4629 : 913 : *aggcontext = NULL;
4630 : 937 : return 0;
4631 : : }
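
/*
 * Editor's illustrative sketch -- not part of nodeAgg.c.  A hypothetical
 * transition function "mysum_trans" showing the intended use of
 * AggCheckCallContext: long-lived transition state must be kept in the
 * returned aggcontext, never in the short-lived per-call context.  The
 * aggregate is assumed to be declared non-strict, with a NULL initcond and
 * an "internal" transition type holding a running int64 sum.  (A real
 * extension would also need PG_MODULE_MAGIC somewhere.)
 */
#include "postgres.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(mysum_trans);

Datum
mysum_trans(PG_FUNCTION_ARGS)
{
	MemoryContext aggcontext;
	int64	   *state;

	/* Refuse to run outside an aggregate (or window aggregate) call */
	if (!AggCheckCallContext(fcinfo, &aggcontext))
		elog(ERROR, "mysum_trans called in non-aggregate context");

	if (PG_ARGISNULL(0))
	{
		/* first row of the group: allocate state in the aggregate context */
		state = (int64 *) MemoryContextAlloc(aggcontext, sizeof(int64));
		*state = 0;
	}
	else
		state = (int64 *) PG_GETARG_POINTER(0);

	if (!PG_ARGISNULL(1))
		*state += PG_GETARG_INT64(1);

	PG_RETURN_POINTER(state);
}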
4632 : :
4633 : : /*
4634 : : * AggGetAggref - allow an aggregate support function to get its Aggref
4635 : : *
4636 : : * If the function is being called as an aggregate support function,
4637 : : * return the Aggref node for the aggregate call. Otherwise, return NULL.
4638 : : *
4639 : : * Aggregates sharing the same inputs and transition functions can get
4640 : : * merged into a single transition calculation. If the transition function
4641 : : * calls AggGetAggref, it will get some one of the Aggrefs for which it is
4642 : : * executing. It must therefore not pay attention to the Aggref fields that
4643 : : * relate to the final function, as those are indeterminate. But if a final
4644 : : * function calls AggGetAggref, it will get a precise result.
4645 : : *
4646 : : * Note that if an aggregate is being used as a window function, this will
4647 : : * return NULL. We could provide a similar function to return the relevant
4648 : : * WindowFunc node in such cases, but it's not needed yet.
4649 : : */
4650 : : Aggref *
4376 4651 : 123 : AggGetAggref(FunctionCallInfo fcinfo)
4652 : : {
4653 [ + - + - ]: 123 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4654 : : {
2983 4655 : 123 : AggState *aggstate = (AggState *) fcinfo->context;
4656 : : AggStatePerAgg curperagg;
4657 : : AggStatePerTrans curpertrans;
4658 : :
4659 : : /* check curperagg (valid when in a final function) */
4660 : 123 : curperagg = aggstate->curperagg;
4661 : :
2987 4662 [ - + ]: 123 : if (curperagg)
2987 tgl@sss.pgh.pa.us 4663 :UBC 0 : return curperagg->aggref;
4664 : :
4665 : : /* check curpertrans (valid when in a transition function) */
2983 tgl@sss.pgh.pa.us 4666 :CBC 123 : curpertrans = aggstate->curpertrans;
4667 : :
3787 heikki.linnakangas@i 4668 [ + - ]: 123 : if (curpertrans)
4669 : 123 : return curpertrans->aggref;
4670 : : }
4376 tgl@sss.pgh.pa.us 4671 :UBC 0 : return NULL;
4672 : : }
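
/*
 * Editor's illustrative sketch -- not part of nodeAgg.c.  A hypothetical
 * final function using AggGetAggref to inspect the Aggref it is being
 * evaluated for (for a final function the result is precise, per the
 * comment above).  Assumes the headers from the previous sketch plus
 * "nodes/primnodes.h" for the Aggref fields.
 */
PG_FUNCTION_INFO_V1(myagg_final_inspect);

Datum
myagg_final_inspect(PG_FUNCTION_ARGS)
{
	Aggref	   *aggref = AggGetAggref(fcinfo);

	if (aggref == NULL)
		elog(ERROR, "myagg_final_inspect called in non-aggregate context");

	/* e.g. adapt behavior to the number of aggregated arguments */
	elog(DEBUG1, "aggregate %u has %d aggregated argument(s)",
		 aggref->aggfnoid, list_length(aggref->args));

	PG_RETURN_NULL();
}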
4673 : :
4674 : : /*
4675 : : * AggGetTempMemoryContext - fetch short-term memory context for aggregates
4676 : : *
4677 : : * This is useful in agg final functions; the context returned is one that
4678 : : * the final function can safely reset as desired. This isn't useful for
4679 : : * transition functions, since the context returned MAY (we don't promise)
4680 : : * be the same as the context those are called in.
4681 : : *
4682 : : * As above, this is currently not useful for aggs called as window functions.
4683 : : */
4684 : : MemoryContext
4184 4685 : 0 : AggGetTempMemoryContext(FunctionCallInfo fcinfo)
4686 : : {
4376 4687 [ # # # # ]: 0 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4688 : : {
4689 : 0 : AggState *aggstate = (AggState *) fcinfo->context;
4690 : :
4184 4691 : 0 : return aggstate->tmpcontext->ecxt_per_tuple_memory;
4692 : : }
4376 4693 : 0 : return NULL;
4694 : : }
4695 : :
4696 : : /*
4697 : : * AggStateIsShared - find out whether transition state is shared
4698 : : *
4699 : : * If the function is being called as an aggregate support function,
4700 : : * return true if the aggregate's transition state is shared across
4701 : : * multiple aggregates, false if it is not.
4702 : : *
4703 : : * Returns true if not called as an aggregate support function.
4704 : : * This is intended as a conservative answer, ie "no you'd better not
4705 : : * scribble on your input". In particular, will return true if the
4706 : : * aggregate is being used as a window function, which is a scenario
4707 : : * in which changing the transition state is a bad idea. We might
4708 : : * want to refine the behavior for the window case in future.
4709 : : */
4710 : : bool
2983 tgl@sss.pgh.pa.us 4711 :CBC 123 : AggStateIsShared(FunctionCallInfo fcinfo)
4712 : : {
4713 [ + - + - ]: 123 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4714 : : {
4715 : 123 : AggState *aggstate = (AggState *) fcinfo->context;
4716 : : AggStatePerAgg curperagg;
4717 : : AggStatePerTrans curpertrans;
4718 : :
4719 : : /* check curperagg (valid when in a final function) */
4720 : 123 : curperagg = aggstate->curperagg;
4721 : :
4722 [ - + ]: 123 : if (curperagg)
2983 tgl@sss.pgh.pa.us 4723 :UBC 0 : return aggstate->pertrans[curperagg->transno].aggshared;
4724 : :
4725 : : /* check curpertrans (valid when in a transition function) */
2983 tgl@sss.pgh.pa.us 4726 :CBC 123 : curpertrans = aggstate->curpertrans;
4727 : :
4728 [ + - ]: 123 : if (curpertrans)
4729 : 123 : return curpertrans->aggshared;
4730 : : }
2983 tgl@sss.pgh.pa.us 4731 :UBC 0 : return true;
4732 : : }
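
/*
 * Editor's illustrative sketch -- not part of nodeAgg.c.  A hypothetical
 * final function consulting AggStateIsShared before deciding whether it
 * may modify its "internal" transition state in place.  The state struct
 * and names are invented for illustration; headers as in the first sketch.
 */
typedef struct myavg_state
{
	int64		count;
	double		sum;
} myavg_state;

PG_FUNCTION_INFO_V1(myavg_final);

Datum
myavg_final(PG_FUNCTION_ARGS)
{
	myavg_state *state;

	if (PG_ARGISNULL(0))
		PG_RETURN_NULL();
	state = (myavg_state *) PG_GETARG_POINTER(0);

	/*
	 * Only scribble on the transition state if it is not shared with
	 * sibling aggregates (and we are not running as a window function).
	 */
	if (!AggStateIsShared(fcinfo))
	{
		/* e.g. could fold pending work into *state in place here */
	}

	if (state->count == 0)
		PG_RETURN_NULL();
	PG_RETURN_FLOAT8(state->sum / (double) state->count);
}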
4733 : :
4734 : : /*
4735 : : * AggRegisterCallback - register a cleanup callback for an aggregate
4736 : : *
4737 : : * This is useful for aggs to register shutdown callbacks, which will ensure
4738 : : * that non-memory resources are freed. The callback will occur just before
4739 : : * the associated aggcontext (as returned by AggCheckCallContext) is reset,
4740 : : * either between groups or as a result of rescanning the query. The callback
4741 : : * will NOT be called on error paths. The typical use-case is for freeing of
4742 : : * tuplestores or tuplesorts maintained in aggcontext, or pins held by slots
4743 : : * created by the agg functions. (The callback will not be called until after
4744 : : * the result of the finalfn is no longer needed, so it's safe for the finalfn
4745 : : * to return data that will be freed by the callback.)
4746 : : *
4747 : : * As above, this is currently not useful for aggs called as window functions.
4748 : : */
4749 : : void
4184 tgl@sss.pgh.pa.us 4750 :CBC 330 : AggRegisterCallback(FunctionCallInfo fcinfo,
4751 : : ExprContextCallbackFunction func,
4752 : : Datum arg)
4753 : : {
4376 4754 [ + - + - ]: 330 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4755 : : {
4756 : 330 : AggState *aggstate = (AggState *) fcinfo->context;
3186 rhodiumtoad@postgres 4757 : 330 : ExprContext *cxt = aggstate->curaggcontext;
4758 : :
3867 andres@anarazel.de 4759 : 330 : RegisterExprContextCallback(cxt, func, arg);
4760 : :
4184 tgl@sss.pgh.pa.us 4761 : 330 : return;
4762 : : }
4184 tgl@sss.pgh.pa.us 4763 [ # # ]:UBC 0 : elog(ERROR, "aggregate function cannot register a callback in this context");
4764 : : }
4765 : :
4766 : :
4767 : : /* ----------------------------------------------------------------
4768 : : * Parallel Query Support
4769 : : * ----------------------------------------------------------------
4770 : : */
4771 : :
4772 : : /* ----------------------------------------------------------------
4773 : : * ExecAggEstimate
4774 : : *
4775 : : * Estimate space required to propagate aggregate statistics.
4776 : : * ----------------------------------------------------------------
4777 : : */
4778 : : void
2006 drowley@postgresql.o 4779 :CBC 293 : ExecAggEstimate(AggState *node, ParallelContext *pcxt)
4780 : : {
4781 : : Size size;
4782 : :
4783 : : /* don't need this if not instrumenting or no workers */
4784 [ + + - + ]: 293 : if (!node->ss.ps.instrument || pcxt->nworkers == 0)
4785 : 242 : return;
4786 : :
4787 : 51 : size = mul_size(pcxt->nworkers, sizeof(AggregateInstrumentation));
4788 : 51 : size = add_size(size, offsetof(SharedAggInfo, sinstrument));
4789 : 51 : shm_toc_estimate_chunk(&pcxt->estimator, size);
4790 : 51 : shm_toc_estimate_keys(&pcxt->estimator, 1);
4791 : : }
4792 : :
4793 : : /* ----------------------------------------------------------------
4794 : : * ExecAggInitializeDSM
4795 : : *
4796 : : * Initialize DSM space for aggregate statistics.
4797 : : * ----------------------------------------------------------------
4798 : : */
4799 : : void
4800 : 293 : ExecAggInitializeDSM(AggState *node, ParallelContext *pcxt)
4801 : : {
4802 : : Size size;
4803 : :
4804 : : /* don't need this if not instrumenting or no workers */
4805 [ + + - + ]: 293 : if (!node->ss.ps.instrument || pcxt->nworkers == 0)
4806 : 242 : return;
4807 : :
4808 : 51 : size = offsetof(SharedAggInfo, sinstrument)
4809 : 51 : + pcxt->nworkers * sizeof(AggregateInstrumentation);
4810 : 51 : node->shared_info = shm_toc_allocate(pcxt->toc, size);
4811 : : /* ensure any unfilled slots will contain zeroes */
4812 : 51 : memset(node->shared_info, 0, size);
4813 : 51 : node->shared_info->num_workers = pcxt->nworkers;
4814 : 51 : shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
4815 : 51 : node->shared_info);
4816 : : }
4817 : :
4818 : : /* ----------------------------------------------------------------
4819 : : * ExecAggInitializeWorker
4820 : : *
4821 : : * Attach worker to DSM space for aggregate statistics.
4822 : : * ----------------------------------------------------------------
4823 : : */
4824 : : void
4825 : 830 : ExecAggInitializeWorker(AggState *node, ParallelWorkerContext *pwcxt)
4826 : : {
4827 : 830 : node->shared_info =
4828 : 830 : shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
4829 : 830 : }
4830 : :
4831 : : /* ----------------------------------------------------------------
4832 : : * ExecAggRetrieveInstrumentation
4833 : : *
4834 : : * Transfer aggregate statistics from DSM to private memory.
4835 : : * ----------------------------------------------------------------
4836 : : */
4837 : : void
4838 : 51 : ExecAggRetrieveInstrumentation(AggState *node)
4839 : : {
4840 : : Size size;
4841 : : SharedAggInfo *si;
4842 : :
4843 [ - + ]: 51 : if (node->shared_info == NULL)
2006 drowley@postgresql.o 4844 :UBC 0 : return;
4845 : :
2006 drowley@postgresql.o 4846 :CBC 51 : size = offsetof(SharedAggInfo, sinstrument)
4847 : 51 : + node->shared_info->num_workers * sizeof(AggregateInstrumentation);
4848 : 51 : si = palloc(size);
4849 : 51 : memcpy(si, node->shared_info, size);
4850 : 51 : node->shared_info = si;
4851 : : }