Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nodeHash.c
4 : : * Routines to hash relations for hashjoin
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/executor/nodeHash.c
12 : : *
13 : : * See note on parallelism in nodeHashjoin.c.
14 : : *
15 : : *-------------------------------------------------------------------------
16 : : */
17 : : /*
18 : : * INTERFACE ROUTINES
19 : : * MultiExecHash - generate an in-memory hash table of the relation
20 : : * ExecInitHash - initialize node and subnodes
21 : : * ExecEndHash - shutdown node and subnodes
22 : : */
23 : :
24 : : #include "postgres.h"
25 : :
26 : : #include <math.h>
27 : : #include <limits.h>
28 : :
29 : : #include "access/htup_details.h"
30 : : #include "access/parallel.h"
31 : : #include "catalog/pg_statistic.h"
32 : : #include "commands/tablespace.h"
33 : : #include "executor/executor.h"
34 : : #include "executor/hashjoin.h"
35 : : #include "executor/nodeHash.h"
36 : : #include "executor/nodeHashjoin.h"
37 : : #include "miscadmin.h"
38 : : #include "port/pg_bitutils.h"
39 : : #include "utils/lsyscache.h"
40 : : #include "utils/memutils.h"
41 : : #include "utils/syscache.h"
42 : : #include "utils/wait_event.h"
43 : :
44 : : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
45 : : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
46 : : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
47 : : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
48 : : static void ExecHashBuildSkewHash(HashState *hashstate,
49 : : HashJoinTable hashtable, Hash *node,
50 : : int mcvsToUse);
51 : : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
52 : : TupleTableSlot *slot,
53 : : uint32 hashvalue,
54 : : int bucketNumber);
55 : : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
56 : :
57 : : static void *dense_alloc(HashJoinTable hashtable, Size size);
58 : : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
59 : : size_t size,
60 : : dsa_pointer *shared);
61 : : static void MultiExecPrivateHash(HashState *node);
62 : : static void MultiExecParallelHash(HashState *node);
63 : : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
64 : : int bucketno);
65 : : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
66 : : HashJoinTuple tuple);
67 : : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
68 : : HashJoinTuple tuple,
69 : : dsa_pointer tuple_shared);
70 : : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
71 : : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
72 : : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
73 : : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
74 : : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
75 : : dsa_pointer *shared);
76 : : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
77 : : int batchno,
78 : : size_t size);
79 : : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
80 : : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
81 : :
82 : :
83 : : /* ----------------------------------------------------------------
84 : : * ExecHash
85 : : *
86 : : * stub for pro forma compliance
87 : : * ----------------------------------------------------------------
88 : : */
89 : : static TupleTableSlot *
3024 andres@anarazel.de 90 :UBC 0 : ExecHash(PlanState *pstate)
91 : : {
7337 tgl@sss.pgh.pa.us 92 [ # # ]: 0 : elog(ERROR, "Hash node does not support ExecProcNode call convention");
93 : : return NULL;
94 : : }
95 : :
96 : : /* ----------------------------------------------------------------
97 : : * MultiExecHash
98 : : *
99 : : * build hash table for hashjoin, doing partitioning if more
100 : : * than one batch is required.
101 : : * ----------------------------------------------------------------
102 : : */
103 : : Node *
7499 tgl@sss.pgh.pa.us 104 :CBC 13044 : MultiExecHash(HashState *node)
105 : : {
106 : : /* must provide our own instrumentation support */
2868 andres@anarazel.de 107 [ + + ]: 13044 : if (node->ps.instrument)
108 : 168 : InstrStartNode(node->ps.instrument);
109 : :
110 [ + + ]: 13044 : if (node->parallel_state != NULL)
111 : 207 : MultiExecParallelHash(node);
112 : : else
113 : 12837 : MultiExecPrivateHash(node);
114 : :
115 : : /* must provide our own instrumentation support */
116 [ + + ]: 13042 : if (node->ps.instrument)
117 : 168 : InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
118 : :
119 : : /*
120 : : * We do not return the hash table directly because it's not a subtype of
121 : : * Node, and so would violate the MultiExecProcNode API. Instead, our
122 : : * parent Hashjoin node is expected to know how to fish it out of our node
123 : : * state. Ugly but not really worth cleaning up, since Hashjoin knows
124 : : * quite a bit more about Hash besides that.
125 : : */
126 : 13042 : return NULL;
127 : : }
128 : :
129 : : /* ----------------------------------------------------------------
130 : : * MultiExecPrivateHash
131 : : *
132 : : * parallel-oblivious version, building a backend-private
133 : : * hash table and (if necessary) batch files.
134 : : * ----------------------------------------------------------------
135 : : */
136 : : static void
137 : 12837 : MultiExecPrivateHash(HashState *node)
138 : : {
139 : : PlanState *outerNode;
140 : : HashJoinTable hashtable;
141 : : TupleTableSlot *slot;
142 : : ExprContext *econtext;
143 : :
144 : : /*
145 : : * get state info from node
146 : : */
8362 tgl@sss.pgh.pa.us 147 : 12837 : outerNode = outerPlanState(node);
148 : 12837 : hashtable = node->hashtable;
149 : :
150 : : /*
151 : : * set expression context
152 : : */
153 : 12837 : econtext = node->ps.ps_ExprContext;
154 : :
155 : : /*
156 : : * Get all tuples from the node below the Hash node and insert into the
157 : : * hash table (or temp files).
158 : : */
159 : : for (;;)
10277 bruce@momjian.us 160 : 4445202 : {
161 : : bool isnull;
162 : : Datum hashdatum;
163 : :
7337 tgl@sss.pgh.pa.us 164 : 4458039 : slot = ExecProcNode(outerNode);
165 [ + + + + ]: 4458037 : if (TupIsNull(slot))
166 : : break;
167 : : /* We have to compute the hash value */
2278 andres@anarazel.de 168 : 4445202 : econtext->ecxt_outertuple = slot;
169 : :
433 drowley@postgresql.o 170 : 4445202 : ResetExprContext(econtext);
171 : :
172 : 4445202 : hashdatum = ExecEvalExprSwitchContext(node->hash_expr, econtext,
173 : : &isnull);
174 : :
175 [ + + ]: 4445202 : if (!isnull)
176 : : {
177 : 4445153 : uint32 hashvalue = DatumGetUInt32(hashdatum);
178 : : int bucketNumber;
179 : :
6064 tgl@sss.pgh.pa.us 180 : 4445153 : bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
181 [ + + ]: 4445153 : if (bucketNumber != INVALID_SKEW_BUCKET_NO)
182 : : {
183 : : /* It's a skew tuple, so put it into that hash table */
184 : 294 : ExecHashSkewTableInsert(hashtable, slot, hashvalue,
185 : : bucketNumber);
4032 kgrittn@postgresql.o 186 : 294 : hashtable->skewTuples += 1;
187 : : }
188 : : else
189 : : {
190 : : /* Not subject to skew optimization, so insert normally */
6064 tgl@sss.pgh.pa.us 191 : 4444859 : ExecHashTableInsert(hashtable, slot, hashvalue);
192 : : }
6847 193 : 4445153 : hashtable->totalTuples += 1;
194 : : }
195 : : }
196 : :
197 : : /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
4032 kgrittn@postgresql.o 198 [ + + ]: 12835 : if (hashtable->nbuckets != hashtable->nbuckets_optimal)
199 : 75 : ExecHashIncreaseNumBuckets(hashtable);
200 : :
201 : : /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
202 : 12835 : hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
203 [ + + ]: 12835 : if (hashtable->spaceUsed > hashtable->spacePeak)
204 : 12811 : hashtable->spacePeak = hashtable->spaceUsed;
205 : :
2868 andres@anarazel.de 206 : 12835 : hashtable->partialTuples = hashtable->totalTuples;
207 : 12835 : }
208 : :
209 : : /* ----------------------------------------------------------------
210 : : * MultiExecParallelHash
211 : : *
212 : : * parallel-aware version, building a shared hash table and
213 : : * (if necessary) batch files using the combined effort of
214 : : * a set of co-operating backends.
215 : : * ----------------------------------------------------------------
216 : : */
217 : : static void
218 : 207 : MultiExecParallelHash(HashState *node)
219 : : {
220 : : ParallelHashJoinState *pstate;
221 : : PlanState *outerNode;
222 : : HashJoinTable hashtable;
223 : : TupleTableSlot *slot;
224 : : ExprContext *econtext;
225 : : uint32 hashvalue;
226 : : Barrier *build_barrier;
227 : : int i;
228 : :
229 : : /*
230 : : * get state info from node
231 : : */
232 : 207 : outerNode = outerPlanState(node);
233 : 207 : hashtable = node->hashtable;
234 : :
235 : : /*
236 : : * set expression context
237 : : */
238 : 207 : econtext = node->ps.ps_ExprContext;
239 : :
240 : : /*
241 : : * Synchronize the parallel hash table build. At this stage we know that
242 : : * the shared hash table has been or is being set up by
243 : : * ExecHashTableCreate(), but we don't know if our peers have returned
244 : : * from there or are here in MultiExecParallelHash(), and if so how far
245 : : * through they are. To find out, we check the build_barrier phase and
246 : : * then jump to the right step in the build algorithm.
247 : : */
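	/*
	 * [Editorial sketch, not part of the original source.]  Judging from the
	 * phase names used in this file, the build barrier advances roughly as:
	 *
	 *   PHJ_BUILD_ELECT -> PHJ_BUILD_ALLOCATE -> PHJ_BUILD_HASH_INNER
	 *     -> PHJ_BUILD_HASH_OUTER -> PHJ_BUILD_RUN -> PHJ_BUILD_FREE
	 *
	 * A participant arriving late simply reads the current phase and joins
	 * in at the matching case of the switch statement below.
	 */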
248 : 207 : pstate = hashtable->parallel_state;
249 : 207 : build_barrier = &pstate->build_barrier;
949 tmunro@postgresql.or 250 [ - + ]: 207 : Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
2868 andres@anarazel.de 251 [ + + + ]: 207 : switch (BarrierPhase(build_barrier))
252 : : {
949 tmunro@postgresql.or 253 : 91 : case PHJ_BUILD_ALLOCATE:
254 : :
255 : : /*
256 : : * Either I just allocated the initial hash table in
257 : : * ExecHashTableCreate(), or someone else is doing that. Either
258 : : * way, wait for everyone to arrive here so we can proceed.
259 : : */
1990 tgl@sss.pgh.pa.us 260 : 91 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
261 : : /* Fall through. */
262 : :
949 tmunro@postgresql.or 263 : 179 : case PHJ_BUILD_HASH_INNER:
264 : :
265 : : /*
266 : : * It's time to begin hashing, or if we just arrived here then
267 : : * hashing is already underway, so join in that effort. While
268 : : * hashing we have to be prepared to help increase the number of
269 : : * batches or buckets at any time, and if we arrived here when
270 : : * that was already underway we'll have to help complete that work
271 : : * immediately so that it's safe to access batches and buckets
272 : : * below.
273 : : */
2868 andres@anarazel.de 274 [ + + ]: 179 : if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
275 : : PHJ_GROW_BATCHES_ELECT)
276 : 1 : ExecParallelHashIncreaseNumBatches(hashtable);
277 [ - + ]: 179 : if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
278 : : PHJ_GROW_BUCKETS_ELECT)
2868 andres@anarazel.de 279 :UBC 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
2868 andres@anarazel.de 280 :CBC 179 : ExecParallelHashEnsureBatchAccessors(hashtable);
281 : 179 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
282 : : for (;;)
283 : 1080096 : {
284 : : bool isnull;
285 : :
286 : 1080275 : slot = ExecProcNode(outerNode);
287 [ + + + + ]: 1080275 : if (TupIsNull(slot))
288 : : break;
2278 289 : 1080096 : econtext->ecxt_outertuple = slot;
290 : :
433 drowley@postgresql.o 291 : 1080096 : ResetExprContext(econtext);
292 : :
293 : 1080096 : hashvalue = DatumGetUInt32(ExecEvalExprSwitchContext(node->hash_expr,
294 : : econtext,
295 : : &isnull));
296 : :
297 [ + - ]: 1080096 : if (!isnull)
2868 andres@anarazel.de 298 : 1080096 : ExecParallelHashTableInsert(hashtable, slot, hashvalue);
299 : 1080096 : hashtable->partialTuples++;
300 : : }
301 : :
302 : : /*
303 : : * Make sure that any tuples we wrote to disk are visible to
304 : : * others before anyone tries to load them.
305 : : */
306 [ + + ]: 833 : for (i = 0; i < hashtable->nbatch; ++i)
307 : 654 : sts_end_write(hashtable->batches[i].inner_tuples);
308 : :
309 : : /*
310 : : * Update shared counters. We need an accurate total tuple count
311 : : * to control the empty table optimization.
312 : : */
313 : 179 : ExecParallelHashMergeCounters(hashtable);
314 : :
2860 315 : 179 : BarrierDetach(&pstate->grow_buckets_barrier);
316 : 179 : BarrierDetach(&pstate->grow_batches_barrier);
317 : :
318 : : /*
319 : : * Wait for everyone to finish building and flushing files and
320 : : * counters.
321 : : */
2868 322 [ + + ]: 179 : if (BarrierArriveAndWait(build_barrier,
323 : : WAIT_EVENT_HASH_BUILD_HASH_INNER))
324 : : {
325 : : /*
326 : : * Elect one backend to disable any further growth. Batches
327 : : * are now fixed. While building them we made sure they'd fit
328 : : * in our memory budget when we load them back in later (or we
329 : : * tried to do that and gave up because we detected extreme
330 : : * skew).
331 : : */
332 : 87 : pstate->growth = PHJ_GROWTH_DISABLED;
333 : : }
334 : : }
335 : :
336 : : /*
337 : : * We're not yet attached to a batch. We all agree on the dimensions and
338 : : * number of inner tuples (for the empty table optimization).
339 : : */
340 : 207 : hashtable->curbatch = -1;
341 : 207 : hashtable->nbuckets = pstate->nbuckets;
47 michael@paquier.xyz 342 :GNC 207 : hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
2868 andres@anarazel.de 343 :CBC 207 : hashtable->totalTuples = pstate->total_tuples;
344 : :
345 : : /*
346 : : * Unless we're completely done and the batch state has been freed, make
347 : : * sure we have accessors.
348 : : */
949 tmunro@postgresql.or 349 [ + - ]: 207 : if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
951 350 : 207 : ExecParallelHashEnsureBatchAccessors(hashtable);
351 : :
352 : : /*
353 : : * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
354 : : * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
355 : : * there already).
356 : : */
949 357 [ + + - + : 207 : Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
- - ]
358 : : BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
359 : : BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
10702 scrappy@hub.org 360 : 207 : }
361 : :
362 : : /* ----------------------------------------------------------------
363 : : * ExecInitHash
364 : : *
365 : : * Init routine for Hash node
366 : : * ----------------------------------------------------------------
367 : : */
368 : : HashState *
7181 tgl@sss.pgh.pa.us 369 : 17845 : ExecInitHash(Hash *node, EState *estate, int eflags)
370 : : {
371 : : HashState *hashstate;
372 : :
373 : : /* check for unsupported flags */
374 [ - + ]: 17845 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
375 : :
376 : : /*
377 : : * create state structure
378 : : */
10277 bruce@momjian.us 379 : 17845 : hashstate = makeNode(HashState);
8362 tgl@sss.pgh.pa.us 380 : 17845 : hashstate->ps.plan = (Plan *) node;
381 : 17845 : hashstate->ps.state = estate;
3024 andres@anarazel.de 382 : 17845 : hashstate->ps.ExecProcNode = ExecHash;
383 : : /* delay building hashtable until ExecHashTableCreate() in executor run */
9659 tgl@sss.pgh.pa.us 384 : 17845 : hashstate->hashtable = NULL;
385 : :
386 : : /*
387 : : * Miscellaneous initialization
388 : : *
389 : : * create expression context for node
390 : : */
8362 391 : 17845 : ExecAssignExprContext(estate, &hashstate->ps);
392 : :
393 : : /*
394 : : * initialize child nodes
395 : : */
7181 396 : 17845 : outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
397 : :
398 : : /*
399 : : * initialize our result slot and type. No need to build projection
400 : : * because this node doesn't do projections.
401 : : */
2538 andres@anarazel.de 402 : 17845 : ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
8362 tgl@sss.pgh.pa.us 403 : 17845 : hashstate->ps.ps_ProjInfo = NULL;
404 : :
433 drowley@postgresql.o 405 [ - + ]: 17845 : Assert(node->plan.qual == NIL);
406 : :
407 : : /*
408 : : * Delay initialization of hash_expr until ExecInitHashJoin(). We cannot
409 : : * build the ExprState here as we don't yet know the join type we're going
410 : : * to be hashing values for, and we need to know that before calling
411 : : * ExecBuildHash32Expr, as the keep_nulls parameter depends on the join
412 : : * type.
413 : : */
414 : 17845 : hashstate->hash_expr = NULL;
415 : :
8362 tgl@sss.pgh.pa.us 416 : 17845 : return hashstate;
417 : : }
418 : :
419 : : /* ---------------------------------------------------------------
420 : : * ExecEndHash
421 : : *
422 : : * clean up routine for Hash node
423 : : * ----------------------------------------------------------------
424 : : */
425 : : void
426 : 17787 : ExecEndHash(HashState *node)
427 : : {
428 : : PlanState *outerPlan;
429 : :
430 : : /*
431 : : * shut down the subplan
432 : : */
433 : 17787 : outerPlan = outerPlanState(node);
434 : 17787 : ExecEndNode(outerPlan);
10277 bruce@momjian.us 435 : 17787 : }
436 : :
437 : :
438 : : /* ----------------------------------------------------------------
439 : : * ExecHashTableCreate
440 : : *
441 : : * create an empty hashtable data structure for hashjoin.
442 : : * ----------------------------------------------------------------
443 : : */
444 : : HashJoinTable
433 drowley@postgresql.o 445 : 13044 : ExecHashTableCreate(HashState *state)
446 : : {
447 : : Hash *node;
448 : : HashJoinTable hashtable;
449 : : Plan *outerNode;
450 : : size_t space_allowed;
451 : : int nbuckets;
452 : : int nbatch;
453 : : double rows;
454 : : int num_skew_mcvs;
455 : : int log2_nbuckets;
456 : : MemoryContext oldcxt;
457 : :
458 : : /*
459 : : * Get information about the size of the relation to be hashed (it's the
460 : : * "outer" subtree of this node, but the inner relation of the hashjoin).
461 : : * Compute the appropriate size of the hash table.
462 : : */
2868 andres@anarazel.de 463 : 13044 : node = (Hash *) state->ps.plan;
10277 bruce@momjian.us 464 : 13044 : outerNode = outerPlan(node);
465 : :
466 : : /*
467 : : * If this is a shared hash table with a partial plan, then we can't use
468 : : * outerNode->plan_rows to estimate its size. We need an estimate of the
469 : : * total number of rows across all copies of the partial plan.
470 : : */
2868 andres@anarazel.de 471 [ + + ]: 13044 : rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
472 : :
473 : 12837 : ExecChooseHashTableSize(rows, outerNode->plan_width,
6064 tgl@sss.pgh.pa.us 474 : 13044 : OidIsValid(node->skewTable),
2868 andres@anarazel.de 475 : 13044 : state->parallel_state != NULL,
476 [ + + ]: 13044 : state->parallel_state != NULL ?
477 : 207 : state->parallel_state->nparticipants - 1 : 0,
478 : : &space_allowed,
479 : : &nbuckets, &nbatch, &num_skew_mcvs);
480 : :
481 : : /* nbuckets must be a power of 2 */
47 michael@paquier.xyz 482 :GNC 13044 : log2_nbuckets = pg_ceil_log2_32(nbuckets);
6723 tgl@sss.pgh.pa.us 483 [ - + ]:CBC 13044 : Assert(nbuckets == (1 << log2_nbuckets));
484 : :
485 : : /*
486 : : * Initialize the hash table control block.
487 : : *
488 : : * The hashtable control block is just palloc'd from the executor's
489 : : * per-query memory context. Everything else should be kept inside the
490 : : * subsidiary hashCxt, batchCxt or spillCxt.
491 : : */
1141 peter@eisentraut.org 492 : 13044 : hashtable = palloc_object(HashJoinTableData);
10277 bruce@momjian.us 493 : 13044 : hashtable->nbuckets = nbuckets;
4032 kgrittn@postgresql.o 494 : 13044 : hashtable->nbuckets_original = nbuckets;
495 : 13044 : hashtable->nbuckets_optimal = nbuckets;
6723 tgl@sss.pgh.pa.us 496 : 13044 : hashtable->log2_nbuckets = log2_nbuckets;
4032 kgrittn@postgresql.o 497 : 13044 : hashtable->log2_nbuckets_optimal = log2_nbuckets;
2868 andres@anarazel.de 498 : 13044 : hashtable->buckets.unshared = NULL;
6064 tgl@sss.pgh.pa.us 499 : 13044 : hashtable->skewEnabled = false;
500 : 13044 : hashtable->skewBucket = NULL;
501 : 13044 : hashtable->skewBucketLen = 0;
502 : 13044 : hashtable->nSkewBuckets = 0;
503 : 13044 : hashtable->skewBucketNums = NULL;
10277 bruce@momjian.us 504 : 13044 : hashtable->nbatch = nbatch;
505 : 13044 : hashtable->curbatch = 0;
7540 tgl@sss.pgh.pa.us 506 : 13044 : hashtable->nbatch_original = nbatch;
507 : 13044 : hashtable->nbatch_outstart = nbatch;
508 : 13044 : hashtable->growEnabled = true;
7499 509 : 13044 : hashtable->totalTuples = 0;
2868 andres@anarazel.de 510 : 13044 : hashtable->partialTuples = 0;
4032 kgrittn@postgresql.o 511 : 13044 : hashtable->skewTuples = 0;
9659 tgl@sss.pgh.pa.us 512 : 13044 : hashtable->innerBatchFile = NULL;
513 : 13044 : hashtable->outerBatchFile = NULL;
7540 514 : 13044 : hashtable->spaceUsed = 0;
5747 rhaas@postgresql.org 515 : 13044 : hashtable->spacePeak = 0;
2868 andres@anarazel.de 516 : 13044 : hashtable->spaceAllowed = space_allowed;
6064 tgl@sss.pgh.pa.us 517 : 13044 : hashtable->spaceUsedSkew = 0;
518 : 13044 : hashtable->spaceAllowedSkew =
1916 pg@bowt.ie 519 : 13044 : hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
4065 heikki.linnakangas@i 520 : 13044 : hashtable->chunks = NULL;
2868 andres@anarazel.de 521 : 13044 : hashtable->current_chunk = NULL;
522 : 13044 : hashtable->parallel_state = state->parallel_state;
523 : 13044 : hashtable->area = state->ps.state->es_query_dsa;
524 : 13044 : hashtable->batches = NULL;
525 : :
526 : : #ifdef HJDEBUG
527 : : printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
528 : : hashtable, nbatch, nbuckets);
529 : : #endif
530 : :
531 : : /*
532 : : * Create temporary memory contexts in which to keep the hashtable working
533 : : * storage. See notes in executor/hashjoin.h.
534 : : */
2782 tgl@sss.pgh.pa.us 535 : 13044 : hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
536 : : "HashTableContext",
537 : : ALLOCSET_DEFAULT_SIZES);
538 : :
539 : 13044 : hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
540 : : "HashBatchContext",
541 : : ALLOCSET_DEFAULT_SIZES);
542 : :
892 tomas.vondra@postgre 543 : 13044 : hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
544 : : "HashSpillContext",
545 : : ALLOCSET_DEFAULT_SIZES);
546 : :
547 : : /* Allocate data that will live for the life of the hashjoin */
548 : :
2782 tgl@sss.pgh.pa.us 549 : 13044 : oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
550 : :
2868 andres@anarazel.de 551 [ + + + + ]: 13044 : if (nbatch > 1 && hashtable->parallel_state == NULL)
552 : : {
553 : : MemoryContext oldctx;
554 : :
555 : : /*
556 : : * allocate and initialize the file arrays in spillCxt (not needed for
557 : : * parallel case which uses shared tuplestores instead of raw files)
558 : : */
892 tomas.vondra@postgre 559 : 63 : oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
560 : :
1141 peter@eisentraut.org 561 : 63 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
562 : 63 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
563 : :
892 tomas.vondra@postgre 564 : 63 : MemoryContextSwitchTo(oldctx);
565 : :
566 : : /* The files will not be opened until needed... */
567 : : /* ... but make sure we have temp tablespaces established for them */
6717 tgl@sss.pgh.pa.us 568 : 63 : PrepareTempTablespaces();
569 : : }
570 : :
2868 andres@anarazel.de 571 : 13044 : MemoryContextSwitchTo(oldcxt);
572 : :
573 [ + + ]: 13044 : if (hashtable->parallel_state)
574 : : {
575 : 207 : ParallelHashJoinState *pstate = hashtable->parallel_state;
576 : : Barrier *build_barrier;
577 : :
578 : : /*
579 : : * Attach to the build barrier. The corresponding detach operation is
580 : : * in ExecHashTableDetach. Note that we won't attach to the
581 : : * batch_barrier for batch 0 yet. We'll attach later and start it out
582 : : * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
583 : : * then loaded while hashing (the standard hybrid hash join
584 : : * algorithm), and we'll coordinate that using build_barrier.
585 : : */
586 : 207 : build_barrier = &pstate->build_barrier;
587 : 207 : BarrierAttach(build_barrier);
588 : :
589 : : /*
590 : : * So far we have no idea whether there are any other participants,
591 : : * and if so, what phase they are working on. The only thing we care
592 : : * about at this point is whether someone has already created the
593 : : * SharedHashJoinBatch objects and the hash table for batch 0. One
594 : : * backend will be elected to do that now if necessary.
595 : : */
949 tmunro@postgresql.or 596 [ + + + - ]: 294 : if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
1990 tgl@sss.pgh.pa.us 597 : 87 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
598 : : {
2868 andres@anarazel.de 599 : 87 : pstate->nbatch = nbatch;
600 : 87 : pstate->space_allowed = space_allowed;
601 : 87 : pstate->growth = PHJ_GROWTH_OK;
602 : :
603 : : /* Set up the shared state for coordinating batches. */
604 : 87 : ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
605 : :
606 : : /*
607 : : * Allocate batch 0's hash table up front so we can load it
608 : : * directly while hashing.
609 : : */
610 : 87 : pstate->nbuckets = nbuckets;
611 : 87 : ExecParallelHashTableAlloc(hashtable, 0);
612 : : }
613 : :
614 : : /*
615 : : * The next Parallel Hash synchronization point is in
616 : : * MultiExecParallelHash(), which will progress it all the way to
617 : : * PHJ_BUILD_RUN. The caller must not return control from this
618 : : * executor node between now and then.
619 : : */
620 : : }
621 : : else
622 : : {
623 : : /*
624 : : * Prepare context for the first-scan space allocations; allocate the
625 : : * hashbucket array therein, and set each bucket "empty".
626 : : */
627 : 12837 : MemoryContextSwitchTo(hashtable->batchCxt);
628 : :
1141 peter@eisentraut.org 629 : 12837 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
630 : :
631 : : /*
632 : : * Set up for skew optimization, if possible and there's a need for
633 : : * more than one batch. (In a one-batch join, there's no point in
634 : : * it.)
635 : : */
2868 andres@anarazel.de 636 [ + + ]: 12837 : if (nbatch > 1)
433 drowley@postgresql.o 637 : 63 : ExecHashBuildSkewHash(state, hashtable, node, num_skew_mcvs);
638 : :
2868 andres@anarazel.de 639 : 12837 : MemoryContextSwitchTo(oldcxt);
640 : : }
641 : :
9918 bruce@momjian.us 642 : 13044 : return hashtable;
643 : : }
644 : :
645 : :
646 : : /*
647 : : * Compute appropriate size for hashtable given the estimated size of the
648 : : * relation to be hashed (number of rows and average row width).
649 : : *
650 : : * This is exported so that the planner's costsize.c can use it.
651 : : */
652 : :
653 : : /* Target bucket loading (tuples per bucket) */
654 : : #define NTUP_PER_BUCKET 1
655 : :
656 : : void
6064 tgl@sss.pgh.pa.us 657 : 451371 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
658 : : bool try_combined_hash_mem,
659 : : int parallel_workers,
660 : : size_t *space_allowed,
661 : : int *numbuckets,
662 : : int *numbatches,
663 : : int *num_skew_mcvs)
664 : : {
665 : : int tupsize;
666 : : double inner_rel_bytes;
667 : : size_t hash_table_bytes;
668 : : size_t bucket_bytes;
669 : : size_t max_pointers;
4063 rhaas@postgresql.org 670 : 451371 : int nbatch = 1;
671 : : int nbuckets;
672 : : double dbuckets;
673 : :
674 : : /* Force a plausible relation size if no info */
8904 tgl@sss.pgh.pa.us 675 [ + + ]: 451371 : if (ntuples <= 0.0)
676 : 75 : ntuples = 1000.0;
677 : :
678 : : /*
679 : : * Estimate tupsize based on footprint of tuple in hashtable... note this
680 : : * does not allow for any palloc overhead. The manipulations of spaceUsed
681 : : * don't count palloc overhead either.
682 : : */
7062 683 : 451371 : tupsize = HJTUPLE_OVERHEAD +
3901 684 : 451371 : MAXALIGN(SizeofMinimalTupleHeader) +
7540 685 : 451371 : MAXALIGN(tupwidth);
686 : 451371 : inner_rel_bytes = ntuples * tupsize;
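	/*
	 * [Editorial note, illustrative figures only -- the real constants are
	 * platform-dependent.]  On a typical 64-bit build, HJTUPLE_OVERHEAD and
	 * the maxaligned minimal tuple header come to a few dozen bytes
	 * together, so a 40-byte-wide tuple gives a tupsize of roughly 80 bytes
	 * and one million such rows give inner_rel_bytes of roughly 80 MB,
	 * before any bucket-array overhead is added.
	 */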
687 : :
688 : : /*
689 : : * Compute in-memory hashtable size limit from GUCs.
690 : : */
1555 691 : 451371 : hash_table_bytes = get_hash_memory_limit();
692 : :
693 : : /*
694 : : * Parallel Hash tries to use the combined hash_mem of all workers to
695 : : * avoid the need to batch. If that won't work, it falls back to hash_mem
696 : : * per worker and tries to process batches in parallel.
697 : : */
1916 pg@bowt.ie 698 [ + + ]: 451371 : if (try_combined_hash_mem)
699 : : {
700 : : /* Careful, this could overflow size_t */
701 : : double newlimit;
702 : :
1555 tgl@sss.pgh.pa.us 703 : 38043 : newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
704 [ + - ]: 38043 : newlimit = Min(newlimit, (double) SIZE_MAX);
705 : 38043 : hash_table_bytes = (size_t) newlimit;
706 : : }
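	/*
	 * [Editorial note, illustrative only.]  For example, with a per-process
	 * hash memory limit of 4 MB and parallel_workers = 3, the combined
	 * budget tried here is 4 MB * (3 + 1) = 16 MB.  If even that cannot
	 * hold the inner relation in one batch, the recursive call further down
	 * retries the sizing with the ordinary per-worker budget.
	 */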
707 : :
2868 andres@anarazel.de 708 : 451371 : *space_allowed = hash_table_bytes;
709 : :
710 : : /*
711 : : * If skew optimization is possible, estimate the number of skew buckets
712 : : * that will fit in the memory allowed, and decrement the assumed space
713 : : * available for the main hash table accordingly.
714 : : *
715 : : * We make the optimistic assumption that each skew bucket will contain
716 : : * one inner-relation tuple. If that turns out to be low, we will recover
717 : : * at runtime by reducing the number of skew buckets.
718 : : *
719 : : * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
720 : : * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
721 : : * will round up to the next power of 2 and then multiply by 4 to reduce
722 : : * collisions.
723 : : */
6064 tgl@sss.pgh.pa.us 724 [ + + ]: 451371 : if (useskew)
725 : : {
726 : : size_t bytes_per_mcv;
727 : : size_t skew_mcvs;
728 : :
729 : : /*----------
730 : : * Compute number of MCVs we could hold in hash_table_bytes
731 : : *
732 : : * Divisor is:
733 : : * size of a hash tuple +
734 : : * worst-case size of skewBucket[] per MCV +
735 : : * size of skewBucketNums[] entry +
736 : : * size of skew bucket struct itself
737 : : *----------
738 : : */
1555 739 : 448604 : bytes_per_mcv = tupsize +
740 : : (8 * sizeof(HashSkewBucket *)) +
741 : 448604 : sizeof(int) +
742 : : SKEW_BUCKET_OVERHEAD;
743 : 448604 : skew_mcvs = hash_table_bytes / bytes_per_mcv;
744 : :
745 : : /*
746 : : * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
747 : : * not to worry about size_t overflow in the multiplication)
748 : : */
749 : 448604 : skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
750 : :
751 : : /* Now clamp to integer range */
752 : 448604 : skew_mcvs = Min(skew_mcvs, INT_MAX);
753 : :
754 : 448604 : *num_skew_mcvs = (int) skew_mcvs;
755 : :
756 : : /* Reduce hash_table_bytes by the amount needed for the skew table */
757 [ + - ]: 448604 : if (skew_mcvs > 0)
758 : 448604 : hash_table_bytes -= skew_mcvs * bytes_per_mcv;
759 : : }
760 : : else
6064 761 : 2767 : *num_skew_mcvs = 0;
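	/*
	 * [Editorial note, illustrative only.]  With tupsize around 80 bytes and
	 * 64-bit pointers, bytes_per_mcv works out to roughly 160-170 bytes, so
	 * a 4 MB hash_table_bytes budget allows on the order of 25000 candidate
	 * MCVs before the SKEW_HASH_MEM_PERCENT scaling reduces that to a few
	 * hundred skew buckets.
	 */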
762 : :
763 : : /*
764 : : * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
765 : : * memory is filled, assuming a single batch; but limit the value so that
766 : : * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
767 : : * nor MaxAllocSize.
768 : : *
769 : : * Note that both nbuckets and nbatch must be powers of 2 to make
770 : : * ExecHashGetBucketAndBatch fast.
771 : : */
1555 772 : 451371 : max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
3676 773 : 451371 : max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
774 : : /* If max_pointers isn't a power of 2, must round it down to one */
1555 775 : 451371 : max_pointers = pg_prevpower2_size_t(max_pointers);
776 : :
777 : : /* Also ensure we avoid integer overflow in nbatch and nbuckets */
778 : : /* (this step is redundant given the current value of MaxAllocSize) */
779 : 451371 : max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
780 : :
4063 rhaas@postgresql.org 781 : 451371 : dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
782 [ + + ]: 451371 : dbuckets = Min(dbuckets, max_pointers);
3676 tgl@sss.pgh.pa.us 783 : 451371 : nbuckets = (int) dbuckets;
784 : : /* don't let nbuckets be really small, though ... */
785 : 451371 : nbuckets = Max(nbuckets, 1024);
786 : : /* ... and force it to be a power of 2. */
1555 787 : 451371 : nbuckets = pg_nextpower2_32(nbuckets);
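	/*
	 * [Editorial sketch, not part of the original source.]  The power-of-2
	 * requirement lets ExecHashGetBucketAndBatch (defined elsewhere in this
	 * file) reduce to cheap masking, conceptually along these lines:
	 *
	 *     bucketno = hashvalue & (nbuckets - 1);
	 *     batchno  = (hashvalue >> log2_nbuckets) & (nbatch - 1);
	 *
	 * i.e. the low bits of the hash pick the bucket and the next bits pick
	 * the batch; see the actual implementation for the precise bit handling.
	 */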
788 : :
789 : : /*
790 : : * If there's not enough space to store the projected number of tuples and
791 : : * the required bucket headers, we will need multiple batches.
792 : : */
3676 793 : 451371 : bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
4063 rhaas@postgresql.org 794 [ + + ]: 451371 : if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
795 : : {
796 : : /* We'll need multiple batches */
797 : : size_t sbuckets;
798 : : double dbatch;
799 : : int minbatch;
800 : : size_t bucket_size;
801 : :
802 : : /*
803 : : * If Parallel Hash with combined hash_mem would still need multiple
804 : : * batches, we'll have to fall back to regular hash_mem budget.
805 : : */
1916 pg@bowt.ie 806 [ + + ]: 2600 : if (try_combined_hash_mem)
807 : : {
2868 andres@anarazel.de 808 : 123 : ExecChooseHashTableSize(ntuples, tupwidth, useskew,
809 : : false, parallel_workers,
810 : : space_allowed,
811 : : numbuckets,
812 : : numbatches,
813 : : num_skew_mcvs);
814 : 123 : return;
815 : : }
816 : :
817 : : /*
818 : : * Estimate the number of buckets we'll want to have when hash_mem is
819 : : * entirely full. Each bucket will contain a bucket pointer plus
820 : : * NTUP_PER_BUCKET tuples, whose projected size already includes
821 : : * overhead for the hash code, pointer to the next tuple, etc.
822 : : */
4063 rhaas@postgresql.org 823 : 2477 : bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
1171 tgl@sss.pgh.pa.us 824 [ - + ]: 2477 : if (hash_table_bytes <= bucket_size)
1171 tgl@sss.pgh.pa.us 825 :UBC 0 : sbuckets = 1; /* avoid pg_nextpower2_size_t(0) */
826 : : else
1171 tgl@sss.pgh.pa.us 827 :CBC 2477 : sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
1555 828 : 2477 : sbuckets = Min(sbuckets, max_pointers);
829 : 2477 : nbuckets = (int) sbuckets;
830 : 2477 : nbuckets = pg_nextpower2_32(nbuckets);
4063 rhaas@postgresql.org 831 : 2477 : bucket_bytes = nbuckets * sizeof(HashJoinTuple);
832 : :
833 : : /*
834 : : * Buckets are simple pointers to hashjoin tuples, while tupsize
835 : : * includes the pointer, hash code, and MinimalTupleData. So buckets
836 : : * should never really exceed 25% of hash_mem (even for
837 : : * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
838 : : * 2^N bytes, where we might get more because of doubling. So let's
839 : : * look for 50% here.
840 : : */
841 [ - + ]: 2477 : Assert(bucket_bytes <= hash_table_bytes / 2);
842 : :
843 : : /* Calculate required number of batches. */
844 : 2477 : dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
5841 tgl@sss.pgh.pa.us 845 [ + - ]: 2477 : dbatch = Min(dbatch, max_pointers);
7540 846 : 2477 : minbatch = (int) dbatch;
2028 drowley@postgresql.o 847 : 2477 : nbatch = pg_nextpower2_32(Max(2, minbatch));
848 : : }
849 : :
850 : : /*
851 : : * Optimize the total amount of memory consumed by the hash node.
852 : : *
853 : : * The nbatch calculation above focuses on the in-memory hash table,
854 : : * assuming no per-batch overhead. But each batch may have two files, each
855 : : * with a BLCKSZ buffer. For large nbatch values these buffers may use
856 : : * significantly more memory than the hash table.
857 : : *
858 : : * The total memory usage may be expressed by this formula:
859 : : *
860 : : * (inner_rel_bytes / nbatch) + (2 * nbatch * BLCKSZ)
861 : : *
862 : : * where (inner_rel_bytes / nbatch) is the size of the in-memory hash
863 : : * table and (2 * nbatch * BLCKSZ) is the amount of memory used by file
864 : : * buffers.
865 : : *
866 : : * The nbatch calculation however ignores the second part. And for very
867 : : * large inner_rel_bytes, there may be no nbatch that keeps total memory
868 : : * usage under the budget (work_mem * hash_mem_multiplier). To deal with
869 : : * that, we will adjust nbatch to minimize total memory consumption across
870 : : * both the hashtable and file buffers.
871 : : *
872 : : * As we increase the size of the hashtable, the number of batches
873 : : * decreases, and the total memory usage follows a U-shaped curve. We find
874 : : * the minimum nbatch by "walking back" -- checking if halving nbatch
875 : : * would lower the total memory usage. We stop when it no longer helps.
876 : : *
877 : : * We only reduce the number of batches. Adding batches reduces memory
878 : : * usage only when most of the memory is used by the hash table, with
879 : : * total memory usage within the limit or not far from it. We don't want
880 : : * to start batching when not needed, even if that would reduce memory
881 : : * usage.
882 : : *
883 : : * While growing the hashtable, we also adjust the number of buckets to
884 : : * maintain a load factor of NTUP_PER_BUCKET while squeezing tuples back
885 : : * from batches into the hashtable.
886 : : *
887 : : * Note that we can only change nbuckets during initial hashtable sizing.
888 : : * Once we start building the hash, nbuckets is fixed (we may still grow
889 : : * the hash table).
890 : : *
891 : : * We double several parameters (space_allowed, nbuckets, num_skew_mcvs),
892 : : * which introduces a risk of overflow. We avoid this by exiting the loop.
893 : : * We could do something smarter (e.g. capping nbuckets and continue), but
894 : : * We could do something smarter (e.g. capping nbuckets and continuing), but
895 : : * is a best-effort attempt to reduce memory usage.
896 : : * is a best-effort attempt to reduce memory usage.
 : : */
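	/*
	 * [Editorial worked example, illustrative only.]  Suppose the sizing
	 * above produced space_allowed = 4 MB and nbatch = 2048 with the default
	 * 8 kB BLCKSZ.  Total memory would be 4 MB + 2 * 2048 * 8 kB = 36 MB,
	 * dominated by batch-file buffers.  The loop below halves nbatch and
	 * doubles space_allowed twice (to nbatch = 512, space_allowed = 16 MB,
	 * about 24 MB total) and then stops, because a further halving would add
	 * more hashtable space than it saves in file buffers.
	 */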
10 tomas.vondra@postgre 897 [ + + ]: 451665 : while (nbatch > 1)
898 : : {
899 : : /* Check that buckets won't overflow MaxAllocSize */
900 [ - + ]: 2894 : if (nbuckets > (MaxAllocSize / sizeof(HashJoinTuple) / 2))
10 tomas.vondra@postgre 901 :UBC 0 : break;
902 : :
903 : : /* num_skew_mcvs should be less than nbuckets */
10 tomas.vondra@postgre 904 [ - + ]:CBC 2894 : Assert((*num_skew_mcvs) < (INT_MAX / 2));
905 : :
906 : : /*
907 : : * Check that space_allowed won't overflow SIZE_MAX.
908 : : *
909 : : * We don't use hash_table_bytes here, because it does not include the
910 : : * skew buckets. And we want to limit the overall memory consumption.
911 : : */
912 [ - + ]: 2894 : if ((*space_allowed) > (SIZE_MAX / 2))
10 tomas.vondra@postgre 913 :UBC 0 : break;
914 : :
915 : : /*
916 : : * Will halving the number of batches and doubling the size of the
917 : : * hashtable reduce overall memory usage?
918 : : *
919 : : * This is the same as (S = space_allowed):
920 : : *
921 : : * (S + 2 * nbatch * BLCKSZ) < (S * 2 + nbatch * BLCKSZ)
922 : : *
923 : : * but avoiding intermediate overflow.
924 : : */
10 tomas.vondra@postgre 925 [ + + ]:CBC 2894 : if (nbatch < (*space_allowed) / BLCKSZ)
250 926 : 2477 : break;
927 : :
928 : : /*
929 : : * MaxAllocSize is sufficiently small that we are not worried about
930 : : * overflowing nbuckets.
931 : : */
932 : 417 : nbuckets *= 2;
933 : :
10 934 : 417 : *num_skew_mcvs = (*num_skew_mcvs) * 2;
250 935 : 417 : *space_allowed = (*space_allowed) * 2;
936 : :
10 937 : 417 : nbatch /= 2;
938 : : }
939 : :
3742 tgl@sss.pgh.pa.us 940 [ - + ]: 451248 : Assert(nbuckets > 0);
941 [ - + ]: 451248 : Assert(nbatch > 0);
942 : :
7540 943 : 451248 : *numbuckets = nbuckets;
8904 944 : 451248 : *numbatches = nbatch;
945 : : }
946 : :
947 : :
948 : : /* ----------------------------------------------------------------
949 : : * ExecHashTableDestroy
950 : : *
951 : : * destroy a hash table
952 : : * ----------------------------------------------------------------
953 : : */
954 : : void
9659 955 : 12987 : ExecHashTableDestroy(HashJoinTable hashtable)
956 : : {
957 : : int i;
958 : :
959 : : /*
960 : : * Make sure all the temp files are closed. We skip batch 0, since it
961 : : * can't have any temp files (and the arrays might not even exist if
962 : : * nbatch is only 1). Parallel hash joins don't use these files.
963 : : */
2868 andres@anarazel.de 964 [ + + ]: 12987 : if (hashtable->innerBatchFile != NULL)
965 : : {
966 [ + + ]: 588 : for (i = 1; i < hashtable->nbatch; i++)
967 : : {
968 [ - + ]: 477 : if (hashtable->innerBatchFile[i])
2868 andres@anarazel.de 969 :UBC 0 : BufFileClose(hashtable->innerBatchFile[i]);
2868 andres@anarazel.de 970 [ - + ]:CBC 477 : if (hashtable->outerBatchFile[i])
2868 andres@anarazel.de 971 :UBC 0 : BufFileClose(hashtable->outerBatchFile[i]);
972 : : }
973 : : }
974 : :
975 : : /* Release working memory (batchCxt is a child, so it goes away too) */
9252 tgl@sss.pgh.pa.us 976 :CBC 12987 : MemoryContextDelete(hashtable->hashCxt);
977 : :
978 : : /* And drop the control block */
9659 979 : 12987 : pfree(hashtable);
980 : 12987 : }
981 : :
982 : : /*
983 : : * Consider adjusting the allowed hash table size, depending on the number
984 : : * of batches, to minimize the overall memory usage (for both the hashtable
985 : : * and batch files).
986 : : *
987 : : * We're adjusting the size of the hash table, not the (optimal) number of
988 : : * buckets. We can't change that once we start building the hash, due to how
989 : : * ExecHashGetBucketAndBatch calculates batchno/bucketno from the hash. This
990 : : * means the load factor may not be optimal, but we're in damage control so
991 : : * we accept slower lookups. It's still much better than batch explosion.
992 : : *
993 : : * Returns true if we chose to increase the batch size (and thus we don't
994 : : * need to add batches), and false if we should increase nbatch.
995 : : */
996 : : static bool
250 tomas.vondra@postgre 997 : 99 : ExecHashIncreaseBatchSize(HashJoinTable hashtable)
998 : : {
999 : : /*
1000 : : * How much additional memory would doubling nbatch use? Each batch may
1001 : : * require two buffered files (inner/outer), with a BLCKSZ buffer.
1002 : : */
10 1003 : 99 : size_t batchSpace = (hashtable->nbatch * 2 * (size_t) BLCKSZ);
1004 : :
1005 : : /*
1006 : : * Compare the new space needed for doubling nbatch and for enlarging the
1007 : : * in-memory hash table. If doubling the hash table needs less memory,
1008 : : * just do that. Otherwise, continue with doubling the nbatch.
1009 : : *
1010 : : * We're either doubling spaceAllowed or batchSpace, so which of those
1011 : : * increases the memory usage the least is the same as comparing the
1012 : : * values directly.
1013 : : */
250 1014 [ - + ]: 99 : if (hashtable->spaceAllowed <= batchSpace)
1015 : : {
250 tomas.vondra@postgre 1016 :UBC 0 : hashtable->spaceAllowed *= 2;
1017 : 0 : return true;
1018 : : }
1019 : :
250 tomas.vondra@postgre 1020 :CBC 99 : return false;
1021 : : }
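/*
 * [Editorial worked example, illustrative only.]  Suppose spaceAllowed is
 * 4 MB and BLCKSZ is the default 8 kB.  At nbatch = 128 the file buffers
 * cost batchSpace = 128 * 2 * 8 kB = 2 MB, so doubling the in-memory table
 * (adding 4 MB) would be the more expensive choice and the function returns
 * false, letting the caller double nbatch instead.  Once nbatch reaches 256
 * (batchSpace = 4 MB), doubling spaceAllowed is no more expensive, so it is
 * doubled and true is returned.
 */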
1022 : :
1023 : : /*
1024 : : * ExecHashIncreaseNumBatches
1025 : : * increase the original number of batches in order to reduce
1026 : : * current memory consumption
1027 : : */
1028 : : static void
7540 tgl@sss.pgh.pa.us 1029 : 414579 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
1030 : : {
1031 : 414579 : int oldnbatch = hashtable->nbatch;
1032 : 414579 : int curbatch = hashtable->curbatch;
1033 : : int nbatch;
1034 : : long ninmemory;
1035 : : long nfreed;
1036 : : HashMemoryChunk oldchunks;
1037 : :
1038 : : /* do nothing if we've decided to shut off growth */
1039 [ + + ]: 414579 : if (!hashtable->growEnabled)
1040 : 414480 : return;
1041 : :
1042 : : /* safety check to avoid overflow */
5841 1043 [ - + ]: 99 : if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
7540 tgl@sss.pgh.pa.us 1044 :UBC 0 : return;
1045 : :
1046 : : /* consider increasing size of the in-memory hash table instead */
250 tomas.vondra@postgre 1047 [ - + ]:CBC 99 : if (ExecHashIncreaseBatchSize(hashtable))
250 tomas.vondra@postgre 1048 :UBC 0 : return;
1049 : :
7540 tgl@sss.pgh.pa.us 1050 :CBC 99 : nbatch = oldnbatch * 2;
1051 [ - + ]: 99 : Assert(nbatch > 1);
1052 : :
1053 : : #ifdef HJDEBUG
1054 : : printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
1055 : : hashtable, nbatch, hashtable->spaceUsed);
1056 : : #endif
1057 : :
1058 [ + + ]: 99 : if (hashtable->innerBatchFile == NULL)
1059 : : {
892 tomas.vondra@postgre 1060 : 48 : MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
1061 : :
1062 : : /* we had no file arrays before */
1141 peter@eisentraut.org 1063 : 48 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
1064 : 48 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
1065 : :
892 tomas.vondra@postgre 1066 : 48 : MemoryContextSwitchTo(oldcxt);
1067 : :
1068 : : /* time to establish the temp tablespaces, too */
6717 tgl@sss.pgh.pa.us 1069 : 48 : PrepareTempTablespaces();
1070 : : }
1071 : : else
1072 : : {
1073 : : /* enlarge arrays and zero out added entries */
1080 peter@eisentraut.org 1074 : 51 : hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
1075 : 51 : hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
1076 : : }
1077 : :
7540 tgl@sss.pgh.pa.us 1078 : 99 : hashtable->nbatch = nbatch;
1079 : :
1080 : : /*
1081 : : * Scan through the existing hash table entries and dump out any that are
1082 : : * no longer of the current batch.
1083 : : */
1084 : 99 : ninmemory = nfreed = 0;
1085 : :
1086 : : /* If we know we need to resize nbuckets, we can do it while rebatching. */
4032 kgrittn@postgresql.o 1087 [ + + ]: 99 : if (hashtable->nbuckets_optimal != hashtable->nbuckets)
1088 : : {
1089 : : /* we never decrease the number of buckets */
1090 [ - + ]: 48 : Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
1091 : :
1092 : 48 : hashtable->nbuckets = hashtable->nbuckets_optimal;
1093 : 48 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1094 : :
2868 andres@anarazel.de 1095 : 48 : hashtable->buckets.unshared =
1141 peter@eisentraut.org 1096 : 48 : repalloc_array(hashtable->buckets.unshared,
1097 : : HashJoinTuple, hashtable->nbuckets);
1098 : : }
1099 : :
1100 : : /*
1101 : : * We will scan through the chunks directly, so that we can reset the
1102 : : * buckets now and not have to keep track of which tuples in the buckets have
1103 : : * already been processed. We will free the old chunks as we go.
1104 : : */
2868 andres@anarazel.de 1105 : 99 : memset(hashtable->buckets.unshared, 0,
1106 : 99 : sizeof(HashJoinTuple) * hashtable->nbuckets);
4065 heikki.linnakangas@i 1107 : 99 : oldchunks = hashtable->chunks;
1108 : 99 : hashtable->chunks = NULL;
1109 : :
1110 : : /* so, let's scan through the old chunks, and all tuples in each chunk */
1111 [ + + ]: 495 : while (oldchunks != NULL)
1112 : : {
2868 andres@anarazel.de 1113 : 396 : HashMemoryChunk nextchunk = oldchunks->next.unshared;
1114 : :
1115 : : /* position within the buffer (up to oldchunks->used) */
4065 heikki.linnakangas@i 1116 : 396 : size_t idx = 0;
1117 : :
1118 : : /* process all tuples stored in this chunk (and then free it) */
1119 [ + + ]: 270549 : while (idx < oldchunks->used)
1120 : : {
2855 tgl@sss.pgh.pa.us 1121 : 270153 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
4065 heikki.linnakangas@i 1122 : 270153 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1123 : 270153 : int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
1124 : : int bucketno;
1125 : : int batchno;
1126 : :
7540 tgl@sss.pgh.pa.us 1127 : 270153 : ninmemory++;
4065 heikki.linnakangas@i 1128 : 270153 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1129 : : &bucketno, &batchno);
1130 : :
7540 tgl@sss.pgh.pa.us 1131 [ + + ]: 270153 : if (batchno == curbatch)
1132 : : {
1133 : : /* keep tuple in memory - copy it into the new chunk */
1134 : : HashJoinTuple copyTuple;
1135 : :
3809 1136 : 101433 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
4065 heikki.linnakangas@i 1137 : 101433 : memcpy(copyTuple, hashTuple, hashTupleSize);
1138 : :
1139 : : /* and add it back to the appropriate bucket */
2868 andres@anarazel.de 1140 : 101433 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1141 : 101433 : hashtable->buckets.unshared[bucketno] = copyTuple;
1142 : : }
1143 : : else
1144 : : {
1145 : : /* dump it out */
7540 tgl@sss.pgh.pa.us 1146 [ - + ]: 168720 : Assert(batchno > curbatch);
4065 heikki.linnakangas@i 1147 : 168720 : ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1148 : : hashTuple->hashvalue,
892 tomas.vondra@postgre 1149 : 168720 : &hashtable->innerBatchFile[batchno],
1150 : : hashtable);
1151 : :
4065 heikki.linnakangas@i 1152 : 168720 : hashtable->spaceUsed -= hashTupleSize;
7540 tgl@sss.pgh.pa.us 1153 : 168720 : nfreed++;
1154 : : }
1155 : :
1156 : : /* next tuple in this chunk */
4065 heikki.linnakangas@i 1157 : 270153 : idx += MAXALIGN(hashTupleSize);
1158 : :
1159 : : /* allow this loop to be cancellable */
3176 tgl@sss.pgh.pa.us 1160 [ - + ]: 270153 : CHECK_FOR_INTERRUPTS();
1161 : : }
1162 : :
1163 : : /* we're done with this chunk - free it and proceed to the next one */
4065 heikki.linnakangas@i 1164 : 396 : pfree(oldchunks);
1165 : 396 : oldchunks = nextchunk;
1166 : : }
1167 : :
1168 : : #ifdef HJDEBUG
1169 : : printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1170 : : hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1171 : : #endif
1172 : :
1173 : : /*
1174 : : * If we dumped out either all or none of the tuples in the table, disable
1175 : : * further expansion of nbatch. This situation implies that we have
1176 : : * enough tuples of identical hashvalues to overflow spaceAllowed.
1177 : : * Increasing nbatch will not fix it since there's no way to subdivide the
1178 : : * group any more finely. We have to just gut it out and hope the server
1179 : : * has enough RAM.
1180 : : */
7540 tgl@sss.pgh.pa.us 1181 [ + - + + ]: 99 : if (nfreed == 0 || nfreed == ninmemory)
1182 : : {
1183 : 24 : hashtable->growEnabled = false;
1184 : : #ifdef HJDEBUG
1185 : : printf("Hashjoin %p: disabling further increase of nbatch\n",
1186 : : hashtable);
1187 : : #endif
1188 : : }
1189 : : }
1190 : :
1191 : : /*
1192 : : * ExecParallelHashIncreaseNumBatches
1193 : : * Every participant attached to grow_batches_barrier must run this
1194 : : * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1195 : : */
1196 : : static void
2868 andres@anarazel.de 1197 : 37 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1198 : : {
1199 : 37 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1200 : :
949 tmunro@postgresql.or 1201 [ - + ]: 37 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1202 : :
1203 : : /*
1204 : : * It's unlikely, but we need to be prepared for new participants to show
1205 : : * up while we're in the middle of this operation, so we need to switch on
1206 : : * the barrier phase here.
1207 : : */
2868 andres@anarazel.de 1208 [ + - + - : 37 : switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
- - ]
1209 : : {
949 tmunro@postgresql.or 1210 : 36 : case PHJ_GROW_BATCHES_ELECT:
1211 : :
1212 : : /*
1213 : : * Elect one participant to prepare to grow the number of batches.
1214 : : * This involves reallocating or resetting the buckets of batch 0
1215 : : * in preparation for all participants to begin repartitioning the
1216 : : * tuples.
1217 : : */
2868 andres@anarazel.de 1218 [ + + ]: 36 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1219 : : WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
1220 : : {
1221 : : dsa_pointer_atomic *buckets;
1222 : : ParallelHashJoinBatch *old_batch0;
1223 : : int new_nbatch;
1224 : : int i;
1225 : :
1226 : : /* Move the old batch out of the way. */
1227 : 27 : old_batch0 = hashtable->batches[0].shared;
1228 : 27 : pstate->old_batches = pstate->batches;
1229 : 27 : pstate->old_nbatch = hashtable->nbatch;
1230 : 27 : pstate->batches = InvalidDsaPointer;
1231 : :
1232 : : /* Free this backend's old accessors. */
1233 : 27 : ExecParallelHashCloseBatchAccessors(hashtable);
1234 : :
1235 : : /* Figure out how many batches to use. */
1236 [ + + ]: 27 : if (hashtable->nbatch == 1)
1237 : : {
1238 : : /*
1239 : : * We are going from single-batch to multi-batch. We need
1240 : : * to switch from one large combined memory budget to the
1241 : : * regular hash_mem budget.
1242 : : */
1555 tgl@sss.pgh.pa.us 1243 : 18 : pstate->space_allowed = get_hash_memory_limit();
1244 : :
1245 : : /*
1246 : : * The combined hash_mem of all participants wasn't
1247 : : * enough. Therefore one batch per participant would be
1248 : : * approximately equivalent and would probably also be
1249 : : * insufficient. So try two batches per participant,
1250 : : * rounded up to a power of two.
1251 : : */
1252 : 18 : new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
1253 : : }
1254 : : else
1255 : : {
1256 : : /*
1257 : : * We were already multi-batched. Try doubling the number
1258 : : * of batches.
1259 : : */
2868 andres@anarazel.de 1260 : 9 : new_nbatch = hashtable->nbatch * 2;
1261 : : }
1262 : :
1263 : : /* Allocate new larger generation of batches. */
1264 [ - + ]: 27 : Assert(hashtable->nbatch == pstate->nbatch);
1265 : 27 : ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1266 [ - + ]: 27 : Assert(hashtable->nbatch == pstate->nbatch);
1267 : :
1268 : : /* Replace or recycle batch 0's bucket array. */
1269 [ + + ]: 27 : if (pstate->old_nbatch == 1)
1270 : : {
1271 : : double dtuples;
1272 : : double dbuckets;
1273 : : int new_nbuckets;
1274 : : uint32 max_buckets;
1275 : :
1276 : : /*
1277 : : * We probably also need a smaller bucket array. How many
1278 : : * tuples do we expect per batch, assuming we have only
1279 : : * half of them so far? Normally we don't need to change
1280 : : * the bucket array's size, because the size of each batch
1281 : : * stays the same as we add more batches, but in this
1282 : : * special case we move from a large batch to many smaller
1283 : : * batches and it would be wasteful to keep the large
1284 : : * array.
1285 : : */
1286 : 18 : dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1287 : :
1288 : : /*
1289 : : * We need to calculate the maximum number of buckets to
1290 : : * stay within the MaxAllocSize boundary. Round the
1291 : : * maximum number to the previous power of 2 given that
1292 : : * later we round the number to the next power of 2.
1293 : : */
659 akorotkov@postgresql 1294 : 18 : max_buckets = pg_prevpower2_32((uint32)
1295 : : (MaxAllocSize / sizeof(dsa_pointer_atomic)));
2868 andres@anarazel.de 1296 : 18 : dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
659 akorotkov@postgresql 1297 [ + - ]: 18 : dbuckets = Min(dbuckets, max_buckets);
2868 andres@anarazel.de 1298 : 18 : new_nbuckets = (int) dbuckets;
1299 : 18 : new_nbuckets = Max(new_nbuckets, 1024);
1555 tgl@sss.pgh.pa.us 1300 : 18 : new_nbuckets = pg_nextpower2_32(new_nbuckets);
2868 andres@anarazel.de 1301 : 18 : dsa_free(hashtable->area, old_batch0->buckets);
1302 : 36 : hashtable->batches[0].shared->buckets =
1303 : 18 : dsa_allocate(hashtable->area,
1304 : : sizeof(dsa_pointer_atomic) * new_nbuckets);
1305 : : buckets = (dsa_pointer_atomic *)
1306 : 18 : dsa_get_address(hashtable->area,
1307 : 18 : hashtable->batches[0].shared->buckets);
1308 [ + + ]: 55314 : for (i = 0; i < new_nbuckets; ++i)
1309 : 55296 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1310 : 18 : pstate->nbuckets = new_nbuckets;
1311 : : }
1312 : : else
1313 : : {
1314 : : /* Recycle the existing bucket array. */
1315 : 9 : hashtable->batches[0].shared->buckets = old_batch0->buckets;
1316 : : buckets = (dsa_pointer_atomic *)
1317 : 9 : dsa_get_address(hashtable->area, old_batch0->buckets);
1318 [ + + ]: 36873 : for (i = 0; i < hashtable->nbuckets; ++i)
1319 : 36864 : dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
1320 : : }
1321 : :
1322 : : /* Move all chunks to the work queue for parallel processing. */
1323 : 27 : pstate->chunk_work_queue = old_batch0->chunks;
1324 : :
1325 : : /* Disable further growth temporarily while we're growing. */
1326 : 27 : pstate->growth = PHJ_GROWTH_DISABLED;
1327 : : }
1328 : : else
1329 : : {
1330 : : /* All other participants just flush their tuples to disk. */
1331 : 9 : ExecParallelHashCloseBatchAccessors(hashtable);
1332 : : }
1333 : : /* Fall through. */
1334 : :
1335 : : case PHJ_GROW_BATCHES_REALLOCATE:
1336 : : /* Wait for the above to be finished. */
1337 : 36 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1338 : : WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
1339 : : /* Fall through. */
1340 : :
949 tmunro@postgresql.or 1341 : 37 : case PHJ_GROW_BATCHES_REPARTITION:
1342 : : /* Make sure that we have the current dimensions and buckets. */
2868 andres@anarazel.de 1343 : 37 : ExecParallelHashEnsureBatchAccessors(hashtable);
1344 : 37 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1345 : : /* Then partition, flush counters. */
1346 : 37 : ExecParallelHashRepartitionFirst(hashtable);
1347 : 37 : ExecParallelHashRepartitionRest(hashtable);
1348 : 37 : ExecParallelHashMergeCounters(hashtable);
1349 : : /* Wait for the above to be finished. */
1350 : 37 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1351 : : WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
1352 : : /* Fall through. */
1353 : :
949 tmunro@postgresql.or 1354 : 37 : case PHJ_GROW_BATCHES_DECIDE:
1355 : :
1356 : : /*
1357 : : * Elect one participant to clean up and decide whether further
1358 : : * repartitioning is needed, or should be disabled because it's
1359 : : * not helping.
1360 : : */
2868 andres@anarazel.de 1361 [ + + ]: 37 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1362 : : WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
1363 : : {
1364 : : ParallelHashJoinBatch *old_batches;
1365 : 27 : bool space_exhausted = false;
1366 : 27 : bool extreme_skew_detected = false;
1367 : :
1368 : : /* Make sure that we have the current dimensions and buckets. */
1369 : 27 : ExecParallelHashEnsureBatchAccessors(hashtable);
1370 : 27 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1371 : :
375 tmunro@postgresql.or 1372 : 27 : old_batches = dsa_get_address(hashtable->area, pstate->old_batches);
1373 : :
1374 : : /* Are any of the new generation of batches exhausted? */
1160 drowley@postgresql.o 1375 [ + + ]: 195 : for (int i = 0; i < hashtable->nbatch; ++i)
1376 : : {
1377 : : ParallelHashJoinBatch *batch;
1378 : : ParallelHashJoinBatch *old_batch;
1379 : : int parent;
1380 : :
375 tmunro@postgresql.or 1381 : 168 : batch = hashtable->batches[i].shared;
2868 andres@anarazel.de 1382 [ + - ]: 168 : if (batch->space_exhausted ||
1383 [ + + ]: 168 : batch->estimated_size > pstate->space_allowed)
1384 : 12 : space_exhausted = true;
1385 : :
375 tmunro@postgresql.or 1386 : 168 : parent = i % pstate->old_nbatch;
1387 : 168 : old_batch = NthParallelHashJoinBatch(old_batches, parent);
1388 [ + + ]: 168 : if (old_batch->space_exhausted ||
1389 [ - + ]: 54 : batch->estimated_size > pstate->space_allowed)
1390 : : {
1391 : : /*
1392 : : * Did this batch receive ALL of the tuples from its
1393 : : * parent batch? That would indicate that further
1394 : : * repartitioning isn't going to help (the hash values
1395 : : * are probably all the same).
1396 : : */
2868 andres@anarazel.de 1397 [ + + ]: 114 : if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1398 : 12 : extreme_skew_detected = true;
1399 : : }
1400 : : }
1401 : :
1402 : : /* Don't keep growing if it's not helping or we'd overflow. */
1403 [ + + - + ]: 27 : if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1404 : 12 : pstate->growth = PHJ_GROWTH_DISABLED;
1405 [ - + ]: 15 : else if (space_exhausted)
2868 andres@anarazel.de 1406 :UBC 0 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
1407 : : else
2868 andres@anarazel.de 1408 :CBC 15 : pstate->growth = PHJ_GROWTH_OK;
1409 : :
1410 : : /* Free the old batches in shared memory. */
1411 : 27 : dsa_free(hashtable->area, pstate->old_batches);
1412 : 27 : pstate->old_batches = InvalidDsaPointer;
1413 : : }
1414 : : /* Fall through. */
1415 : :
1416 : : case PHJ_GROW_BATCHES_FINISH:
1417 : : /* Wait for the above to complete. */
1418 : 37 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1419 : : WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
1420 : : }
1421 : 37 : }
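The old_nbatch == 1 branch above packs a fair amount of arithmetic into a few lines. The following standalone sketch (not part of nodeHash.c; the tuples-per-bucket target and the 1024-bucket floor are spelled out here as example constants) works through the same computation:

    #include <math.h>
    #include <stdint.h>

    #define SKETCH_NTUP_PER_BUCKET 1     /* assumed target tuples per bucket */
    #define SKETCH_MIN_BUCKETS     1024  /* floor used by the code above */

    static uint32_t
    sketch_next_pow2(uint32_t n)
    {
        uint32_t    p = 1;

        while (p < n)
            p *= 2;
        return p;
    }

    static uint32_t
    sketch_new_nbuckets(double batch0_ntuples, int new_nbatch, uint32_t max_buckets)
    {
        /* Expect about twice the tuples seen so far, spread over new_nbatch. */
        double      dtuples = (batch0_ntuples * 2.0) / new_nbatch;
        double      dbuckets = ceil(dtuples / SKETCH_NTUP_PER_BUCKET);

        if (dbuckets > max_buckets)      /* stay under the allocation limit */
            dbuckets = max_buckets;
        if (dbuckets < SKETCH_MIN_BUCKETS)
            dbuckets = SKETCH_MIN_BUCKETS;
        return sketch_next_pow2((uint32_t) dbuckets);
    }

For example, if batch 0 held 1,000,000 tuples when growth kicked in and the new batch count is 8, the expected per-batch load is 2,000,000 / 8 = 250,000 tuples, which rounds up to a 262,144-entry bucket array; the much larger array that served the single huge batch is freed rather than carried along.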
1422 : :
1423 : : /*
1424 : : * Repartition the tuples currently loaded into memory for inner batch 0
1425 : : * because the number of batches has been increased. Some tuples are retained
1426 : : * in memory and some are written out to a later batch.
1427 : : */
1428 : : static void
1429 : 37 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1430 : : {
1431 : : dsa_pointer chunk_shared;
1432 : : HashMemoryChunk chunk;
1433 : :
2864 1434 [ - + ]: 37 : Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1435 : :
2868 1436 [ + + ]: 231 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1437 : : {
1438 : 157 : size_t idx = 0;
1439 : :
1440 : : /* Repartition all tuples in this chunk. */
1441 [ + + ]: 118582 : while (idx < chunk->used)
1442 : : {
2855 tgl@sss.pgh.pa.us 1443 : 118425 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
2868 andres@anarazel.de 1444 : 118425 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1445 : : HashJoinTuple copyTuple;
1446 : : dsa_pointer shared;
1447 : : int bucketno;
1448 : : int batchno;
1449 : :
1450 : 118425 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1451 : : &bucketno, &batchno);
1452 : :
1453 [ - + ]: 118425 : Assert(batchno < hashtable->nbatch);
1454 [ + + ]: 118425 : if (batchno == 0)
1455 : : {
1456 : : /* It still belongs in batch 0. Copy to a new chunk. */
1457 : : copyTuple =
1458 : 29213 : ExecParallelHashTupleAlloc(hashtable,
1459 : 29213 : HJTUPLE_OVERHEAD + tuple->t_len,
1460 : : &shared);
1461 : 29213 : copyTuple->hashvalue = hashTuple->hashvalue;
1462 : 29213 : memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1463 : 29213 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1464 : : copyTuple, shared);
1465 : : }
1466 : : else
1467 : : {
1468 : 89212 : size_t tuple_size =
892 tgl@sss.pgh.pa.us 1469 : 89212 : MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1470 : :
1471 : : /* It belongs in a later batch. */
2868 andres@anarazel.de 1472 : 89212 : hashtable->batches[batchno].estimated_size += tuple_size;
1473 : 89212 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1474 : 89212 : &hashTuple->hashvalue, tuple);
1475 : : }
1476 : :
1477 : : /* Count this tuple. */
1478 : 118425 : ++hashtable->batches[0].old_ntuples;
1479 : 118425 : ++hashtable->batches[batchno].ntuples;
1480 : :
1481 : 118425 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1482 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1483 : : }
1484 : :
1485 : : /* Free this chunk. */
1486 : 157 : dsa_free(hashtable->area, chunk_shared);
1487 : :
1488 [ - + ]: 157 : CHECK_FOR_INTERRUPTS();
1489 : : }
1490 : 37 : }
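A concrete way to see how the repartitioning above relates batches across generations: batchno is taken from the hash bits just above the bucket bits, masked by (nbatch - 1), so growing nbatch only exposes additional high bits. The tuples of old batch p can therefore end up only in the new batches i for which i % old_nbatch == p; in the common case where nbatch simply doubles, old batch 1 of 4 splits between new batches 1 and 5 of 8, and nothing else. That is exactly why the PHJ_GROW_BATCHES_DECIDE phase earlier computes each new batch's parent as i % old_nbatch, and why a child that received all of its parent's tuples is treated as a sign of extreme skew.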
1491 : :
1492 : : /*
1493 : : * Help repartition inner batches 1..n.
1494 : : */
1495 : : static void
1496 : 37 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1497 : : {
1498 : 37 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1499 : 37 : int old_nbatch = pstate->old_nbatch;
1500 : : SharedTuplestoreAccessor **old_inner_tuples;
1501 : : ParallelHashJoinBatch *old_batches;
1502 : : int i;
1503 : :
1504 : : /* Get our hands on the previous generation of batches. */
1505 : : old_batches = (ParallelHashJoinBatch *)
1506 : 37 : dsa_get_address(hashtable->area, pstate->old_batches);
1141 peter@eisentraut.org 1507 : 37 : old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
2868 andres@anarazel.de 1508 [ + + ]: 85 : for (i = 1; i < old_nbatch; ++i)
1509 : : {
1510 : 48 : ParallelHashJoinBatch *shared =
892 tgl@sss.pgh.pa.us 1511 : 48 : NthParallelHashJoinBatch(old_batches, i);
1512 : :
2868 andres@anarazel.de 1513 : 48 : old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1514 : : ParallelWorkerNumber + 1,
1515 : : &pstate->fileset);
1516 : : }
1517 : :
1518 : : /* Join in the effort to repartition them. */
1519 [ + + ]: 85 : for (i = 1; i < old_nbatch; ++i)
1520 : : {
1521 : : MinimalTuple tuple;
1522 : : uint32 hashvalue;
1523 : :
1524 : : /* Scan one partition from the previous generation. */
1525 : 48 : sts_begin_parallel_scan(old_inner_tuples[i]);
1526 [ + + ]: 102791 : while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1527 : : {
1528 : 102743 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1529 : : int bucketno;
1530 : : int batchno;
1531 : :
1532 : : /* Decide which partition it goes to in the new generation. */
1533 : 102743 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1534 : : &batchno);
1535 : :
1536 : 102743 : hashtable->batches[batchno].estimated_size += tuple_size;
1537 : 102743 : ++hashtable->batches[batchno].ntuples;
1538 : 102743 : ++hashtable->batches[i].old_ntuples;
1539 : :
1540 : : /* Store the tuple in its new batch. */
1541 : 102743 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1542 : : &hashvalue, tuple);
1543 : :
1544 [ - + ]: 102743 : CHECK_FOR_INTERRUPTS();
1545 : : }
1546 : 48 : sts_end_parallel_scan(old_inner_tuples[i]);
1547 : : }
1548 : :
1549 : 37 : pfree(old_inner_tuples);
1550 : 37 : }
1551 : :
1552 : : /*
1553 : : * Transfer the backend-local per-batch counters to the shared totals.
1554 : : */
1555 : : static void
1556 : 216 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
1557 : : {
1558 : 216 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1559 : : int i;
1560 : :
1561 : 216 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1562 : 216 : pstate->total_tuples = 0;
1563 [ + + ]: 1106 : for (i = 0; i < hashtable->nbatch; ++i)
1564 : : {
1565 : 890 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1566 : :
1567 : 890 : batch->shared->size += batch->size;
1568 : 890 : batch->shared->estimated_size += batch->estimated_size;
1569 : 890 : batch->shared->ntuples += batch->ntuples;
1570 : 890 : batch->shared->old_ntuples += batch->old_ntuples;
1571 : 890 : batch->size = 0;
1572 : 890 : batch->estimated_size = 0;
1573 : 890 : batch->ntuples = 0;
1574 : 890 : batch->old_ntuples = 0;
1575 : 890 : pstate->total_tuples += batch->shared->ntuples;
1576 : : }
1577 : 216 : LWLockRelease(&pstate->lock);
1578 : 216 : }
1579 : :
1580 : : /*
1581 : : * ExecHashIncreaseNumBuckets
1582 : : * increase the original number of buckets in order to reduce
1583 : : * the number of tuples per bucket
1584 : : */
1585 : : static void
4032 kgrittn@postgresql.o 1586 : 75 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1587 : : {
1588 : : HashMemoryChunk chunk;
1589 : :
1590 : : /* do nothing if not an increase (it's called increase for a reason) */
1591 [ - + ]: 75 : if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
4032 kgrittn@postgresql.o 1592 :UBC 0 : return;
1593 : :
1594 : : #ifdef HJDEBUG
1595 : : printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1596 : : hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1597 : : #endif
1598 : :
4032 kgrittn@postgresql.o 1599 :CBC 75 : hashtable->nbuckets = hashtable->nbuckets_optimal;
3676 tgl@sss.pgh.pa.us 1600 : 75 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1601 : :
4032 kgrittn@postgresql.o 1602 [ - + ]: 75 : Assert(hashtable->nbuckets > 1);
1603 [ - + ]: 75 : Assert(hashtable->nbuckets <= (INT_MAX / 2));
1604 [ - + ]: 75 : Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1605 : :
1606 : : /*
1607 : : * Just reallocate the proper number of buckets; we don't need to walk
1608 : : * through them, because we can walk the dense-allocated chunks (just like
1609 : : * in ExecHashIncreaseNumBatches, but without all the copying into new
1610 : : * chunks).
1611 : : */
2868 andres@anarazel.de 1612 : 75 : hashtable->buckets.unshared =
1141 peter@eisentraut.org 1613 : 75 : repalloc_array(hashtable->buckets.unshared,
1614 : : HashJoinTuple, hashtable->nbuckets);
1615 : :
2868 andres@anarazel.de 1616 : 75 : memset(hashtable->buckets.unshared, 0,
1617 : 75 : hashtable->nbuckets * sizeof(HashJoinTuple));
1618 : :
1619 : : /* scan through all tuples in all chunks to rebuild the hash table */
1620 [ + + ]: 698 : for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1621 : : {
1622 : : /* process all tuples stored in this chunk */
3810 bruce@momjian.us 1623 : 623 : size_t idx = 0;
1624 : :
4032 kgrittn@postgresql.o 1625 [ + + ]: 437425 : while (idx < chunk->used)
1626 : : {
2855 tgl@sss.pgh.pa.us 1627 : 436802 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1628 : : int bucketno;
1629 : : int batchno;
1630 : :
4032 kgrittn@postgresql.o 1631 : 436802 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1632 : : &bucketno, &batchno);
1633 : :
1634 : : /* add the tuple to the proper bucket */
2868 andres@anarazel.de 1635 : 436802 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1636 : 436802 : hashtable->buckets.unshared[bucketno] = hashTuple;
1637 : :
1638 : : /* advance index past the tuple */
4032 kgrittn@postgresql.o 1639 : 436802 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1640 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1641 : : }
1642 : :
1643 : : /* allow this loop to be cancellable */
3016 andres@anarazel.de 1644 [ - + ]: 623 : CHECK_FOR_INTERRUPTS();
1645 : : }
1646 : : }
1647 : :
1648 : : static void
2868 1649 : 57 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1650 : : {
1651 : 57 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1652 : : int i;
1653 : : HashMemoryChunk chunk;
1654 : : dsa_pointer chunk_s;
1655 : :
949 tmunro@postgresql.or 1656 [ - + ]: 57 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1657 : :
1658 : : /*
1659 : : * It's unlikely, but we need to be prepared for new participants to show
1660 : : * up while we're in the middle of this operation, so we need to switch on
1661 : : * barrier phase here.
1662 : : */
2868 andres@anarazel.de 1663 [ + - - - ]: 57 : switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
1664 : : {
949 tmunro@postgresql.or 1665 : 57 : case PHJ_GROW_BUCKETS_ELECT:
1666 : : /* Elect one participant to prepare to increase nbuckets. */
2868 andres@anarazel.de 1667 [ + + ]: 57 : if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1668 : : WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
1669 : : {
1670 : : size_t size;
1671 : : dsa_pointer_atomic *buckets;
1672 : :
1673 : : /* Double the size of the bucket array. */
1674 : 54 : pstate->nbuckets *= 2;
1675 : 54 : size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1676 : 54 : hashtable->batches[0].shared->size += size / 2;
1677 : 54 : dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1678 : 108 : hashtable->batches[0].shared->buckets =
1679 : 54 : dsa_allocate(hashtable->area, size);
1680 : : buckets = (dsa_pointer_atomic *)
1681 : 54 : dsa_get_address(hashtable->area,
1682 : 54 : hashtable->batches[0].shared->buckets);
1683 [ + + ]: 466998 : for (i = 0; i < pstate->nbuckets; ++i)
1684 : 466944 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1685 : :
1686 : : /* Put the chunk list onto the work queue. */
1687 : 54 : pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1688 : :
1689 : : /* Clear the flag. */
1690 : 54 : pstate->growth = PHJ_GROWTH_OK;
1691 : : }
1692 : : /* Fall through. */
1693 : :
1694 : : case PHJ_GROW_BUCKETS_REALLOCATE:
1695 : : /* Wait for the above to complete. */
1696 : 57 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1697 : : WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
1698 : : /* Fall through. */
1699 : :
949 tmunro@postgresql.or 1700 : 57 : case PHJ_GROW_BUCKETS_REINSERT:
1701 : : /* Reinsert all tuples into the hash table. */
2868 andres@anarazel.de 1702 : 57 : ExecParallelHashEnsureBatchAccessors(hashtable);
1703 : 57 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1704 [ + + ]: 462 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1705 : : {
1706 : 348 : size_t idx = 0;
1707 : :
1708 [ + + ]: 283741 : while (idx < chunk->used)
1709 : : {
2855 tgl@sss.pgh.pa.us 1710 : 283393 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
2868 andres@anarazel.de 1711 : 283393 : dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1712 : : int bucketno;
1713 : : int batchno;
1714 : :
1715 : 283393 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1716 : : &bucketno, &batchno);
1717 [ - + ]: 283393 : Assert(batchno == 0);
1718 : :
1719 : : /* add the tuple to the proper bucket */
1720 : 283393 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1721 : : hashTuple, shared);
1722 : :
1723 : : /* advance index past the tuple */
1724 : 283393 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1725 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1726 : : }
1727 : :
1728 : : /* allow this loop to be cancellable */
1729 [ - + ]: 348 : CHECK_FOR_INTERRUPTS();
1730 : : }
1731 : 57 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1732 : : WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
1733 : : }
1734 : 57 : }
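Both parallel growth routines above follow the same coordination idiom: a backend may attach at any point, so the code switches on the current barrier phase (a late arrival simply starts at a later case), each serial step is performed by whichever backend gets back true from BarrierArriveAndWait(), and the fall-throughs plus the remaining waits keep everyone in step. Below is a stripped-down structural sketch of that idiom; it reuses the real barrier calls and wait events from the grow-buckets path, but the SKETCH_* phase names and helper functions are hypothetical and the fragment is not meant to build on its own:

    static void
    sketch_grow_cooperatively(ParallelHashJoinState *pstate, HashJoinTable hashtable)
    {
        switch (SKETCH_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
        {
            case SKETCH_ELECT:
                /* BarrierArriveAndWait() returns true in exactly one backend. */
                if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
                                         WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
                    sketch_serial_setup(pstate);        /* hypothetical helper */
                /* Fall through. */

            case SKETCH_REALLOCATE:
                /* Everyone, including late arrivals, waits for the setup. */
                BarrierArriveAndWait(&pstate->grow_buckets_barrier,
                                     WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
                /* Fall through. */

            case SKETCH_REINSERT:
                /* All attached backends help with the parallel part. */
                sketch_parallel_work(hashtable);        /* hypothetical helper */
                BarrierArriveAndWait(&pstate->grow_buckets_barrier,
                                     WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
        }
    }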
1735 : :
1736 : : /*
1737 : : * ExecHashTableInsert
1738 : : * insert a tuple into the hash table depending on the hash value
1739 : : * it may just go to a temp file for later batches
1740 : : *
1741 : : * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1742 : : * tuple; the minimal case in particular is certain to happen while reloading
1743 : : * tuples from batch files. We could save some cycles in the regular-tuple
1744 : : * case by not forcing the slot contents into minimal form; not clear if it's
1745 : : * worth the messiness required.
1746 : : */
1747 : : void
10702 scrappy@hub.org 1748 : 6177194 : ExecHashTableInsert(HashJoinTable hashtable,
1749 : : TupleTableSlot *slot,
1750 : : uint32 hashvalue)
1751 : : {
1752 : : bool shouldFree;
2538 andres@anarazel.de 1753 : 6177194 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1754 : : int bucketno;
1755 : : int batchno;
1756 : :
7540 tgl@sss.pgh.pa.us 1757 : 6177194 : ExecHashGetBucketAndBatch(hashtable, hashvalue,
1758 : : &bucketno, &batchno);
1759 : :
1760 : : /*
1761 : : * decide whether to put the tuple in the hash table or a temp file
1762 : : */
1763 [ + + ]: 6177194 : if (batchno == hashtable->curbatch)
1764 : : {
1765 : : /*
1766 : : * put the tuple in hash table
1767 : : */
1768 : : HashJoinTuple hashTuple;
1769 : : int hashTupleSize;
4032 kgrittn@postgresql.o 1770 : 4613684 : double ntuples = (hashtable->totalTuples - hashtable->skewTuples);
1771 : :
1772 : : /* Create the HashJoinTuple */
7062 tgl@sss.pgh.pa.us 1773 : 4613684 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
4065 heikki.linnakangas@i 1774 : 4613684 : hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1775 : :
7540 tgl@sss.pgh.pa.us 1776 : 4613684 : hashTuple->hashvalue = hashvalue;
7062 1777 : 4613684 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1778 : :
1779 : : /*
1780 : : * We always reset the tuple-matched flag on insertion. This is okay
1781 : : * even when reloading a tuple from a batch file, since the tuple
1782 : : * could not possibly have been matched to an outer tuple before it
1783 : : * went into the batch file.
1784 : : */
5415 1785 : 4613684 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1786 : :
1787 : : /* Push it onto the front of the bucket's list */
2868 andres@anarazel.de 1788 : 4613684 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1789 : 4613684 : hashtable->buckets.unshared[bucketno] = hashTuple;
1790 : :
1791 : : /*
1792 : : * Increase the (optimal) number of buckets if we just exceeded the
1793 : : * NTUP_PER_BUCKET threshold, but only when there's still a single
1794 : : * batch.
1795 : : */
3676 tgl@sss.pgh.pa.us 1796 [ + + ]: 4613684 : if (hashtable->nbatch == 1 &&
1797 [ + + ]: 2746241 : ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1798 : : {
1799 : : /* Guard against integer overflow and alloc size overflow */
1800 [ + - ]: 171 : if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1801 [ + - ]: 171 : hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1802 : : {
1803 : 171 : hashtable->nbuckets_optimal *= 2;
1804 : 171 : hashtable->log2_nbuckets_optimal += 1;
1805 : : }
1806 : : }
1807 : :
1808 : : /* Account for space used, and back off if we've used too much */
7540 1809 : 4613684 : hashtable->spaceUsed += hashTupleSize;
5747 rhaas@postgresql.org 1810 [ + + ]: 4613684 : if (hashtable->spaceUsed > hashtable->spacePeak)
1811 : 3318812 : hashtable->spacePeak = hashtable->spaceUsed;
4032 kgrittn@postgresql.o 1812 : 4613684 : if (hashtable->spaceUsed +
1813 : 4613684 : hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
4063 rhaas@postgresql.org 1814 [ + + ]: 4613684 : > hashtable->spaceAllowed)
7540 tgl@sss.pgh.pa.us 1815 : 414579 : ExecHashIncreaseNumBatches(hashtable);
1816 : : }
1817 : : else
1818 : : {
1819 : : /*
1820 : : * put the tuple into a temp file for later batches
1821 : : */
1822 [ - + ]: 1563510 : Assert(batchno > hashtable->curbatch);
6717 1823 : 1563510 : ExecHashJoinSaveTuple(tuple,
1824 : : hashvalue,
892 tomas.vondra@postgre 1825 : 1563510 : &hashtable->innerBatchFile[batchno],
1826 : : hashtable);
1827 : : }
1828 : :
2538 andres@anarazel.de 1829 [ + + ]: 6177194 : if (shouldFree)
1830 : 4404826 : heap_free_minimal_tuple(tuple);
10702 scrappy@hub.org 1831 : 6177194 : }
1832 : :
1833 : : /*
1834 : : * ExecParallelHashTableInsert
1835 : : * insert a tuple into a shared hash table or shared batch tuplestore
1836 : : */
1837 : : void
2868 andres@anarazel.de 1838 : 1080096 : ExecParallelHashTableInsert(HashJoinTable hashtable,
1839 : : TupleTableSlot *slot,
1840 : : uint32 hashvalue)
1841 : : {
1842 : : bool shouldFree;
2538 1843 : 1080096 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1844 : : dsa_pointer shared;
1845 : : int bucketno;
1846 : : int batchno;
1847 : :
2868 1848 : 174 : retry:
1849 : 1080270 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1850 : :
1851 [ + + ]: 1080270 : if (batchno == 0)
1852 : : {
1853 : : HashJoinTuple hashTuple;
1854 : :
1855 : : /* Try to load it into memory. */
1856 [ - + ]: 647487 : Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
1857 : : PHJ_BUILD_HASH_INNER);
1858 : 647487 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1859 : 647487 : HJTUPLE_OVERHEAD + tuple->t_len,
1860 : : &shared);
1861 [ + + ]: 647487 : if (hashTuple == NULL)
1862 : 158 : goto retry;
1863 : :
1864 : : /* Store the hash value in the HashJoinTuple header. */
1865 : 647329 : hashTuple->hashvalue = hashvalue;
1866 : 647329 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
927 tmunro@postgresql.or 1867 : 647329 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1868 : :
1869 : : /* Push it onto the front of the bucket's list */
2868 andres@anarazel.de 1870 : 647329 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1871 : : hashTuple, shared);
1872 : : }
1873 : : else
1874 : : {
1875 : 432783 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1876 : :
1877 [ - + ]: 432783 : Assert(batchno > 0);
1878 : :
1879 : : /* Try to preallocate space in the batch if necessary. */
1880 [ + + ]: 432783 : if (hashtable->batches[batchno].preallocated < tuple_size)
1881 : : {
1882 [ + + ]: 784 : if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1883 : 16 : goto retry;
1884 : : }
1885 : :
1886 [ - + ]: 432767 : Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1887 : 432767 : hashtable->batches[batchno].preallocated -= tuple_size;
1888 : 432767 : sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1889 : : tuple);
1890 : : }
1891 : 1080096 : ++hashtable->batches[batchno].ntuples;
1892 : :
2538 1893 [ + - ]: 1080096 : if (shouldFree)
1894 : 1080096 : heap_free_minimal_tuple(tuple);
2868 1895 : 1080096 : }
1896 : :
1897 : : /*
1898 : : * Insert a tuple into the current hash table. Unlike
1899 : : * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1900 : : * to other batches or to run out of memory, and should only be called with
1901 : : * tuples that belong in the current batch once growth has been disabled.
1902 : : */
1903 : : void
1904 : 521979 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1905 : : TupleTableSlot *slot,
1906 : : uint32 hashvalue)
1907 : : {
1908 : : bool shouldFree;
2538 1909 : 521979 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1910 : : HashJoinTuple hashTuple;
1911 : : dsa_pointer shared;
1912 : : int batchno;
1913 : : int bucketno;
1914 : :
2868 1915 : 521979 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1916 [ - + ]: 521979 : Assert(batchno == hashtable->curbatch);
1917 : 521979 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1918 : 521979 : HJTUPLE_OVERHEAD + tuple->t_len,
1919 : : &shared);
1920 : 521979 : hashTuple->hashvalue = hashvalue;
1921 : 521979 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1922 : 521979 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1923 : 521979 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1924 : : hashTuple, shared);
1925 : :
2538 1926 [ - + ]: 521979 : if (shouldFree)
2538 andres@anarazel.de 1927 :UBC 0 : heap_free_minimal_tuple(tuple);
2868 andres@anarazel.de 1928 :CBC 521979 : }
1929 : :
1930 : :
1931 : : /*
1932 : : * ExecHashGetBucketAndBatch
1933 : : * Determine the bucket number and batch number for a hash value
1934 : : *
1935 : : * Note: on-the-fly increases of nbatch must not change the bucket number
1936 : : * for a given hash code (since we don't move tuples to different hash
1937 : : * chains), and must only cause the batch number to remain the same or
1938 : : * increase. Our algorithm is
1939 : : * bucketno = hashvalue MOD nbuckets
1940 : : * batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
1941 : : * where nbuckets and nbatch are both expected to be powers of 2, so we can
1942 : : * do the computations by shifting and masking. (This assumes that all hash
1943 : : * functions are good about randomizing all their output bits, else we are
1944 : : * likely to have very skewed bucket or batch occupancy.)
1945 : : *
1946 : : * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1947 : : * bucket count growth. Once we start batching, their values are fixed and do
1948 : : * not change over the course of the join (making it possible to compute batch
1949 : : * number the way we do here).
1950 : : *
1951 : : * nbatch is always a power of 2; we increase it only by doubling it. This
1952 : : * effectively adds one more bit to the top of the batchno. In very large
1953 : : * joins, we might run out of bits to add, so we do this by rotating the hash
1954 : : * value. This causes batchno to steal bits from bucketno when the number of
1955 : : * virtual buckets exceeds 2^32. It's better to have longer bucket chains
1956 : : * than to lose the ability to divide batches.
1957 : : */
1958 : : void
7540 tgl@sss.pgh.pa.us 1959 : 20293568 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1960 : : uint32 hashvalue,
1961 : : int *bucketno,
1962 : : int *batchno)
1963 : : {
7317 bruce@momjian.us 1964 : 20293568 : uint32 nbuckets = (uint32) hashtable->nbuckets;
1965 : 20293568 : uint32 nbatch = (uint32) hashtable->nbatch;
1966 : :
7540 tgl@sss.pgh.pa.us 1967 [ + + ]: 20293568 : if (nbatch > 1)
1968 : : {
6723 1969 : 7786621 : *bucketno = hashvalue & (nbuckets - 1);
2134 tmunro@postgresql.or 1970 : 7786621 : *batchno = pg_rotate_right32(hashvalue,
1971 : 7786621 : hashtable->log2_nbuckets) & (nbatch - 1);
1972 : : }
1973 : : else
1974 : : {
6723 tgl@sss.pgh.pa.us 1975 : 12506947 : *bucketno = hashvalue & (nbuckets - 1);
7540 1976 : 12506947 : *batchno = 0;
1977 : : }
8337 1978 : 20293568 : }
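Because nbuckets and nbatch are powers of two, the MOD and ROR operations described above reduce to a mask and a rotate. Here is a self-contained sketch (not part of nodeHash.c; the local rotate helper merely stands in for pg_rotate_right32 so the example compiles outside the backend) that can be used to work through values by hand:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    sketch_rotate_right32(uint32_t x, int n)
    {
        return (x >> n) | (x << (32 - n));      /* assumes 0 < n < 32 */
    }

    static void
    sketch_bucket_and_batch(uint32_t hashvalue, uint32_t nbuckets, int log2_nbuckets,
                            uint32_t nbatch, int *bucketno, int *batchno)
    {
        /* nbuckets and nbatch must both be powers of two for the masking to work */
        *bucketno = (int) (hashvalue & (nbuckets - 1));
        if (nbatch > 1)
            *batchno = (int) (sketch_rotate_right32(hashvalue, log2_nbuckets) &
                              (nbatch - 1));
        else
            *batchno = 0;
    }

    int
    main(void)
    {
        int         bucketno;
        int         batchno;

        /* 1024 buckets (2^10) and 4 batches: the batch bits sit just above bit 9 */
        sketch_bucket_and_batch(0xDEADBEEF, 1024, 10, 4, &bucketno, &batchno);
        printf("bucket %d, batch %d\n", bucketno, batchno);     /* bucket 751, batch 3 */
        return 0;
    }

Doubling nbatch widens the batch mask by one bit without disturbing the bucket bits, which is what lets an on-the-fly nbatch increase keep the batch number the same or larger, never smaller, as the comment above requires.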
1979 : :
1980 : : /*
1981 : : * ExecScanHashBucket
1982 : : * scan a hash bucket for matches to the current outer tuple
1983 : : *
1984 : : * The current outer tuple must be stored in econtext->ecxt_outertuple.
1985 : : *
1986 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1987 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
1988 : : * for the latter.
1989 : : */
1990 : : bool
10276 bruce@momjian.us 1991 : 11475485 : ExecScanHashBucket(HashJoinState *hjstate,
1992 : : ExprContext *econtext)
1993 : : {
3149 andres@anarazel.de 1994 : 11475485 : ExprState *hjclauses = hjstate->hashclauses;
9652 bruce@momjian.us 1995 : 11475485 : HashJoinTable hashtable = hjstate->hj_HashTable;
1996 : 11475485 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
7540 tgl@sss.pgh.pa.us 1997 : 11475485 : uint32 hashvalue = hjstate->hj_CurHashValue;
1998 : :
1999 : : /*
2000 : : * hj_CurTuple is the address of the tuple last returned from the current
2001 : : * bucket, or NULL if it's time to start scanning a new bucket.
2002 : : *
2003 : : * If the tuple hashed to a skew bucket then scan the skew bucket,
2004 : : * otherwise scan the standard hashtable bucket.
2005 : : */
6064 2006 [ + + ]: 11475485 : if (hashTuple != NULL)
2868 andres@anarazel.de 2007 : 2552470 : hashTuple = hashTuple->next.unshared;
6064 tgl@sss.pgh.pa.us 2008 [ + + ]: 8923015 : else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
2009 : 1200 : hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
2010 : : else
2868 andres@anarazel.de 2011 : 8921815 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2012 : :
2013 [ + + ]: 13717031 : while (hashTuple != NULL)
2014 : : {
2015 [ + + ]: 7565257 : if (hashTuple->hashvalue == hashvalue)
2016 : : {
2017 : : TupleTableSlot *inntuple;
2018 : :
2019 : : /* insert hashtable's tuple into exec slot so ExecQual sees it */
2020 : 5323717 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2021 : : hjstate->hj_HashTupleSlot,
2022 : : false); /* do not pfree */
2023 : 5323717 : econtext->ecxt_innertuple = inntuple;
2024 : :
2828 2025 [ + + ]: 5323717 : if (ExecQualAndReset(hjclauses, econtext))
2026 : : {
2868 2027 : 5323711 : hjstate->hj_CurTuple = hashTuple;
2028 : 5323711 : return true;
2029 : : }
2030 : : }
2031 : :
2032 : 2241546 : hashTuple = hashTuple->next.unshared;
2033 : : }
2034 : :
2035 : : /*
2036 : : * no match
2037 : : */
2038 : 6151774 : return false;
2039 : : }
2040 : :
2041 : : /*
2042 : : * ExecParallelScanHashBucket
2043 : : * scan a hash bucket for matches to the current outer tuple
2044 : : *
2045 : : * The current outer tuple must be stored in econtext->ecxt_outertuple.
2046 : : *
2047 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2048 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2049 : : * for the latter.
2050 : : */
2051 : : bool
2052 : 2103054 : ExecParallelScanHashBucket(HashJoinState *hjstate,
2053 : : ExprContext *econtext)
2054 : : {
2055 : 2103054 : ExprState *hjclauses = hjstate->hashclauses;
2056 : 2103054 : HashJoinTable hashtable = hjstate->hj_HashTable;
2057 : 2103054 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2058 : 2103054 : uint32 hashvalue = hjstate->hj_CurHashValue;
2059 : :
2060 : : /*
2061 : : * hj_CurTuple is the address of the tuple last returned from the current
2062 : : * bucket, or NULL if it's time to start scanning a new bucket.
2063 : : */
2064 [ + + ]: 2103054 : if (hashTuple != NULL)
2065 : 1020039 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2066 : : else
2067 : 1083015 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2068 : : hjstate->hj_CurBucketNo);
2069 : :
9659 tgl@sss.pgh.pa.us 2070 [ + + ]: 2797125 : while (hashTuple != NULL)
2071 : : {
7540 2072 [ + + ]: 1714110 : if (hashTuple->hashvalue == hashvalue)
2073 : : {
2074 : : TupleTableSlot *inntuple;
2075 : :
2076 : : /* insert hashtable's tuple into exec slot so ExecQual sees it */
7062 2077 : 1020039 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2078 : : hjstate->hj_HashTupleSlot,
2079 : : false); /* do not pfree */
7540 2080 : 1020039 : econtext->ecxt_innertuple = inntuple;
2081 : :
2828 andres@anarazel.de 2082 [ + - ]: 1020039 : if (ExecQualAndReset(hjclauses, econtext))
2083 : : {
7540 tgl@sss.pgh.pa.us 2084 : 1020039 : hjstate->hj_CurTuple = hashTuple;
5415 2085 : 1020039 : return true;
2086 : : }
2087 : : }
2088 : :
2868 andres@anarazel.de 2089 : 694071 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2090 : : }
2091 : :
2092 : : /*
2093 : : * no match
2094 : : */
5415 tgl@sss.pgh.pa.us 2095 : 1083015 : return false;
2096 : : }
2097 : :
2098 : : /*
2099 : : * ExecPrepHashTableForUnmatched
2100 : : * set up for a series of ExecScanHashTableForUnmatched calls
2101 : : */
2102 : : void
2103 : 2046 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2104 : : {
2105 : : /*----------
2106 : : * During this scan we use the HashJoinState fields as follows:
2107 : : *
2108 : : * hj_CurBucketNo: next regular bucket to scan
2109 : : * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2110 : : * hj_CurTuple: last tuple returned, or NULL to start next bucket
2111 : : *----------
2112 : : */
2113 : 2046 : hjstate->hj_CurBucketNo = 0;
2114 : 2046 : hjstate->hj_CurSkewBucketNo = 0;
2115 : 2046 : hjstate->hj_CurTuple = NULL;
2116 : 2046 : }
2117 : :
2118 : : /*
2119 : : * Decide if this process is allowed to run the unmatched scan. If so, the
2120 : : * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
2121 : : * Otherwise the batch is detached and false is returned.
2122 : : */
2123 : : bool
941 tmunro@postgresql.or 2124 : 52 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
2125 : : {
2126 : 52 : HashJoinTable hashtable = hjstate->hj_HashTable;
2127 : 52 : int curbatch = hashtable->curbatch;
2128 : 52 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
2129 : :
2130 [ - + ]: 52 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
2131 : :
2132 : : /*
2133 : : * It would not be deadlock-free to wait on the batch barrier, because it
2134 : : * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
2135 : : * already emitted tuples. Therefore, we'll hold a wait-free election:
2136 : : * only one process can continue to the next phase, and all others detach
2137 : : * from this batch. They can still do any work on other batches, if there
2138 : : * are any.
2139 : : */
2140 [ + + ]: 52 : if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
2141 : : {
2142 : : /* This process considers the batch to be done. */
2143 : 19 : hashtable->batches[hashtable->curbatch].done = true;
2144 : :
2145 : : /* Make sure any temporary files are closed. */
2146 : 19 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
2147 : 19 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
2148 : :
2149 : : /*
2150 : : * Track largest batch we've seen, which would normally happen in
2151 : : * ExecHashTableDetachBatch().
2152 : : */
2153 : 19 : hashtable->spacePeak =
2154 : 19 : Max(hashtable->spacePeak,
2155 : : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
2156 : 19 : hashtable->curbatch = -1;
2157 : 19 : return false;
2158 : : }
2159 : :
2160 : : /* Now we are alone with this batch. */
2161 [ - + ]: 33 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
2162 : :
2163 : : /*
2164 : : * Has another process decided to give up early and command all processes
2165 : : * to skip the unmatched scan?
2166 : : */
2167 [ - + ]: 33 : if (batch->skip_unmatched)
2168 : : {
941 tmunro@postgresql.or 2169 :UBC 0 : hashtable->batches[hashtable->curbatch].done = true;
2170 : 0 : ExecHashTableDetachBatch(hashtable);
2171 : 0 : return false;
2172 : : }
2173 : :
2174 : : /* Now prepare the process local state, just as for non-parallel join. */
941 tmunro@postgresql.or 2175 :CBC 33 : ExecPrepHashTableForUnmatched(hjstate);
2176 : :
2177 : 33 : return true;
2178 : : }
2179 : :
2180 : : /*
2181 : : * ExecScanHashTableForUnmatched
2182 : : * scan the hash table for unmatched inner tuples
2183 : : *
2184 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2185 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2186 : : * for the latter.
2187 : : */
2188 : : bool
5415 tgl@sss.pgh.pa.us 2189 : 218298 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2190 : : {
2191 : 218298 : HashJoinTable hashtable = hjstate->hj_HashTable;
2192 : 218298 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2193 : :
2194 : : for (;;)
2195 : : {
2196 : : /*
2197 : : * hj_CurTuple is the address of the tuple last returned from the
2198 : : * current bucket, or NULL if it's time to start scanning a new
2199 : : * bucket.
2200 : : */
2201 [ + + ]: 2850220 : if (hashTuple != NULL)
2868 andres@anarazel.de 2202 : 216285 : hashTuple = hashTuple->next.unshared;
5415 tgl@sss.pgh.pa.us 2203 [ + + ]: 2633935 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2204 : : {
2868 andres@anarazel.de 2205 : 2631928 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
5415 tgl@sss.pgh.pa.us 2206 : 2631928 : hjstate->hj_CurBucketNo++;
2207 : : }
2208 [ - + ]: 2007 : else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2209 : : {
5314 bruce@momjian.us 2210 :UBC 0 : int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2211 : :
5415 tgl@sss.pgh.pa.us 2212 : 0 : hashTuple = hashtable->skewBucket[j]->tuples;
2213 : 0 : hjstate->hj_CurSkewBucketNo++;
2214 : : }
2215 : : else
5415 tgl@sss.pgh.pa.us 2216 :CBC 2007 : break; /* finished all buckets */
2217 : :
2218 [ + + ]: 3053028 : while (hashTuple != NULL)
2219 : : {
2220 [ + + ]: 421106 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2221 : : {
2222 : : TupleTableSlot *inntuple;
2223 : :
2224 : : /* insert hashtable's tuple into exec slot */
2225 : 216291 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2226 : : hjstate->hj_HashTupleSlot,
2227 : : false); /* do not pfree */
2228 : 216291 : econtext->ecxt_innertuple = inntuple;
2229 : :
2230 : : /*
2231 : : * Reset temp memory each time; although this function doesn't
2232 : : * do any qual eval, the caller will, so let's keep it
2233 : : * parallel to ExecScanHashBucket.
2234 : : */
2235 : 216291 : ResetExprContext(econtext);
2236 : :
2237 : 216291 : hjstate->hj_CurTuple = hashTuple;
2238 : 216291 : return true;
2239 : : }
2240 : :
2868 andres@anarazel.de 2241 : 204815 : hashTuple = hashTuple->next.unshared;
2242 : : }
2243 : :
2244 : : /* allow this loop to be cancellable */
3016 2245 [ - + ]: 2631922 : CHECK_FOR_INTERRUPTS();
2246 : : }
2247 : :
2248 : : /*
2249 : : * no more unmatched tuples
2250 : : */
5415 tgl@sss.pgh.pa.us 2251 : 2007 : return false;
2252 : : }
2253 : :
2254 : : /*
2255 : : * ExecParallelScanHashTableForUnmatched
2256 : : * scan the hash table for unmatched inner tuples, in parallel join
2257 : : *
2258 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2259 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2260 : : * for the latter.
2261 : : */
2262 : : bool
941 tmunro@postgresql.or 2263 : 60036 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
2264 : : ExprContext *econtext)
2265 : : {
2266 : 60036 : HashJoinTable hashtable = hjstate->hj_HashTable;
2267 : 60036 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2268 : :
2269 : : for (;;)
2270 : : {
2271 : : /*
2272 : : * hj_CurTuple is the address of the tuple last returned from the
2273 : : * current bucket, or NULL if it's time to start scanning a new
2274 : : * bucket.
2275 : : */
2276 [ + + ]: 367236 : if (hashTuple != NULL)
2277 : 60003 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2278 [ + + ]: 307233 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2279 : 307200 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2280 : 307200 : hjstate->hj_CurBucketNo++);
2281 : : else
2282 : 33 : break; /* finished all buckets */
2283 : :
2284 [ + + ]: 487203 : while (hashTuple != NULL)
2285 : : {
2286 [ + + ]: 180003 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2287 : : {
2288 : : TupleTableSlot *inntuple;
2289 : :
2290 : : /* insert hashtable's tuple into exec slot */
2291 : 60003 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2292 : : hjstate->hj_HashTupleSlot,
2293 : : false); /* do not pfree */
2294 : 60003 : econtext->ecxt_innertuple = inntuple;
2295 : :
2296 : : /*
2297 : : * Reset temp memory each time; although this function doesn't
2298 : : * do any qual eval, the caller will, so let's keep it
2299 : : * parallel to ExecScanHashBucket.
2300 : : */
2301 : 60003 : ResetExprContext(econtext);
2302 : :
2303 : 60003 : hjstate->hj_CurTuple = hashTuple;
2304 : 60003 : return true;
2305 : : }
2306 : :
2307 : 120000 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2308 : : }
2309 : :
2310 : : /* allow this loop to be cancellable */
2311 [ - + ]: 307200 : CHECK_FOR_INTERRUPTS();
2312 : : }
2313 : :
2314 : : /*
2315 : : * no more unmatched tuples
2316 : : */
2317 : 33 : return false;
2318 : : }
2319 : :
2320 : : /*
2321 : : * ExecHashTableReset
2322 : : *
2323 : : * reset hash table header for new batch
2324 : : */
2325 : : void
7540 tgl@sss.pgh.pa.us 2326 : 477 : ExecHashTableReset(HashJoinTable hashtable)
2327 : : {
2328 : : MemoryContext oldcxt;
9659 2329 : 477 : int nbuckets = hashtable->nbuckets;
2330 : :
2331 : : /*
2332 : : * Release all the hash buckets and tuples acquired in the prior pass, and
2333 : : * reinitialize the context for a new pass.
2334 : : */
9252 2335 : 477 : MemoryContextReset(hashtable->batchCxt);
9659 2336 : 477 : oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2337 : :
2338 : : /* Reallocate and reinitialize the hash bucket headers. */
1141 peter@eisentraut.org 2339 : 477 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
2340 : :
7540 tgl@sss.pgh.pa.us 2341 : 477 : hashtable->spaceUsed = 0;
2342 : :
9659 2343 : 477 : MemoryContextSwitchTo(oldcxt);
2344 : :
2345 : : /* Forget the chunks (the memory was freed by the context reset above). */
4065 heikki.linnakangas@i 2346 : 477 : hashtable->chunks = NULL;
10702 scrappy@hub.org 2347 : 477 : }
2348 : :
2349 : : /*
2350 : : * ExecHashTableResetMatchFlags
2351 : : * Clear all the HeapTupleHeaderHasMatch flags in the table
2352 : : */
2353 : : void
5415 tgl@sss.pgh.pa.us 2354 : 35 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2355 : : {
2356 : : HashJoinTuple tuple;
2357 : : int i;
2358 : :
2359 : : /* Reset all flags in the main table ... */
2360 [ + + ]: 35875 : for (i = 0; i < hashtable->nbuckets; i++)
2361 : : {
2868 andres@anarazel.de 2362 [ + + ]: 36007 : for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2363 : 167 : tuple = tuple->next.unshared)
5415 tgl@sss.pgh.pa.us 2364 : 167 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2365 : : }
2366 : :
2367 : : /* ... and the same for the skew buckets, if any */
2368 [ - + ]: 35 : for (i = 0; i < hashtable->nSkewBuckets; i++)
2369 : : {
5314 bruce@momjian.us 2370 :UBC 0 : int j = hashtable->skewBucketNums[i];
5415 tgl@sss.pgh.pa.us 2371 : 0 : HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2372 : :
2868 andres@anarazel.de 2373 [ # # ]: 0 : for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
5415 tgl@sss.pgh.pa.us 2374 : 0 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2375 : : }
5415 tgl@sss.pgh.pa.us 2376 :CBC 35 : }
2377 : :
2378 : :
2379 : : void
5586 2380 : 915 : ExecReScanHash(HashState *node)
2381 : : {
1208 2382 : 915 : PlanState *outerPlan = outerPlanState(node);
2383 : :
2384 : : /*
2385 : : * if chgParam of subnode is not null then the plan will be re-scanned by
2386 : : * first ExecProcNode.
2387 : : */
2388 [ + + ]: 915 : if (outerPlan->chgParam == NULL)
2389 : 15 : ExecReScan(outerPlan);
10118 vadim4o@yahoo.com 2390 : 915 : }
2391 : :
2392 : :
2393 : : /*
2394 : : * ExecHashBuildSkewHash
2395 : : *
2396 : : * Set up for skew optimization if we can identify the most common values
2397 : : * (MCVs) of the outer relation's join key. We make a skew hash bucket
2398 : : * for the hash value of each MCV, up to the number of slots allowed
2399 : : * based on available memory.
2400 : : */
2401 : : static void
433 drowley@postgresql.o 2402 : 63 : ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable,
2403 : : Hash *node, int mcvsToUse)
2404 : : {
2405 : : HeapTupleData *statsTuple;
2406 : : AttStatsSlot sslot;
2407 : :
2408 : : /* Do nothing if planner didn't identify the outer relation's join key */
6064 tgl@sss.pgh.pa.us 2409 [ - + ]: 63 : if (!OidIsValid(node->skewTable))
2410 : 36 : return;
2411 : : /* Also, do nothing if we don't have room for at least one skew bucket */
2412 [ - + ]: 63 : if (mcvsToUse <= 0)
6064 tgl@sss.pgh.pa.us 2413 :UBC 0 : return;
2414 : :
2415 : : /*
2416 : : * Try to find the MCV statistics for the outer relation's join key.
2417 : : */
5734 rhaas@postgresql.org 2418 :CBC 63 : statsTuple = SearchSysCache3(STATRELATTINH,
2419 : : ObjectIdGetDatum(node->skewTable),
2420 : 63 : Int16GetDatum(node->skewColumn),
2421 : 63 : BoolGetDatum(node->skewInherit));
6064 tgl@sss.pgh.pa.us 2422 [ + + ]: 63 : if (!HeapTupleIsValid(statsTuple))
2423 : 36 : return;
2424 : :
3089 2425 [ + + ]: 27 : if (get_attstatsslot(&sslot, statsTuple,
2426 : : STATISTIC_KIND_MCV, InvalidOid,
2427 : : ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2428 : : {
2429 : : double frac;
2430 : : int nbuckets;
2431 : : int i;
2432 : :
2433 [ - + ]: 3 : if (mcvsToUse > sslot.nvalues)
3089 tgl@sss.pgh.pa.us 2434 :UBC 0 : mcvsToUse = sslot.nvalues;
2435 : :
2436 : : /*
2437 : : * Calculate the expected fraction of outer relation that will
2438 : : * participate in the skew optimization. If this isn't at least
2439 : : * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2440 : : */
6064 tgl@sss.pgh.pa.us 2441 :CBC 3 : frac = 0;
2442 [ + + ]: 66 : for (i = 0; i < mcvsToUse; i++)
3089 2443 : 63 : frac += sslot.numbers[i];
6064 2444 [ - + ]: 3 : if (frac < SKEW_MIN_OUTER_FRACTION)
2445 : : {
3089 tgl@sss.pgh.pa.us 2446 :UBC 0 : free_attstatsslot(&sslot);
6064 2447 : 0 : ReleaseSysCache(statsTuple);
2448 : 0 : return;
2449 : : }
2450 : :
2451 : : /*
2452 : : * Okay, set up the skew hashtable.
2453 : : *
2454 : : * skewBucket[] is an open addressing hashtable with a power of 2 size
2455 : : * that is greater than the number of MCV values. (This ensures there
2456 : : * will be at least one null entry, so searches will always
2457 : : * terminate.)
2458 : : *
2459 : : * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2460 : : * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2461 : : * since we limit pg_statistic entries to much less than that.
2462 : : */
2028 drowley@postgresql.o 2463 :CBC 3 : nbuckets = pg_nextpower2_32(mcvsToUse + 1);
2464 : : /* use two more bits just to help avoid collisions */
6064 tgl@sss.pgh.pa.us 2465 : 3 : nbuckets <<= 2;
2466 : :
2467 : 3 : hashtable->skewEnabled = true;
2468 : 3 : hashtable->skewBucketLen = nbuckets;
2469 : :
2470 : : /*
2471 : : * We allocate the bucket memory in the hashtable's batch context. It
2472 : : * is only needed during the first batch, and this ensures it will be
2473 : : * automatically removed once the first batch is done.
2474 : : */
2475 : 3 : hashtable->skewBucket = (HashSkewBucket **)
2476 : 3 : MemoryContextAllocZero(hashtable->batchCxt,
2477 : : nbuckets * sizeof(HashSkewBucket *));
2478 : 3 : hashtable->skewBucketNums = (int *)
2479 : 3 : MemoryContextAllocZero(hashtable->batchCxt,
2480 : : mcvsToUse * sizeof(int));
2481 : :
2482 : 3 : hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2483 : 3 : + mcvsToUse * sizeof(int);
2484 : 3 : hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2485 : 3 : + mcvsToUse * sizeof(int);
5747 rhaas@postgresql.org 2486 [ + - ]: 3 : if (hashtable->spaceUsed > hashtable->spacePeak)
2487 : 3 : hashtable->spacePeak = hashtable->spaceUsed;
2488 : :
2489 : : /*
2490 : : * Create a skew bucket for each MCV hash value.
2491 : : *
2492 : : * Note: it is very important that we create the buckets in order of
2493 : : * decreasing MCV frequency. If we have to remove some buckets, they
2494 : : * must be removed in reverse order of creation (see notes in
2495 : : * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2496 : : * be removed first.
2497 : : */
2498 : :
6064 tgl@sss.pgh.pa.us 2499 [ + + ]: 66 : for (i = 0; i < mcvsToUse; i++)
2500 : : {
2501 : : uint32 hashvalue;
2502 : : int bucket;
2503 : :
433 drowley@postgresql.o 2504 : 63 : hashvalue = DatumGetUInt32(FunctionCall1Coll(hashstate->skew_hashfunction,
2505 : : hashstate->skew_collation,
2411 peter@eisentraut.org 2506 : 63 : sslot.values[i]));
2507 : :
2508 : : /*
2509 : : * While we have not hit a hole in the hashtable and have not hit
2510 : : * the desired bucket, we have collided with some previous hash
2511 : : * value, so try the next bucket location. NB: this code must
2512 : : * match ExecHashGetSkewBucket.
2513 : : */
6064 tgl@sss.pgh.pa.us 2514 : 63 : bucket = hashvalue & (nbuckets - 1);
2515 [ - + ]: 63 : while (hashtable->skewBucket[bucket] != NULL &&
6064 tgl@sss.pgh.pa.us 2516 [ # # ]:UBC 0 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2517 : 0 : bucket = (bucket + 1) & (nbuckets - 1);
2518 : :
2519 : : /*
2520 : : * If we found an existing bucket with the same hashvalue, leave
2521 : : * it alone. It's okay for two MCVs to share a hashvalue.
2522 : : */
6064 tgl@sss.pgh.pa.us 2523 [ - + ]:CBC 63 : if (hashtable->skewBucket[bucket] != NULL)
6064 tgl@sss.pgh.pa.us 2524 :UBC 0 : continue;
2525 : :
2526 : : /* Okay, create a new skew bucket for this hashvalue. */
6064 tgl@sss.pgh.pa.us 2527 :CBC 126 : hashtable->skewBucket[bucket] = (HashSkewBucket *)
2528 : 63 : MemoryContextAlloc(hashtable->batchCxt,
2529 : : sizeof(HashSkewBucket));
2530 : 63 : hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2531 : 63 : hashtable->skewBucket[bucket]->tuples = NULL;
2532 : 63 : hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2533 : 63 : hashtable->nSkewBuckets++;
2534 : 63 : hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2535 : 63 : hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
5747 rhaas@postgresql.org 2536 [ + - ]: 63 : if (hashtable->spaceUsed > hashtable->spacePeak)
2537 : 63 : hashtable->spacePeak = hashtable->spaceUsed;
2538 : : }
2539 : :
3089 tgl@sss.pgh.pa.us 2540 : 3 : free_attstatsslot(&sslot);
2541 : : }
2542 : :
6064 2543 : 27 : ReleaseSysCache(statsTuple);
2544 : : }
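To make the sizing above concrete: with, say, 100 MCVs to use, pg_nextpower2_32(101) yields 128, and the extra two-bit shift brings the table to 512 slots, so the open-addressing table stays under roughly 20% full. That slack keeps probe chains short and guarantees the null entries that terminate the search loops both here and in ExecHashGetSkewBucket.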
2545 : :
2546 : : /*
2547 : : * ExecHashGetSkewBucket
2548 : : *
2549 : : * Returns the index of the skew bucket for this hashvalue,
2550 : : * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2551 : : * associated with any active skew bucket.
2552 : : */
2553 : : int
2554 : 15147699 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2555 : : {
2556 : : int bucket;
2557 : :
2558 : : /*
2559 : : * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2560 : : * particular, this happens after the initial batch is done).
2561 : : */
2562 [ + + ]: 15147699 : if (!hashtable->skewEnabled)
2563 : 15087699 : return INVALID_SKEW_BUCKET_NO;
2564 : :
2565 : : /*
2566 : : * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2567 : : */
2568 : 60000 : bucket = hashvalue & (hashtable->skewBucketLen - 1);
2569 : :
2570 : : /*
2571 : : * While we have not hit a hole in the hashtable and have not hit the
2572 : : * desired bucket, we have collided with some other hash value, so try the
2573 : : * next bucket location.
2574 : : */
2575 [ + + ]: 63915 : while (hashtable->skewBucket[bucket] != NULL &&
2576 [ + + ]: 5409 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2577 : 3915 : bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2578 : :
2579 : : /*
2580 : : * Found the desired bucket?
2581 : : */
2582 [ + + ]: 60000 : if (hashtable->skewBucket[bucket] != NULL)
2583 : 1494 : return bucket;
2584 : :
2585 : : /*
2586 : : * There must not be any hashtable entry for this hash value.
2587 : : */
2588 : 58506 : return INVALID_SKEW_BUCKET_NO;
2589 : : }
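The lookup above is ordinary linear probing, with the power-of-2 table length letting a mask stand in for the modulo. A minimal standalone sketch of the same loop (not part of nodeHash.c; the slot type and the -1 sentinel are stand-ins for HashSkewBucket and INVALID_SKEW_BUCKET_NO):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct SketchSkewSlot
    {
        uint32_t    hashvalue;
        /* tuple list omitted in this sketch */
    } SketchSkewSlot;

    static int
    sketch_skew_lookup(SketchSkewSlot **slots, uint32_t len, uint32_t hashvalue)
    {
        uint32_t    bucket = hashvalue & (len - 1);     /* len is a power of 2 */

        /* Step past colliding entries; a NULL slot means "not present". */
        while (slots[bucket] != NULL && slots[bucket]->hashvalue != hashvalue)
            bucket = (bucket + 1) & (len - 1);

        return slots[bucket] != NULL ? (int) bucket : -1;
    }

The same reliance on eventually hitting a NULL slot is why ExecHashRemoveNextSkewBucket below must retire buckets in reverse order of creation, as its closing comment explains.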
2590 : :
2591 : : /*
2592 : : * ExecHashSkewTableInsert
2593 : : *
2594 : : * Insert a tuple into the skew hashtable.
2595 : : *
2596 : : * This should generally match up with the current-batch case in
2597 : : * ExecHashTableInsert.
2598 : : */
2599 : : static void
2600 : 294 : ExecHashSkewTableInsert(HashJoinTable hashtable,
2601 : : TupleTableSlot *slot,
2602 : : uint32 hashvalue,
2603 : : int bucketNumber)
2604 : : {
2605 : : bool shouldFree;
2538 andres@anarazel.de 2606 : 294 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2607 : : HashJoinTuple hashTuple;
2608 : : int hashTupleSize;
2609 : :
2610 : : /* Create the HashJoinTuple */
6064 tgl@sss.pgh.pa.us 2611 : 294 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2612 : 294 : hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2613 : : hashTupleSize);
2614 : 294 : hashTuple->hashvalue = hashvalue;
2615 : 294 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
5415 2616 : 294 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2617 : :
2618 : : /* Push it onto the front of the skew bucket's list */
2868 andres@anarazel.de 2619 : 294 : hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
6064 tgl@sss.pgh.pa.us 2620 : 294 : hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
2868 andres@anarazel.de 2621 [ - + ]: 294 : Assert(hashTuple != hashTuple->next.unshared);
2622 : :
2623 : : /* Account for space used, and back off if we've used too much */
6064 tgl@sss.pgh.pa.us 2624 : 294 : hashtable->spaceUsed += hashTupleSize;
2625 : 294 : hashtable->spaceUsedSkew += hashTupleSize;
5747 rhaas@postgresql.org 2626 [ + + ]: 294 : if (hashtable->spaceUsed > hashtable->spacePeak)
2627 : 216 : hashtable->spacePeak = hashtable->spaceUsed;
6064 tgl@sss.pgh.pa.us 2628 [ + + ]: 345 : while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2629 : 51 : ExecHashRemoveNextSkewBucket(hashtable);
2630 : :
2631 : : /* Check we are not over the total spaceAllowed, either */
2632 [ - + ]: 294 : if (hashtable->spaceUsed > hashtable->spaceAllowed)
6064 tgl@sss.pgh.pa.us 2633 :UBC 0 : ExecHashIncreaseNumBatches(hashtable);
2634 : :
2538 andres@anarazel.de 2635 [ + - ]:CBC 294 : if (shouldFree)
2636 : 294 : heap_free_minimal_tuple(tuple);
6064 tgl@sss.pgh.pa.us 2637 : 294 : }
2638 : :
2639 : : /*
2640 : : * ExecHashRemoveNextSkewBucket
2641 : : *
2642 : : * Remove the least valuable skew bucket by pushing its tuples into
2643 : : * the main hash table.
2644 : : */
2645 : : static void
2646 : 51 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2647 : : {
2648 : : int bucketToRemove;
2649 : : HashSkewBucket *bucket;
2650 : : uint32 hashvalue;
2651 : : int bucketno;
2652 : : int batchno;
2653 : : HashJoinTuple hashTuple;
2654 : :
2655 : : /* Locate the bucket to remove */
2656 : 51 : bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2657 : 51 : bucket = hashtable->skewBucket[bucketToRemove];
2658 : :
2659 : : /*
2660 : : * Calculate which bucket and batch the tuples belong to in the main
2661 : : * hashtable. They all have the same hash value, so it's the same for all
2662 : : * of them. Also note that it's not possible for nbatch to increase while
2663 : : * we are processing the tuples.
2664 : : */
2665 : 51 : hashvalue = bucket->hashvalue;
2666 : 51 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2667 : :
2668 : : /* Process all tuples in the bucket */
2669 : 51 : hashTuple = bucket->tuples;
2670 [ + + ]: 225 : while (hashTuple != NULL)
2671 : : {
2868 andres@anarazel.de 2672 : 174 : HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2673 : : MinimalTuple tuple;
2674 : : Size tupleSize;
2675 : :
2676 : : /*
2677 : : * This code must agree with ExecHashTableInsert. We do not use
2678 : : * ExecHashTableInsert directly as ExecHashTableInsert expects a
2679 : : * TupleTableSlot while we already have HashJoinTuples.
2680 : : */
6064 tgl@sss.pgh.pa.us 2681 : 174 : tuple = HJTUPLE_MINTUPLE(hashTuple);
2682 : 174 : tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2683 : :
2684 : : /* Decide whether to put the tuple in the hash table or a temp file */
2685 [ + + ]: 174 : if (batchno == hashtable->curbatch)
2686 : : {
2687 : : /* Move the tuple to the main hash table */
2688 : : HashJoinTuple copyTuple;
2689 : :
2690 : : /*
2691 : : * We must copy the tuple into the dense storage, else it will not
2692 : : * be found by, eg, ExecHashIncreaseNumBatches.
2693 : : */
3550 2694 : 69 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2695 : 69 : memcpy(copyTuple, hashTuple, tupleSize);
2696 : 69 : pfree(hashTuple);
2697 : :
2868 andres@anarazel.de 2698 : 69 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2699 : 69 : hashtable->buckets.unshared[bucketno] = copyTuple;
2700 : :
2701 : : /* We have reduced skew space, but overall space doesn't change */
6064 tgl@sss.pgh.pa.us 2702 : 69 : hashtable->spaceUsedSkew -= tupleSize;
2703 : : }
2704 : : else
2705 : : {
2706 : : /* Put the tuple into a temp file for later batches */
2707 [ - + ]: 105 : Assert(batchno > hashtable->curbatch);
2708 : 105 : ExecHashJoinSaveTuple(tuple, hashvalue,
892 tomas.vondra@postgre 2709 : 105 : &hashtable->innerBatchFile[batchno],
2710 : : hashtable);
6064 tgl@sss.pgh.pa.us 2711 : 105 : pfree(hashTuple);
2712 : 105 : hashtable->spaceUsed -= tupleSize;
2713 : 105 : hashtable->spaceUsedSkew -= tupleSize;
2714 : : }
2715 : :
2716 : 174 : hashTuple = nextHashTuple;
2717 : :
2718 : : /* allow this loop to be cancellable */
3176 2719 [ - + ]: 174 : CHECK_FOR_INTERRUPTS();
2720 : : }
2721 : :
2722 : : /*
2723 : : * Free the bucket struct itself and reset the hashtable entry to NULL.
2724 : : *
2725 : : * NOTE: this is not nearly as simple as it looks on the surface, because
2726 : : * of the possibility of collisions in the hashtable. Suppose that hash
2727 : : * values A and B collide at a particular hashtable entry, and that A was
2728 : : * entered first so B gets shifted to a different table entry. If we were
2729 : : * to remove A first then ExecHashGetSkewBucket would mistakenly start
2730 : : * reporting that B is not in the hashtable, because it would hit the NULL
2731 : : * before finding B. However, we always remove entries in the reverse
2732 : : * order of creation, so this failure cannot happen.
2733 : : */
6064 2734 : 51 : hashtable->skewBucket[bucketToRemove] = NULL;
2735 : 51 : hashtable->nSkewBuckets--;
2736 : 51 : pfree(bucket);
2737 : 51 : hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2738 : 51 : hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2739 : :
2740 : : /*
2741 : : * If we have removed all skew buckets then give up on skew optimization.
2742 : : * Release the arrays since they aren't useful any more.
2743 : : */
2744 [ - + ]: 51 : if (hashtable->nSkewBuckets == 0)
2745 : : {
6064 tgl@sss.pgh.pa.us 2746 :UBC 0 : hashtable->skewEnabled = false;
2747 : 0 : pfree(hashtable->skewBucket);
2748 : 0 : pfree(hashtable->skewBucketNums);
2749 : 0 : hashtable->skewBucket = NULL;
2750 : 0 : hashtable->skewBucketNums = NULL;
2751 : 0 : hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2752 : 0 : hashtable->spaceUsedSkew = 0;
2753 : : }
6064 tgl@sss.pgh.pa.us 2754 :CBC 51 : }
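/*
 * Illustrative standalone sketch (not from nodeHash.c): the list-splicing
 * pattern used by ExecHashRemoveNextSkewBucket() above -- walk a singly
 * linked chain, saving each node's next pointer before pushing the node onto
 * the front of a destination chain.  The DemoTuple type and function name
 * are hypothetical stand-ins, not PostgreSQL APIs.
 */
#include <stddef.h>

typedef struct DemoTuple
{
    struct DemoTuple *next;
    int         payload;
} DemoTuple;

/* Move every node of *source onto the front of *dest, leaving *source empty. */
static void
demo_move_chain(DemoTuple **source, DemoTuple **dest)
{
    DemoTuple  *node = *source;

    while (node != NULL)
    {
        DemoTuple  *next = node->next;  /* save before we relink the node */

        node->next = *dest;
        *dest = node;
        node = next;
    }
    *source = NULL;
}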
2755 : :
2756 : : /*
2757 : : * Reserve space in the DSM segment for instrumentation data.
2758 : : */
2759 : : void
2883 andres@anarazel.de 2760 : 99 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2761 : : {
2762 : : size_t size;
2763 : :
2764 : : /* don't need this if not instrumenting or no workers */
2822 tgl@sss.pgh.pa.us 2765 [ + + - + ]: 99 : if (!node->ps.instrument || pcxt->nworkers == 0)
2766 : 57 : return;
2767 : :
2883 andres@anarazel.de 2768 : 42 : size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2769 : 42 : size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2770 : 42 : shm_toc_estimate_chunk(&pcxt->estimator, size);
2771 : 42 : shm_toc_estimate_keys(&pcxt->estimator, 1);
2772 : : }
2773 : :
2774 : : /*
2775 : : * Set up a space in the DSM for all workers to record instrumentation data
2776 : : * about their hash table.
2777 : : */
2778 : : void
2779 : 99 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2780 : : {
2781 : : size_t size;
2782 : :
2783 : : /* don't need this if not instrumenting or no workers */
2822 tgl@sss.pgh.pa.us 2784 [ + + - + ]: 99 : if (!node->ps.instrument || pcxt->nworkers == 0)
2785 : 57 : return;
2786 : :
2883 andres@anarazel.de 2787 : 42 : size = offsetof(SharedHashInfo, hinstrument) +
2788 : 42 : pcxt->nworkers * sizeof(HashInstrumentation);
2789 : 42 : node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2790 : :
2791 : : /* Each per-worker area must start out as zeroes. */
2792 : 42 : memset(node->shared_info, 0, size);
2793 : :
2794 : 42 : node->shared_info->num_workers = pcxt->nworkers;
2795 : 42 : shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2796 : 42 : node->shared_info);
2797 : : }
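/*
 * Illustrative standalone sketch (not from nodeHash.c): the sizing and
 * indexing idiom used by ExecHashEstimate()/ExecHashInitializeDSM() above --
 * a header with a flexible array member is sized as offsetof(type, array)
 * plus nworkers * sizeof(element), zeroed, and each worker writes only to
 * its own slot.  The Demo* names are hypothetical; plain malloc stands in
 * for the DSM allocation.
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct DemoInstrumentation
{
    int         nbuckets;
    int         nbatch;
    size_t      space_peak;
} DemoInstrumentation;

typedef struct DemoSharedInfo
{
    int         num_workers;
    DemoInstrumentation hinstrument[];  /* one slot per worker */
} DemoSharedInfo;

static DemoSharedInfo *
demo_shared_info_create(int nworkers)
{
    size_t      size = offsetof(DemoSharedInfo, hinstrument) +
        (size_t) nworkers * sizeof(DemoInstrumentation);
    DemoSharedInfo *info = malloc(size);

    if (info == NULL)
        return NULL;
    /* Each per-worker area must start out as zeroes, as in the real code. */
    memset(info, 0, size);
    info->num_workers = nworkers;
    return info;
}

/* A worker numbered 'worker_number' records stats only in its own slot. */
static DemoInstrumentation *
demo_my_slot(DemoSharedInfo *info, int worker_number)
{
    return &info->hinstrument[worker_number];
}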
2798 : :
2799 : : /*
2800 : : * Locate the DSM space for hash table instrumentation data that we'll write
2801 : : * to at shutdown time.
2802 : : */
2803 : : void
2804 : 279 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2805 : : {
2806 : : SharedHashInfo *shared_info;
2807 : :
2808 : : /* don't need this if not instrumenting */
2822 tgl@sss.pgh.pa.us 2809 [ + + ]: 279 : if (!node->ps.instrument)
2810 : 153 : return;
2811 : :
2812 : : /*
2813 : : * Find our entry in the shared area, and set up a pointer to it so that
2814 : : * we'll accumulate stats there when shutting down or rebuilding the hash
2815 : : * table.
2816 : : */
2817 : : shared_info = (SharedHashInfo *)
2818 : 126 : shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2819 : 126 : node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2820 : : }
2821 : :
2822 : : /*
2823 : : * Collect EXPLAIN stats if needed, saving them into DSM memory if
2824 : : * ExecHashInitializeWorker was called, or local storage if not. In the
2825 : : * parallel case, this must be done in ExecShutdownHash() rather than
2826 : : * ExecEndHash() because the latter runs after we've detached from the DSM
2827 : : * segment.
2828 : : */
2829 : : void
2883 andres@anarazel.de 2830 : 15708 : ExecShutdownHash(HashState *node)
2831 : : {
2832 : : /* Allocate save space if EXPLAIN'ing and we didn't do so already */
2025 tgl@sss.pgh.pa.us 2833 [ + + + + ]: 15708 : if (node->ps.instrument && !node->hinstrument)
1141 peter@eisentraut.org 2834 : 57 : node->hinstrument = palloc0_object(HashInstrumentation);
2835 : : /* Now accumulate data for the current (final) hash table */
2883 andres@anarazel.de 2836 [ + + + + ]: 15708 : if (node->hinstrument && node->hashtable)
2025 tgl@sss.pgh.pa.us 2837 : 168 : ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
2883 andres@anarazel.de 2838 : 15708 : }
2839 : :
2840 : : /*
2841 : : * Retrieve instrumentation data from workers before the DSM segment is
2842 : : * detached, so that EXPLAIN can access it.
2843 : : */
2844 : : void
2845 : 42 : ExecHashRetrieveInstrumentation(HashState *node)
2846 : : {
2847 : 42 : SharedHashInfo *shared_info = node->shared_info;
2848 : : size_t size;
2849 : :
2822 tgl@sss.pgh.pa.us 2850 [ - + ]: 42 : if (shared_info == NULL)
2822 tgl@sss.pgh.pa.us 2851 :UBC 0 : return;
2852 : :
2853 : : /* Replace node->shared_info with a copy in backend-local memory. */
2883 andres@anarazel.de 2854 :CBC 42 : size = offsetof(SharedHashInfo, hinstrument) +
2855 : 42 : shared_info->num_workers * sizeof(HashInstrumentation);
2856 : 42 : node->shared_info = palloc(size);
2857 : 42 : memcpy(node->shared_info, shared_info, size);
2858 : : }
2859 : :
2860 : : /*
2861 : : * Accumulate instrumentation data from 'hashtable' into an
2862 : : * initially-zeroed HashInstrumentation struct.
2863 : : *
2864 : : * This is used to merge information across successive hash table instances
2865 : : * within a single plan node. We take the maximum values of each interesting
2866 : : * number. The largest nbuckets and largest nbatch values might have occurred
2867 : : * in different instances, so there's some risk of confusion from reporting
2868 : : * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
2869 : : * issue if we don't report the largest values. Similarly, we want to report
2870 : : * the largest spacePeak regardless of whether it happened in the same
2871 : : * instance as the largest nbuckets or nbatch. All the instances should have
2872 : : * the same nbuckets_original and nbatch_original; but there's little value
2873 : : * in depending on that here, so handle them the same way.
2874 : : */
2875 : : void
2025 tgl@sss.pgh.pa.us 2876 : 168 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
2877 : : HashJoinTable hashtable)
2878 : : {
2879 : 168 : instrument->nbuckets = Max(instrument->nbuckets,
2880 : : hashtable->nbuckets);
2881 : 168 : instrument->nbuckets_original = Max(instrument->nbuckets_original,
2882 : : hashtable->nbuckets_original);
2883 : 168 : instrument->nbatch = Max(instrument->nbatch,
2884 : : hashtable->nbatch);
2885 : 168 : instrument->nbatch_original = Max(instrument->nbatch_original,
2886 : : hashtable->nbatch_original);
2887 : 168 : instrument->space_peak = Max(instrument->space_peak,
2888 : : hashtable->spacePeak);
2883 andres@anarazel.de 2889 : 168 : }
2890 : :
2891 : : /*
2892 : : * Allocate 'size' bytes from the currently active HashMemoryChunk
2893 : : */
2894 : : static void *
4065 heikki.linnakangas@i 2895 : 4715186 : dense_alloc(HashJoinTable hashtable, Size size)
2896 : : {
2897 : : HashMemoryChunk newChunk;
2898 : : char *ptr;
2899 : :
2900 : : /* just in case the size is not already aligned properly */
2901 : 4715186 : size = MAXALIGN(size);
2902 : :
2903 : : /*
2904 : : * If tuple size is larger than threshold, allocate a separate chunk.
2905 : : */
2906 [ - + ]: 4715186 : if (size > HASH_CHUNK_THRESHOLD)
2907 : : {
2908 : : /* allocate new chunk and put it at the beginning of the list */
4065 heikki.linnakangas@i 2909 :UBC 0 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2910 : : HASH_CHUNK_HEADER_SIZE + size);
2911 : 0 : newChunk->maxlen = size;
2855 tgl@sss.pgh.pa.us 2912 : 0 : newChunk->used = size;
2913 : 0 : newChunk->ntuples = 1;
2914 : :
2915 : : /*
2916 : : * Add this chunk to the list after the first existing chunk, so that
2917 : : * we don't lose the remaining space in the "current" chunk.
2918 : : */
4065 heikki.linnakangas@i 2919 [ # # ]: 0 : if (hashtable->chunks != NULL)
2920 : : {
2921 : 0 : newChunk->next = hashtable->chunks->next;
2868 andres@anarazel.de 2922 : 0 : hashtable->chunks->next.unshared = newChunk;
2923 : : }
2924 : : else
2925 : : {
2926 : 0 : newChunk->next.unshared = hashtable->chunks;
4065 heikki.linnakangas@i 2927 : 0 : hashtable->chunks = newChunk;
2928 : : }
2929 : :
2855 tgl@sss.pgh.pa.us 2930 : 0 : return HASH_CHUNK_DATA(newChunk);
2931 : : }
2932 : :
2933 : : /*
2934 : : * See if we have enough space for it in the current chunk (if any). If
2935 : : * not, allocate a fresh chunk.
2936 : : */
4065 heikki.linnakangas@i 2937 [ + + ]:CBC 4715186 : if ((hashtable->chunks == NULL) ||
2938 [ + + ]: 4703363 : (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2939 : : {
2940 : : /* allocate new chunk and put it at the beginning of the list */
2941 : 17860 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2942 : : HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
2943 : :
2944 : 17860 : newChunk->maxlen = HASH_CHUNK_SIZE;
2945 : 17860 : newChunk->used = size;
2946 : 17860 : newChunk->ntuples = 1;
2947 : :
2868 andres@anarazel.de 2948 : 17860 : newChunk->next.unshared = hashtable->chunks;
4065 heikki.linnakangas@i 2949 : 17860 : hashtable->chunks = newChunk;
2950 : :
2855 tgl@sss.pgh.pa.us 2951 : 17860 : return HASH_CHUNK_DATA(newChunk);
2952 : : }
2953 : :
2954 : : /* There is enough space in the current chunk, let's add the tuple */
2955 : 4697326 : ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
4065 heikki.linnakangas@i 2956 : 4697326 : hashtable->chunks->used += size;
2957 : 4697326 : hashtable->chunks->ntuples += 1;
2958 : :
2959 : : /* return pointer to the start of the tuple memory */
2960 : 4697326 : return ptr;
2961 : : }
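/*
 * Illustrative standalone sketch (not from nodeHash.c): a minimal bump
 * allocator with the same shape as dense_alloc() above -- oversized requests
 * get a dedicated chunk linked behind the current one so its free space is
 * not lost, everything else is bump-allocated from the head chunk, and a new
 * head chunk is started when the current one is full.  The chunk size, the
 * threshold and all Demo* names are hypothetical, malloc stands in for the
 * batch memory context, and alignment handling is omitted.
 */
#include <stddef.h>
#include <stdlib.h>

#define DEMO_CHUNK_SIZE      (32 * 1024)
#define DEMO_CHUNK_THRESHOLD (DEMO_CHUNK_SIZE / 4)

typedef struct DemoChunk
{
    struct DemoChunk *next;
    size_t      maxlen;         /* usable payload bytes */
    size_t      used;           /* payload bytes handed out so far */
    char        data[];         /* payload follows the header */
} DemoChunk;

static DemoChunk *
demo_new_chunk(size_t payload_size)
{
    DemoChunk  *chunk = malloc(sizeof(DemoChunk) + payload_size);

    if (chunk == NULL)
        return NULL;
    chunk->next = NULL;
    chunk->maxlen = payload_size;
    chunk->used = 0;
    return chunk;
}

static void *
demo_dense_alloc(DemoChunk **chunks, size_t size)
{
    DemoChunk  *chunk;
    void       *ptr;

    /* Oversized requests get their own chunk, linked after the head chunk. */
    if (size > DEMO_CHUNK_THRESHOLD)
    {
        chunk = demo_new_chunk(size);
        if (chunk == NULL)
            return NULL;
        chunk->used = size;
        if (*chunks != NULL)
        {
            chunk->next = (*chunks)->next;
            (*chunks)->next = chunk;
        }
        else
            *chunks = chunk;
        return chunk->data;
    }

    /* Start a fresh head chunk if the current one lacks room (or is absent). */
    if (*chunks == NULL || (*chunks)->maxlen - (*chunks)->used < size)
    {
        chunk = demo_new_chunk(DEMO_CHUNK_SIZE);
        if (chunk == NULL)
            return NULL;
        chunk->next = *chunks;
        *chunks = chunk;
    }

    /* Bump-allocate from the head chunk. */
    ptr = (*chunks)->data + (*chunks)->used;
    (*chunks)->used += size;
    return ptr;
}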
2962 : :
2963 : : /*
2964 : : * Allocate space for a tuple in shared dense storage. This is equivalent to
2965 : : * dense_alloc but for Parallel Hash using shared memory.
2966 : : *
2967 : : * While loading a tuple into shared memory, we might run out of memory and
2968 : : * decide to repartition, or determine that the load factor is too high and
2969 : : * decide to expand the bucket array, or discover that another participant has
2970 : : * commanded us to help do that. Return NULL if the number of buckets or
2971 : : * batches has changed, indicating that the caller must retry (considering the
2972 : : * possibility that the tuple no longer belongs in the same batch).
2973 : : */
2974 : : static HashJoinTuple
2868 andres@anarazel.de 2975 : 1198679 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
2976 : : dsa_pointer *shared)
2977 : : {
2978 : 1198679 : ParallelHashJoinState *pstate = hashtable->parallel_state;
2979 : : dsa_pointer chunk_shared;
2980 : : HashMemoryChunk chunk;
2981 : : Size chunk_size;
2982 : : HashJoinTuple result;
2983 : 1198679 : int curbatch = hashtable->curbatch;
2984 : :
2985 : 1198679 : size = MAXALIGN(size);
2986 : :
2987 : : /*
2988 : : * Fast path: if there is enough space in this backend's current chunk,
2989 : : * then we can allocate without any locking.
2990 : : */
2991 : 1198679 : chunk = hashtable->current_chunk;
2992 [ + + + - ]: 1198679 : if (chunk != NULL &&
2854 tgl@sss.pgh.pa.us 2993 : 1198204 : size <= HASH_CHUNK_THRESHOLD &&
2868 andres@anarazel.de 2994 [ + + ]: 1198204 : chunk->maxlen - chunk->used >= size)
2995 : : {
2996 : :
2997 : 1196790 : chunk_shared = hashtable->current_chunk_shared;
2998 [ - + ]: 1196790 : Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
2999 : 1196790 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
2855 tgl@sss.pgh.pa.us 3000 : 1196790 : result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
2868 andres@anarazel.de 3001 : 1196790 : chunk->used += size;
3002 : :
3003 [ - + ]: 1196790 : Assert(chunk->used <= chunk->maxlen);
3004 [ - + ]: 1196790 : Assert(result == dsa_get_address(hashtable->area, *shared));
3005 : :
3006 : 1196790 : return result;
3007 : : }
3008 : :
3009 : : /* Slow path: try to allocate a new chunk. */
3010 : 1889 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3011 : :
3012 : : /*
3013 : : * Check if we need to help increase the number of buckets or batches.
3014 : : */
3015 [ + + ]: 1889 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3016 [ + + ]: 1864 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3017 : : {
3018 : 82 : ParallelHashGrowth growth = pstate->growth;
3019 : :
3020 : 82 : hashtable->current_chunk = NULL;
3021 : 82 : LWLockRelease(&pstate->lock);
3022 : :
3023 : : /* Another participant has commanded us to help grow. */
3024 [ + + ]: 82 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3025 : 25 : ExecParallelHashIncreaseNumBatches(hashtable);
3026 [ + - ]: 57 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3027 : 57 : ExecParallelHashIncreaseNumBuckets(hashtable);
3028 : :
3029 : : /* The caller must retry. */
3030 : 82 : return NULL;
3031 : : }
3032 : :
3033 : : /* Oversized tuples get their own chunk. */
3034 [ + + ]: 1807 : if (size > HASH_CHUNK_THRESHOLD)
3035 : 24 : chunk_size = size + HASH_CHUNK_HEADER_SIZE;
3036 : : else
3037 : 1783 : chunk_size = HASH_CHUNK_SIZE;
3038 : :
3039 : : /* Check if it's time to grow batches or buckets. */
3040 [ + + ]: 1807 : if (pstate->growth != PHJ_GROWTH_DISABLED)
3041 : : {
3042 [ - + ]: 957 : Assert(curbatch == 0);
949 tmunro@postgresql.or 3043 [ - + ]: 957 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
3044 : :
3045 : : /*
3046 : : * Check if our space limit would be exceeded. To avoid choking on
3047 : : * very large tuples or a very low hash_mem setting, we'll always allow
3048 : : * each backend to allocate at least one chunk.
3049 : : */
2868 andres@anarazel.de 3050 [ + + ]: 957 : if (hashtable->batches[0].at_least_one_chunk &&
3051 : 708 : hashtable->batches[0].shared->size +
3052 [ + + ]: 708 : chunk_size > pstate->space_allowed)
3053 : : {
3054 : 22 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3055 : 22 : hashtable->batches[0].shared->space_exhausted = true;
3056 : 22 : LWLockRelease(&pstate->lock);
3057 : :
3058 : 22 : return NULL;
3059 : : }
3060 : :
3061 : : /* Check if our load factor limit would be exceeded. */
3062 [ + + ]: 935 : if (hashtable->nbatch == 1)
3063 : : {
3064 : 793 : hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
3065 : 793 : hashtable->batches[0].ntuples = 0;
3066 : : /* Guard against integer overflow and alloc size overflow */
3067 : 793 : if (hashtable->batches[0].shared->ntuples + 1 >
3068 [ + + ]: 793 : hashtable->nbuckets * NTUP_PER_BUCKET &&
2696 tmunro@postgresql.or 3069 [ + - ]: 54 : hashtable->nbuckets < (INT_MAX / 2) &&
3070 [ + - ]: 54 : hashtable->nbuckets * 2 <=
3071 : : MaxAllocSize / sizeof(dsa_pointer_atomic))
3072 : : {
2868 andres@anarazel.de 3073 : 54 : pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
3074 : 54 : LWLockRelease(&pstate->lock);
3075 : :
3076 : 54 : return NULL;
3077 : : }
3078 : : }
3079 : : }
3080 : :
3081 : : /* We are cleared to allocate a new chunk. */
3082 : 1731 : chunk_shared = dsa_allocate(hashtable->area, chunk_size);
3083 : 1731 : hashtable->batches[curbatch].shared->size += chunk_size;
3084 : 1731 : hashtable->batches[curbatch].at_least_one_chunk = true;
3085 : :
3086 : : /* Set up the chunk. */
3087 : 1731 : chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
3088 : 1731 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
3089 : 1731 : chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
3090 : 1731 : chunk->used = size;
3091 : :
3092 : : /*
3093 : : * Push it onto the list of chunks, so that it can be found if we need to
3094 : : * increase the number of buckets or batches (batch 0 only) and later for
3095 : : * freeing the memory (all batches).
3096 : : */
3097 : 1731 : chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
3098 : 1731 : hashtable->batches[curbatch].shared->chunks = chunk_shared;
3099 : :
3100 [ + + ]: 1731 : if (size <= HASH_CHUNK_THRESHOLD)
3101 : : {
3102 : : /*
3103 : : * Make this the current chunk so that we can use the fast path to
3104 : : * fill the rest of it up in future calls.
3105 : : */
3106 : 1713 : hashtable->current_chunk = chunk;
3107 : 1713 : hashtable->current_chunk_shared = chunk_shared;
3108 : : }
3109 : 1731 : LWLockRelease(&pstate->lock);
3110 : :
2855 tgl@sss.pgh.pa.us 3111 [ - + ]: 1731 : Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
3112 : 1731 : result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
3113 : :
2868 andres@anarazel.de 3114 : 1731 : return result;
3115 : : }
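/*
 * Illustrative standalone sketch (not from nodeHash.c): the locking shape of
 * ExecParallelHashTupleAlloc() above -- a lock-free fast path that
 * bump-allocates from a thread-private "current chunk", and a slow path that
 * takes a shared lock only when a new chunk has to be accounted for.
 * pthread_mutex_t stands in for the LWLock, malloc for the DSA allocation,
 * and all Demo* names are hypothetical.  For simplicity the sketch assumes
 * size <= DEMO_CHUNK_SIZE and omits the grow/retry protocol.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

#define DEMO_CHUNK_SIZE 32768

typedef struct DemoSharedArea
{
    pthread_mutex_t lock;
    size_t      total_allocated;    /* protected by lock */
} DemoSharedArea;

typedef struct DemoLocalChunk
{
    char       *base;           /* private to one thread: no lock needed */
    size_t      maxlen;
    size_t      used;
} DemoLocalChunk;

static void *
demo_parallel_alloc(DemoSharedArea *area, DemoLocalChunk *current, size_t size)
{
    void       *result;
    char       *base;

    /* Fast path: room left in this thread's current chunk. */
    if (current->base != NULL && current->maxlen - current->used >= size)
    {
        result = current->base + current->used;
        current->used += size;
        return result;
    }

    /* Slow path: grab a new chunk and charge it to the shared accounting. */
    base = malloc(DEMO_CHUNK_SIZE);
    if (base == NULL)
        return NULL;

    pthread_mutex_lock(&area->lock);
    area->total_allocated += DEMO_CHUNK_SIZE;
    pthread_mutex_unlock(&area->lock);

    current->base = base;
    current->maxlen = DEMO_CHUNK_SIZE;
    current->used = size;
    return base;
}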
3116 : :
3117 : : /*
3118 : : * One backend needs to set up the shared batch state including tuplestores.
3119 : : * Other backends will ensure they have correctly configured accessors by
3120 : : * calling ExecParallelHashEnsureBatchAccessors().
3121 : : */
3122 : : static void
3123 : 114 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
3124 : : {
3125 : 114 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3126 : : ParallelHashJoinBatch *batches;
3127 : : MemoryContext oldcxt;
3128 : : int i;
3129 : :
3130 [ - + ]: 114 : Assert(hashtable->batches == NULL);
3131 : :
3132 : : /* Allocate space. */
3133 : 114 : pstate->batches =
3134 : 114 : dsa_allocate0(hashtable->area,
3135 : : EstimateParallelHashJoinBatch(hashtable) * nbatch);
3136 : 114 : pstate->nbatch = nbatch;
3137 : 114 : batches = dsa_get_address(hashtable->area, pstate->batches);
3138 : :
3139 : : /*
3140 : : * Use hash join spill memory context to allocate accessors, including
3141 : : * buffers for the temporary files.
3142 : : */
892 tomas.vondra@postgre 3143 : 114 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3144 : :
3145 : : /* Allocate this backend's accessor array. */
2868 andres@anarazel.de 3146 : 114 : hashtable->nbatch = nbatch;
1141 peter@eisentraut.org 3147 : 114 : hashtable->batches =
3148 : 114 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3149 : :
3150 : : /* Set up the shared state, tuplestores and backend-local accessors. */
2868 andres@anarazel.de 3151 [ + + ]: 486 : for (i = 0; i < hashtable->nbatch; ++i)
3152 : : {
3153 : 372 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3154 : 372 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3155 : : char name[MAXPGPATH];
3156 : :
3157 : : /*
3158 : : * All members of shared were zero-initialized. We just need to set
3159 : : * up the Barrier.
3160 : : */
3161 : 372 : BarrierInit(&shared->batch_barrier, 0);
3162 [ + + ]: 372 : if (i == 0)
3163 : : {
3164 : : /* Batch 0 doesn't need to be loaded. */
3165 : 114 : BarrierAttach(&shared->batch_barrier);
949 tmunro@postgresql.or 3166 [ + + ]: 456 : while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
2868 andres@anarazel.de 3167 : 342 : BarrierArriveAndWait(&shared->batch_barrier, 0);
3168 : 114 : BarrierDetach(&shared->batch_barrier);
3169 : : }
3170 : :
3171 : : /* Initialize accessor state. All members were zero-initialized. */
3172 : 372 : accessor->shared = shared;
3173 : :
3174 : : /* Initialize the shared tuplestores. */
3175 : 372 : snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
3176 : 372 : accessor->inner_tuples =
3177 : 372 : sts_initialize(ParallelHashJoinBatchInner(shared),
3178 : : pstate->nparticipants,
3179 : : ParallelWorkerNumber + 1,
3180 : : sizeof(uint32),
3181 : : SHARED_TUPLESTORE_SINGLE_PASS,
3182 : : &pstate->fileset,
3183 : : name);
3184 : 372 : snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
3185 : 372 : accessor->outer_tuples =
3186 : 372 : sts_initialize(ParallelHashJoinBatchOuter(shared,
3187 : : pstate->nparticipants),
3188 : : pstate->nparticipants,
3189 : : ParallelWorkerNumber + 1,
3190 : : sizeof(uint32),
3191 : : SHARED_TUPLESTORE_SINGLE_PASS,
3192 : : &pstate->fileset,
3193 : : name);
3194 : : }
3195 : :
3196 : 114 : MemoryContextSwitchTo(oldcxt);
3197 : 114 : }
3198 : :
3199 : : /*
3200 : : * Free the current set of ParallelHashJoinBatchAccessor objects.
3201 : : */
3202 : : static void
3203 : 38 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
3204 : : {
3205 : : int i;
3206 : :
3207 [ + + ]: 130 : for (i = 0; i < hashtable->nbatch; ++i)
3208 : : {
3209 : : /* Make sure no files are left open. */
3210 : 92 : sts_end_write(hashtable->batches[i].inner_tuples);
3211 : 92 : sts_end_write(hashtable->batches[i].outer_tuples);
3212 : 92 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3213 : 92 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3214 : : }
3215 : 38 : pfree(hashtable->batches);
3216 : 38 : hashtable->batches = NULL;
3217 : 38 : }
3218 : :
3219 : : /*
3220 : : * Make sure this backend has up-to-date accessors for the current set of
3221 : : * batches.
3222 : : */
3223 : : static void
3224 : 507 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
3225 : : {
3226 : 507 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3227 : : ParallelHashJoinBatch *batches;
3228 : : MemoryContext oldcxt;
3229 : : int i;
3230 : :
3231 [ + + ]: 507 : if (hashtable->batches != NULL)
3232 : : {
3233 [ + + ]: 378 : if (hashtable->nbatch == pstate->nbatch)
3234 : 376 : return;
3235 : 2 : ExecParallelHashCloseBatchAccessors(hashtable);
3236 : : }
3237 : :
3238 : : /*
3239 : : * We should never see a state where the batch-tracking array is freed,
3240 : : * because we should have given up sooner if we join when the build
3241 : : * barrier has reached the PHJ_BUILD_FREE phase.
3242 : : */
951 tmunro@postgresql.or 3243 [ - + ]: 131 : Assert(DsaPointerIsValid(pstate->batches));
3244 : :
3245 : : /*
3246 : : * Use hash join spill memory context to allocate accessors, including
3247 : : * buffers for the temporary files.
3248 : : */
892 tomas.vondra@postgre 3249 : 131 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3250 : :
3251 : : /* Allocate this backend's accessor array. */
2868 andres@anarazel.de 3252 : 131 : hashtable->nbatch = pstate->nbatch;
1141 peter@eisentraut.org 3253 : 131 : hashtable->batches =
3254 : 131 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3255 : :
3256 : : /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3257 : : batches = (ParallelHashJoinBatch *)
2868 andres@anarazel.de 3258 : 131 : dsa_get_address(hashtable->area, pstate->batches);
3259 : :
3260 : : /* Set up the accessor array and attach to the tuplestores. */
3261 [ + + ]: 625 : for (i = 0; i < hashtable->nbatch; ++i)
3262 : : {
3263 : 494 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3264 : 494 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3265 : :
3266 : 494 : accessor->shared = shared;
3267 : 494 : accessor->preallocated = 0;
3268 : 494 : accessor->done = false;
941 tmunro@postgresql.or 3269 : 494 : accessor->outer_eof = false;
2868 andres@anarazel.de 3270 : 494 : accessor->inner_tuples =
3271 : 494 : sts_attach(ParallelHashJoinBatchInner(shared),
3272 : : ParallelWorkerNumber + 1,
3273 : : &pstate->fileset);
3274 : 494 : accessor->outer_tuples =
3275 : 494 : sts_attach(ParallelHashJoinBatchOuter(shared,
3276 : : pstate->nparticipants),
3277 : : ParallelWorkerNumber + 1,
3278 : : &pstate->fileset);
3279 : : }
3280 : :
3281 : 131 : MemoryContextSwitchTo(oldcxt);
3282 : : }
3283 : :
3284 : : /*
3285 : : * Allocate an empty shared memory hash table for a given batch.
3286 : : */
3287 : : void
3288 : 318 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3289 : : {
3290 : 318 : ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3291 : : dsa_pointer_atomic *buckets;
3292 : 318 : int nbuckets = hashtable->parallel_state->nbuckets;
3293 : : int i;
3294 : :
3295 : 318 : batch->buckets =
3296 : 318 : dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3297 : : buckets = (dsa_pointer_atomic *)
3298 : 318 : dsa_get_address(hashtable->area, batch->buckets);
3299 [ + + ]: 1610046 : for (i = 0; i < nbuckets; ++i)
3300 : 1609728 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3301 : 318 : }
3302 : :
3303 : : /*
3304 : : * If we are currently attached to a shared hash join batch, detach. If we
3305 : : * are last to detach, clean up.
3306 : : */
3307 : : void
3308 : 12588 : ExecHashTableDetachBatch(HashJoinTable hashtable)
3309 : : {
3310 [ + + ]: 12588 : if (hashtable->parallel_state != NULL &&
3311 [ + + ]: 608 : hashtable->curbatch >= 0)
3312 : : {
3313 : 401 : int curbatch = hashtable->curbatch;
3314 : 401 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
941 tmunro@postgresql.or 3315 : 401 : bool attached = true;
3316 : :
3317 : : /* Make sure any temporary files are closed. */
2868 andres@anarazel.de 3318 : 401 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3319 : 401 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3320 : :
3321 : : /* After attaching we always get at least to PHJ_BATCH_PROBE. */
941 tmunro@postgresql.or 3322 [ + + - + ]: 401 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
3323 : : BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
3324 : :
3325 : : /*
3326 : : * If we're abandoning the PHJ_BATCH_PROBE phase early without having
3327 : : * reached the end of it, it means the plan doesn't want any more
3328 : : * tuples, and it is happy to abandon any tuples buffered in this
3329 : : * process's subplans. For correctness, we can't allow any process to
3330 : : * execute the PHJ_BATCH_SCAN phase, because we will never have the
3331 : : * complete set of match bits. Therefore we skip emitting unmatched
3332 : : * tuples in all backends (if this is a full/right join), as if those
3333 : : * tuples were all due to be emitted by this process and it has
3334 : : * abandoned them too.
3335 : : */
3336 [ + + ]: 401 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
3337 [ - + ]: 366 : !hashtable->batches[curbatch].outer_eof)
3338 : : {
3339 : : /*
3340 : : * This flag may be written to by multiple backends during
3341 : : * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
3342 : : * phase so requires no extra locking.
3343 : : */
941 tmunro@postgresql.or 3344 :UBC 0 : batch->skip_unmatched = true;
3345 : : }
3346 : :
3347 : : /*
3348 : : * Even if we aren't doing a full/right outer join, we'll step through
3349 : : * the PHJ_BATCH_SCAN phase just to maintain the invariant that
3350 : : * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
3351 : : */
941 tmunro@postgresql.or 3352 [ + + ]:CBC 401 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
3353 : 366 : attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
3354 [ + + + + ]: 401 : if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
3355 : : {
3356 : : /*
3357 : : * We are no longer attached to the batch barrier, but we're the
3358 : : * process that was chosen to free resources and it's safe to
3359 : : * assert the current phase. The ParallelHashJoinBatch can't go
3360 : : * away underneath us while we are attached to the build barrier,
3361 : : * making this access safe.
3362 : : */
949 3363 [ - + ]: 318 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
3364 : :
3365 : : /* Free shared chunks and buckets. */
2868 andres@anarazel.de 3366 [ + + ]: 1892 : while (DsaPointerIsValid(batch->chunks))
3367 : : {
3368 : : HashMemoryChunk chunk =
892 tgl@sss.pgh.pa.us 3369 : 1574 : dsa_get_address(hashtable->area, batch->chunks);
2868 andres@anarazel.de 3370 : 1574 : dsa_pointer next = chunk->next.shared;
3371 : :
3372 : 1574 : dsa_free(hashtable->area, batch->chunks);
3373 : 1574 : batch->chunks = next;
3374 : : }
3375 [ + - ]: 318 : if (DsaPointerIsValid(batch->buckets))
3376 : : {
3377 : 318 : dsa_free(hashtable->area, batch->buckets);
3378 : 318 : batch->buckets = InvalidDsaPointer;
3379 : : }
3380 : : }
3381 : :
3382 : : /*
3383 : : * Track the largest batch we've been attached to. Though each
3384 : : * backend might see a different subset of batches, explain.c will
3385 : : * scan the results from all backends to find the largest value.
3386 : : */
2856 3387 : 401 : hashtable->spacePeak =
3388 : 401 : Max(hashtable->spacePeak,
3389 : : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3390 : :
3391 : : /* Remember that we are not attached to a batch. */
2868 3392 : 401 : hashtable->curbatch = -1;
3393 : : }
3394 : 12588 : }
3395 : :
3396 : : /*
3397 : : * Detach from all shared resources. If we are last to detach, clean up.
3398 : : */
3399 : : void
3400 : 12187 : ExecHashTableDetach(HashJoinTable hashtable)
3401 : : {
951 tmunro@postgresql.or 3402 : 12187 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3403 : :
3404 : : /*
3405 : : * If we're involved in a parallel query, we must either have gotten all
3406 : : * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
3407 : : */
3408 [ + + - + ]: 12187 : Assert(!pstate ||
3409 : : BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
3410 : :
949 3411 [ + + + - ]: 12187 : if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
3412 : : {
3413 : : int i;
3414 : :
3415 : : /* Make sure any temporary files are closed. */
2868 andres@anarazel.de 3416 [ + - ]: 207 : if (hashtable->batches)
3417 : : {
3418 [ + + ]: 981 : for (i = 0; i < hashtable->nbatch; ++i)
3419 : : {
3420 : 774 : sts_end_write(hashtable->batches[i].inner_tuples);
3421 : 774 : sts_end_write(hashtable->batches[i].outer_tuples);
3422 : 774 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3423 : 774 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3424 : : }
3425 : : }
3426 : :
3427 : : /* If we're last to detach, clean up shared memory. */
951 tmunro@postgresql.or 3428 [ + + ]: 207 : if (BarrierArriveAndDetach(&pstate->build_barrier))
3429 : : {
3430 : : /*
3431 : : * Late joining processes will see this state and give up
3432 : : * immediately.
3433 : : */
949 3434 [ - + ]: 87 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
3435 : :
2868 andres@anarazel.de 3436 [ + - ]: 87 : if (DsaPointerIsValid(pstate->batches))
3437 : : {
3438 : 87 : dsa_free(hashtable->area, pstate->batches);
3439 : 87 : pstate->batches = InvalidDsaPointer;
3440 : : }
3441 : : }
3442 : : }
951 tmunro@postgresql.or 3443 : 12187 : hashtable->parallel_state = NULL;
2868 andres@anarazel.de 3444 : 12187 : }
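/*
 * Illustrative standalone sketch (not from nodeHash.c): the "last one to
 * detach cleans up" pattern used by ExecHashTableDetach() and
 * ExecHashTableDetachBatch() above, expressed with an ordinary reference
 * count instead of a Barrier.  pthreads and malloc/free stand in for the
 * shared-memory primitives; all names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct DemoSharedState
{
    pthread_mutex_t lock;
    int         attached;       /* number of attached participants */
    void       *resources;      /* shared resources to free exactly once */
} DemoSharedState;

/* Returns true if this caller was the last participant and freed resources. */
static bool
demo_detach(DemoSharedState *state)
{
    bool        last;

    pthread_mutex_lock(&state->lock);
    last = (--state->attached == 0);
    if (last)
    {
        free(state->resources);
        state->resources = NULL;
    }
    pthread_mutex_unlock(&state->lock);
    return last;
}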
3445 : :
3446 : : /*
3447 : : * Get the first tuple in a given bucket identified by number.
3448 : : */
3449 : : static inline HashJoinTuple
3450 : 1390215 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3451 : : {
3452 : : HashJoinTuple tuple;
3453 : : dsa_pointer p;
3454 : :
3455 [ - + ]: 1390215 : Assert(hashtable->parallel_state);
3456 : 1390215 : p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3457 : 1390215 : tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3458 : :
3459 : 1390215 : return tuple;
3460 : : }
3461 : :
3462 : : /*
3463 : : * Get the next tuple in the same bucket as 'tuple'.
3464 : : */
3465 : : static inline HashJoinTuple
3466 : 1894113 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3467 : : {
3468 : : HashJoinTuple next;
3469 : :
3470 [ - + ]: 1894113 : Assert(hashtable->parallel_state);
3471 : 1894113 : next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3472 : :
3473 : 1894113 : return next;
3474 : : }
3475 : :
3476 : : /*
3477 : : * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3478 : : */
3479 : : static inline void
3480 : 1481914 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3481 : : HashJoinTuple tuple,
3482 : : dsa_pointer tuple_shared)
3483 : : {
3484 : : for (;;)
3485 : : {
3486 : 1482817 : tuple->next.shared = dsa_pointer_atomic_read(head);
3487 [ + + ]: 1482817 : if (dsa_pointer_atomic_compare_exchange(head,
3488 : 1482817 : &tuple->next.shared,
3489 : : tuple_shared))
3490 : 1481914 : break;
3491 : : }
3492 : 1481914 : }
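/*
 * Illustrative standalone sketch (not from nodeHash.c): the same lock-free
 * "push onto the head of a shared chain" loop as ExecParallelHashPushTuple()
 * above, but using ordinary pointers and C11 <stdatomic.h> instead of
 * dsa_pointer atomics.  All Demo* names are hypothetical.  Push-only access
 * like this is safe without ABA protection; popping would need more care.
 */
#include <stdatomic.h>

typedef struct DemoNode
{
    struct DemoNode *next;
    int         payload;
} DemoNode;

static void
demo_atomic_push(_Atomic(DemoNode *) *head, DemoNode *node)
{
    DemoNode   *old_head = atomic_load(head);

    do
    {
        /* Link behind the head we saw; retry if another thread won the race. */
        node->next = old_head;
    } while (!atomic_compare_exchange_weak(head, &old_head, node));
}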
3493 : :
3494 : : /*
3495 : : * Prepare to work on a given batch.
3496 : : */
3497 : : void
3498 : 962 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3499 : : {
3500 [ - + ]: 962 : Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3501 : :
3502 : 962 : hashtable->curbatch = batchno;
3503 : 962 : hashtable->buckets.shared = (dsa_pointer_atomic *)
3504 : 962 : dsa_get_address(hashtable->area,
3505 : 962 : hashtable->batches[batchno].shared->buckets);
3506 : 962 : hashtable->nbuckets = hashtable->parallel_state->nbuckets;
47 michael@paquier.xyz 3507 :GNC 962 : hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
2868 andres@anarazel.de 3508 :CBC 962 : hashtable->current_chunk = NULL;
3509 : 962 : hashtable->current_chunk_shared = InvalidDsaPointer;
3510 : 962 : hashtable->batches[batchno].at_least_one_chunk = false;
3511 : 962 : }
3512 : :
3513 : : /*
3514 : : * Take the next available chunk from the queue of chunks being worked on in
3515 : : * parallel. Return NULL if there are none left. Otherwise return a pointer
3516 : : * to the chunk, and set *shared to the DSA pointer to the chunk.
3517 : : */
3518 : : static HashMemoryChunk
3519 : 599 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3520 : : {
3521 : 599 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3522 : : HashMemoryChunk chunk;
3523 : :
3524 : 599 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3525 [ + + ]: 599 : if (DsaPointerIsValid(pstate->chunk_work_queue))
3526 : : {
3527 : 505 : *shared = pstate->chunk_work_queue;
3528 : : chunk = (HashMemoryChunk)
3529 : 505 : dsa_get_address(hashtable->area, *shared);
3530 : 505 : pstate->chunk_work_queue = chunk->next.shared;
3531 : : }
3532 : : else
3533 : 94 : chunk = NULL;
3534 : 599 : LWLockRelease(&pstate->lock);
3535 : :
3536 : 599 : return chunk;
3537 : : }
3538 : :
3539 : : /*
3540 : : * Increase the space preallocated in this backend for a given inner batch by
3541 : : * at least a given amount. This allows us to track whether a given batch
3542 : : * would fit in memory when loaded back in. Also increase the number of
3543 : : * batches or buckets if required.
3544 : : *
3545 : : * This maintains a running estimation of how much space will be taken when we
3546 : : * load the batch back into memory by simulating the way chunks will be handed
3547 : : * out to workers. It's not perfectly accurate because the tuples will be
3548 : : * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3549 : : * it should be pretty close. It tends to overestimate by a fraction of a
3550 : : * chunk per worker since all workers gang up to preallocate during hashing,
3551 : : * but workers tend to reload batches alone if there are enough to go around,
3552 : : * leaving fewer partially filled chunks. This effect is bounded by
3553 : : * nparticipants.
3554 : : *
3555 : : * Return false if the number of batches or buckets has changed, and the
3556 : : * caller should reconsider which batch a given tuple now belongs in and call
3557 : : * again.
3558 : : */
3559 : : static bool
3560 : 784 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3561 : : {
3562 : 784 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3563 : 784 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3564 : 784 : size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3565 : :
3566 [ - + ]: 784 : Assert(batchno > 0);
3567 [ - + ]: 784 : Assert(batchno < hashtable->nbatch);
2854 tgl@sss.pgh.pa.us 3568 [ - + ]: 784 : Assert(size == MAXALIGN(size));
3569 : :
2868 andres@anarazel.de 3570 : 784 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3571 : :
3572 : : /* Has another participant commanded us to help grow? */
3573 [ + + ]: 784 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3574 [ - + ]: 773 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3575 : : {
3576 : 11 : ParallelHashGrowth growth = pstate->growth;
3577 : :
3578 : 11 : LWLockRelease(&pstate->lock);
3579 [ + - ]: 11 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3580 : 11 : ExecParallelHashIncreaseNumBatches(hashtable);
2868 andres@anarazel.de 3581 [ # # ]:UBC 0 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3582 : 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
3583 : :
2868 andres@anarazel.de 3584 :CBC 11 : return false;
3585 : : }
3586 : :
3587 [ + + ]: 773 : if (pstate->growth != PHJ_GROWTH_DISABLED &&
3588 [ + + ]: 657 : batch->at_least_one_chunk &&
2854 tgl@sss.pgh.pa.us 3589 : 217 : (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3590 [ + + ]: 217 : > pstate->space_allowed))
3591 : : {
3592 : : /*
3593 : : * We have determined that this batch would exceed the space budget if
3594 : : * loaded into memory. Command all participants to help repartition.
3595 : : */
2868 andres@anarazel.de 3596 : 5 : batch->shared->space_exhausted = true;
3597 : 5 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3598 : 5 : LWLockRelease(&pstate->lock);
3599 : :
3600 : 5 : return false;
3601 : : }
3602 : :
3603 : 768 : batch->at_least_one_chunk = true;
3604 : 768 : batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3605 : 768 : batch->preallocated = want;
3606 : 768 : LWLockRelease(&pstate->lock);
3607 : :
3608 : 768 : return true;
3609 : : }
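/*
 * Illustrative standalone sketch (not from nodeHash.c): the accounting core
 * of ExecParallelHashTuplePrealloc() above -- reserve at least a whole
 * chunk's worth of payload, add the chunk header to the running size
 * estimate, and refuse once the estimate would exceed the budget so the
 * caller knows to repartition.  Constants and Demo* names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

#define DEMO_CHUNK_SIZE    32768
#define DEMO_CHUNK_HEADER  32

typedef struct DemoBatchEstimate
{
    size_t      estimated_size; /* bytes this batch is expected to need */
    size_t      preallocated;   /* bytes reserved but not yet consumed */
    size_t      space_allowed;  /* per-batch memory budget */
} DemoBatchEstimate;

/* Returns false if reserving 'size' more bytes would exceed the budget. */
static bool
demo_prealloc(DemoBatchEstimate *batch, size_t size)
{
    size_t      want = size;

    if (want < DEMO_CHUNK_SIZE - DEMO_CHUNK_HEADER)
        want = DEMO_CHUNK_SIZE - DEMO_CHUNK_HEADER;

    if (batch->estimated_size + want + DEMO_CHUNK_HEADER > batch->space_allowed)
        return false;           /* caller should trigger repartitioning */

    batch->estimated_size += want + DEMO_CHUNK_HEADER;
    batch->preallocated = want;
    return true;
}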
3610 : :
3611 : : /*
3612 : : * Calculate the limit on how much memory can be used by Hash and similar
3613 : : * plan types. This is work_mem times hash_mem_multiplier, and is
3614 : : * expressed in bytes.
3615 : : *
3616 : : * Exported for use by the planner, as well as other hash-like executor
3617 : : * nodes. This is a rather random place for this, but there is no better
3618 : : * place.
3619 : : */
3620 : : size_t
1555 tgl@sss.pgh.pa.us 3621 : 873907 : get_hash_memory_limit(void)
3622 : : {
3623 : : double mem_limit;
3624 : :
3625 : : /* Do initial calculation in double arithmetic */
3626 : 873907 : mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
3627 : :
3628 : : /* Clamp in case it doesn't fit in size_t */
3629 [ + - ]: 873907 : mem_limit = Min(mem_limit, (double) SIZE_MAX);
3630 : :
3631 : 873907 : return (size_t) mem_limit;
3632 : : }
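/*
 * Illustrative standalone sketch (not from nodeHash.c): the arithmetic
 * performed by get_hash_memory_limit() above, with a worked example.
 * work_mem is expressed in kilobytes, so the limit is
 * work_mem * hash_mem_multiplier * 1024 bytes; e.g. work_mem = 4096 (4MB)
 * with hash_mem_multiplier = 2.0 gives 8388608 bytes (8MB).  The parameter
 * names below are hypothetical stand-ins for the GUC variables.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t
demo_hash_memory_limit(int work_mem_kb, double hash_mem_multiplier)
{
    /* Compute in double first, then clamp so the value fits in size_t. */
    double      mem_limit = (double) work_mem_kb * hash_mem_multiplier * 1024.0;

    if (mem_limit > (double) SIZE_MAX)
        mem_limit = (double) SIZE_MAX;
    return (size_t) mem_limit;
}

int
main(void)
{
    printf("%zu\n", demo_hash_memory_limit(4096, 2.0));    /* prints 8388608 */
    return 0;
}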