Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nodeHash.c
4 : : * Routines to hash relations for hashjoin
5 : : *
6 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/executor/nodeHash.c
12 : : *
13 : : * See note on parallelism in nodeHashjoin.c.
14 : : *
15 : : *-------------------------------------------------------------------------
16 : : */
17 : : /*
18 : : * INTERFACE ROUTINES
19 : : * MultiExecHash - generate an in-memory hash table of the relation
20 : : * ExecInitHash - initialize node and subnodes
21 : : * ExecEndHash - shutdown node and subnodes
22 : : */
23 : :
24 : : #include "postgres.h"
25 : :
26 : : #include <math.h>
27 : : #include <limits.h>
28 : :
29 : : #include "access/htup_details.h"
30 : : #include "access/parallel.h"
31 : : #include "catalog/pg_statistic.h"
32 : : #include "commands/tablespace.h"
33 : : #include "executor/executor.h"
34 : : #include "executor/hashjoin.h"
35 : : #include "executor/instrument.h"
36 : : #include "executor/nodeHash.h"
37 : : #include "executor/nodeHashjoin.h"
38 : : #include "miscadmin.h"
39 : : #include "port/pg_bitutils.h"
40 : : #include "utils/lsyscache.h"
41 : : #include "utils/memutils.h"
42 : : #include "utils/syscache.h"
43 : : #include "utils/tuplestore.h"
44 : : #include "utils/wait_event.h"
45 : :
46 : : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
47 : : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
48 : : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
49 : : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
50 : : static void ExecHashBuildSkewHash(HashState *hashstate,
51 : : HashJoinTable hashtable, Hash *node,
52 : : int mcvsToUse);
53 : : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
54 : : TupleTableSlot *slot,
55 : : uint32 hashvalue,
56 : : int bucketNumber);
57 : : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
58 : :
59 : : static void *dense_alloc(HashJoinTable hashtable, Size size);
60 : : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
61 : : size_t size,
62 : : dsa_pointer *shared);
63 : : static void MultiExecPrivateHash(HashState *node);
64 : : static void MultiExecParallelHash(HashState *node);
65 : : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
66 : : int bucketno);
67 : : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
68 : : HashJoinTuple tuple);
69 : : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
70 : : HashJoinTuple tuple,
71 : : dsa_pointer tuple_shared);
72 : : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
73 : : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
74 : : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
75 : : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
76 : : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
77 : : dsa_pointer *shared);
78 : : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
79 : : int batchno,
80 : : size_t size);
81 : : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
82 : : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
83 : :
84 : :
85 : : /* ----------------------------------------------------------------
86 : : * ExecHash
87 : : *
88 : : * stub for pro forma compliance
89 : : * ----------------------------------------------------------------
90 : : */
91 : : static TupleTableSlot *
3214 andres@anarazel.de 92 :UBC 0 : ExecHash(PlanState *pstate)
93 : : {
7527 tgl@sss.pgh.pa.us 94 [ # # ]: 0 : elog(ERROR, "Hash node does not support ExecProcNode call convention");
95 : : return NULL;
96 : : }
97 : :
98 : : /* ----------------------------------------------------------------
99 : : * MultiExecHash
100 : : *
101 : : * build hash table for hashjoin, doing partitioning if more
102 : : * than one batch is required.
103 : : * ----------------------------------------------------------------
104 : : */
105 : : Node *
7689 tgl@sss.pgh.pa.us 106 :CBC 18887 : MultiExecHash(HashState *node)
107 : : {
108 : : /* must provide our own instrumentation support */
3058 andres@anarazel.de 109 [ + + ]: 18887 : if (node->ps.instrument)
110 : 229 : InstrStartNode(node->ps.instrument);
111 : :
112 [ + + ]: 18887 : if (node->parallel_state != NULL)
113 : 276 : MultiExecParallelHash(node);
114 : : else
115 : 18611 : MultiExecPrivateHash(node);
116 : :
117 : : /* must provide our own instrumentation support */
118 [ + + ]: 18885 : if (node->ps.instrument)
47 tgl@sss.pgh.pa.us 119 :GNC 229 : InstrStopNode(node->ps.instrument, node->hashtable->reportTuples);
120 : :
121 : : /*
122 : : * We do not return the hash table directly because it's not a subtype of
123 : : * Node, and so would violate the MultiExecProcNode API. Instead, our
124 : : * parent Hashjoin node is expected to know how to fish it out of our node
125 : : * state. Ugly but not really worth cleaning up, since Hashjoin knows
126 : : * quite a bit more about Hash besides that.
127 : : */
3058 andres@anarazel.de 128 :CBC 18885 : return NULL;
129 : : }
130 : :
131 : : /* ----------------------------------------------------------------
132 : : * MultiExecPrivateHash
133 : : *
134 : : * parallel-oblivious version, building a backend-private
135 : : * hash table and (if necessary) batch files.
136 : : * ----------------------------------------------------------------
137 : : */
138 : : static void
139 : 18611 : MultiExecPrivateHash(HashState *node)
140 : : {
141 : : PlanState *outerNode;
142 : : HashJoinTable hashtable;
143 : : TupleTableSlot *slot;
144 : : ExprContext *econtext;
47 tgl@sss.pgh.pa.us 145 :GNC 18611 : double nullTuples = 0;
146 : :
147 : : /*
148 : : * get state info from node
149 : : */
8552 tgl@sss.pgh.pa.us 150 :CBC 18611 : outerNode = outerPlanState(node);
151 : 18611 : hashtable = node->hashtable;
152 : :
153 : : /*
154 : : * set expression context
155 : : */
156 : 18611 : econtext = node->ps.ps_ExprContext;
157 : :
158 : : /*
159 : : * Get all tuples from the node below the Hash node and insert the
160 : : * potentially-matchable ones into the hash table (or temp files). Tuples
161 : : * that can't possibly match because they have null join keys are dumped
162 : : * into a separate tuplestore, or just summarily discarded if we don't
163 : : * need to emit them with null-extension.
164 : : */
165 : : for (;;)
10467 bruce@momjian.us 166 : 6561217 : {
167 : : bool isnull;
168 : : Datum hashdatum;
169 : :
7527 tgl@sss.pgh.pa.us 170 : 6579828 : slot = ExecProcNode(outerNode);
171 [ + + + + ]: 6579826 : if (TupIsNull(slot))
172 : : break;
173 : : /* We have to compute the hash value */
2468 andres@anarazel.de 174 : 6561217 : econtext->ecxt_outertuple = slot;
175 : :
623 drowley@postgresql.o 176 : 6561217 : ResetExprContext(econtext);
177 : :
178 : 6561217 : hashdatum = ExecEvalExprSwitchContext(node->hash_expr, econtext,
179 : : &isnull);
180 : :
181 [ + + ]: 6561217 : if (!isnull)
182 : : {
183 : : /* normal case with a non-null join key */
184 : 6561043 : uint32 hashvalue = DatumGetUInt32(hashdatum);
185 : : int bucketNumber;
186 : :
6254 tgl@sss.pgh.pa.us 187 : 6561043 : bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
188 [ + + ]: 6561043 : if (bucketNumber != INVALID_SKEW_BUCKET_NO)
189 : : {
190 : : /* It's a skew tuple, so put it into that hash table */
191 : 392 : ExecHashSkewTableInsert(hashtable, slot, hashvalue,
192 : : bucketNumber);
193 : : }
194 : : else
195 : : {
196 : : /* Not subject to skew optimization, so insert normally */
197 : 6560651 : ExecHashTableInsert(hashtable, slot, hashvalue);
198 : : }
7037 199 : 6561043 : hashtable->totalTuples += 1;
200 : : }
47 tgl@sss.pgh.pa.us 201 [ + + ]:GNC 174 : else if (node->keep_null_tuples)
202 : : {
203 : : /* null join key, but we must save tuple to be emitted later */
204 [ + - ]: 57 : if (node->null_tuple_store == NULL)
205 : 57 : node->null_tuple_store = ExecHashBuildNullTupleStore(hashtable);
206 : 57 : tuplestore_puttupleslot(node->null_tuple_store, slot);
207 : 57 : nullTuples += 1;
208 : : }
209 : : /* else we can discard the tuple immediately */
210 : : }
211 : :
212 : : /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
4222 kgrittn@postgresql.o 213 [ + + ]:CBC 18609 : if (hashtable->nbuckets != hashtable->nbuckets_optimal)
214 : 94 : ExecHashIncreaseNumBuckets(hashtable);
215 : :
216 : : /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
217 : 18609 : hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
218 [ + + ]: 18609 : if (hashtable->spaceUsed > hashtable->spacePeak)
219 : 18577 : hashtable->spacePeak = hashtable->spaceUsed;
220 : :
221 : : /* Report total number of tuples output (but not those discarded) */
47 tgl@sss.pgh.pa.us 222 :GNC 18609 : hashtable->reportTuples = hashtable->totalTuples + nullTuples;
3058 andres@anarazel.de 223 :CBC 18609 : }
224 : :
225 : : /* ----------------------------------------------------------------
226 : : * MultiExecParallelHash
227 : : *
228 : : * parallel-aware version, building a shared hash table and
229 : : * (if necessary) batch files using the combined effort of
230 : : * a set of co-operating backends.
231 : : * ----------------------------------------------------------------
232 : : */
233 : : static void
234 : 276 : MultiExecParallelHash(HashState *node)
235 : : {
236 : : ParallelHashJoinState *pstate;
237 : : PlanState *outerNode;
238 : : HashJoinTable hashtable;
239 : : TupleTableSlot *slot;
240 : : ExprContext *econtext;
241 : : Barrier *build_barrier;
242 : : int i;
243 : :
244 : : /*
245 : : * get state info from node
246 : : */
247 : 276 : outerNode = outerPlanState(node);
248 : 276 : hashtable = node->hashtable;
249 : :
250 : : /*
251 : : * set expression context
252 : : */
253 : 276 : econtext = node->ps.ps_ExprContext;
254 : :
255 : : /*
256 : : * Synchronize the parallel hash table build. At this stage we know that
257 : : * the shared hash table has been or is being set up by
258 : : * ExecHashTableCreate(), but we don't know if our peers have returned
259 : : * from there or are here in MultiExecParallelHash(), and if so how far
260 : : * through they are. To find out, we check the build_barrier phase then
261 : : * and jump to the right step in the build algorithm.
262 : : */
263 : 276 : pstate = hashtable->parallel_state;
264 : 276 : build_barrier = &pstate->build_barrier;
1139 tmunro@postgresql.or 265 [ - + ]: 276 : Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
3058 andres@anarazel.de 266 [ + + + ]: 276 : switch (BarrierPhase(build_barrier))
267 : : {
1139 tmunro@postgresql.or 268 : 131 : case PHJ_BUILD_ALLOCATE:
269 : :
270 : : /*
271 : : * Either I just allocated the initial hash table in
272 : : * ExecHashTableCreate(), or someone else is doing that. Either
273 : : * way, wait for everyone to arrive here so we can proceed.
274 : : */
2180 tgl@sss.pgh.pa.us 275 : 131 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
276 : : pg_fallthrough;
277 : :
1139 tmunro@postgresql.or 278 : 236 : case PHJ_BUILD_HASH_INNER:
279 : :
280 : : /*
281 : : * It's time to begin hashing, or if we just arrived here then
282 : : * hashing is already underway, so join in that effort. While
283 : : * hashing we have to be prepared to help increase the number of
284 : : * batches or buckets at any time, and if we arrived here when
285 : : * that was already underway we'll have to help complete that work
286 : : * immediately so that it's safe to access batches and buckets
287 : : * below.
288 : : */
3058 andres@anarazel.de 289 [ + + ]: 236 : if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
290 : : PHJ_GROW_BATCHES_ELECT)
3058 andres@anarazel.de 291 :GBC 2 : ExecParallelHashIncreaseNumBatches(hashtable);
3058 andres@anarazel.de 292 [ - + ]:CBC 236 : if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
293 : : PHJ_GROW_BUCKETS_ELECT)
3058 andres@anarazel.de 294 :LBC (1) : ExecParallelHashIncreaseNumBuckets(hashtable);
3058 andres@anarazel.de 295 :CBC 236 : ExecParallelHashEnsureBatchAccessors(hashtable);
296 : 236 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
297 : : for (;;)
298 : 1440160 : {
299 : : bool isnull;
300 : : uint32 hashvalue;
301 : :
302 : 1440396 : slot = ExecProcNode(outerNode);
303 [ + + + + ]: 1440396 : if (TupIsNull(slot))
304 : : break;
2468 305 : 1440160 : econtext->ecxt_outertuple = slot;
306 : :
623 drowley@postgresql.o 307 : 1440160 : ResetExprContext(econtext);
308 : :
309 : 1440160 : hashvalue = DatumGetUInt32(ExecEvalExprSwitchContext(node->hash_expr,
310 : : econtext,
311 : : &isnull));
312 : :
313 [ + + ]: 1440160 : if (!isnull)
314 : : {
315 : : /* normal case with a non-null join key */
3058 andres@anarazel.de 316 : 1440128 : ExecParallelHashTableInsert(hashtable, slot, hashvalue);
47 tgl@sss.pgh.pa.us 317 :GNC 1440128 : hashtable->reportTuples++;
318 : : }
319 [ + + ]: 32 : else if (node->keep_null_tuples)
320 : : {
321 : : /* null join key, but save tuple to be emitted later */
322 [ + - ]: 12 : if (node->null_tuple_store == NULL)
323 : 12 : node->null_tuple_store = ExecHashBuildNullTupleStore(hashtable);
324 : 12 : tuplestore_puttupleslot(node->null_tuple_store, slot);
325 : 12 : hashtable->reportTuples++;
326 : : }
327 : : /* else we can discard the tuple immediately */
328 : : }
329 : :
330 : : /*
331 : : * Make sure that any tuples we wrote to disk are visible to
332 : : * others before anyone tries to load them.
333 : : */
3058 andres@anarazel.de 334 [ + + ]:CBC 1262 : for (i = 0; i < hashtable->nbatch; ++i)
335 : 1026 : sts_end_write(hashtable->batches[i].inner_tuples);
336 : :
337 : : /*
338 : : * Update shared counters. We need an accurate total tuple count
339 : : * to control the empty table optimization.
340 : : */
341 : 236 : ExecParallelHashMergeCounters(hashtable);
342 : :
3050 343 : 236 : BarrierDetach(&pstate->grow_buckets_barrier);
344 : 236 : BarrierDetach(&pstate->grow_batches_barrier);
345 : :
346 : : /*
347 : : * Wait for everyone to finish building and flushing files and
348 : : * counters.
349 : : */
3058 350 [ + + ]: 236 : if (BarrierArriveAndWait(build_barrier,
351 : : WAIT_EVENT_HASH_BUILD_HASH_INNER))
352 : : {
353 : : /*
354 : : * Elect one backend to disable any further growth. Batches
355 : : * are now fixed. While building them we made sure they'd fit
356 : : * in our memory budget when we load them back in later (or we
357 : : * tried to do that and gave up because we detected extreme
358 : : * skew).
359 : : */
360 : 116 : pstate->growth = PHJ_GROWTH_DISABLED;
361 : : }
362 : : }
363 : :
364 : : /*
365 : : * We're not yet attached to a batch. We all agree on the dimensions and
366 : : * number of inner tuples. (In parallel mode, totalTuples isn't used in
367 : : * this module, but we must report it for nodeHashjoin.c's empty-table
368 : : * optimization.)
369 : : */
370 : 276 : hashtable->curbatch = -1;
371 : 276 : hashtable->nbuckets = pstate->nbuckets;
237 michael@paquier.xyz 372 :GNC 276 : hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
3058 andres@anarazel.de 373 :CBC 276 : hashtable->totalTuples = pstate->total_tuples;
374 : :
375 : : /*
376 : : * Unless we're completely done and the batch state has been freed, make
377 : : * sure we have accessors.
378 : : */
1139 tmunro@postgresql.or 379 [ + - ]: 276 : if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
1141 380 : 276 : ExecParallelHashEnsureBatchAccessors(hashtable);
381 : :
382 : : /*
383 : : * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
384 : : * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
385 : : * there already).
386 : : */
1139 387 [ + + - + : 276 : Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
- - ]
388 : : BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
389 : : BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
10892 scrappy@hub.org 390 : 276 : }
391 : :
392 : : /* ----------------------------------------------------------------
393 : : * ExecInitHash
394 : : *
395 : : * Init routine for Hash node
396 : : * ----------------------------------------------------------------
397 : : */
398 : : HashState *
7371 tgl@sss.pgh.pa.us 399 : 27388 : ExecInitHash(Hash *node, EState *estate, int eflags)
400 : : {
401 : : HashState *hashstate;
402 : :
403 : : /* check for unsupported flags */
404 [ - + ]: 27388 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
405 : :
406 : : /*
407 : : * create state structure
408 : : */
10467 bruce@momjian.us 409 : 27388 : hashstate = makeNode(HashState);
8552 tgl@sss.pgh.pa.us 410 : 27388 : hashstate->ps.plan = (Plan *) node;
411 : 27388 : hashstate->ps.state = estate;
3214 andres@anarazel.de 412 : 27388 : hashstate->ps.ExecProcNode = ExecHash;
413 : : /* delay building hashtable until ExecHashTableCreate() in executor run */
9849 tgl@sss.pgh.pa.us 414 : 27388 : hashstate->hashtable = NULL;
415 : :
416 : : /*
417 : : * Miscellaneous initialization
418 : : *
419 : : * create expression context for node
420 : : */
8552 421 : 27388 : ExecAssignExprContext(estate, &hashstate->ps);
422 : :
423 : : /*
424 : : * initialize child nodes
425 : : */
7371 426 : 27388 : outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
427 : :
428 : : /*
429 : : * initialize our result slot and type. No need to build projection
430 : : * because this node doesn't do projections.
431 : : */
2728 andres@anarazel.de 432 : 27388 : ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
8552 tgl@sss.pgh.pa.us 433 : 27388 : hashstate->ps.ps_ProjInfo = NULL;
434 : :
623 drowley@postgresql.o 435 [ - + ]: 27388 : Assert(node->plan.qual == NIL);
436 : :
437 : : /* these fields will be filled by ExecInitHashJoin() */
438 : 27388 : hashstate->hash_expr = NULL;
47 tgl@sss.pgh.pa.us 439 :GNC 27388 : hashstate->null_tuple_store = NULL;
440 : 27388 : hashstate->keep_null_tuples = false;
441 : :
8552 tgl@sss.pgh.pa.us 442 :CBC 27388 : return hashstate;
443 : : }
444 : :
445 : : /* ---------------------------------------------------------------
446 : : * ExecEndHash
447 : : *
448 : : * clean up routine for Hash node
449 : : * ----------------------------------------------------------------
450 : : */
451 : : void
452 : 27314 : ExecEndHash(HashState *node)
453 : : {
454 : : PlanState *outerPlan;
455 : :
456 : : /*
457 : : * shut down the subplan
458 : : */
459 : 27314 : outerPlan = outerPlanState(node);
460 : 27314 : ExecEndNode(outerPlan);
10467 bruce@momjian.us 461 : 27314 : }
462 : :
463 : :
464 : : /* ----------------------------------------------------------------
465 : : * ExecHashTableCreate
466 : : *
467 : : * create an empty hashtable data structure for hashjoin.
468 : : * ----------------------------------------------------------------
469 : : */
470 : : HashJoinTable
623 drowley@postgresql.o 471 : 18887 : ExecHashTableCreate(HashState *state)
472 : : {
473 : : Hash *node;
474 : : HashJoinTable hashtable;
475 : : Plan *outerNode;
476 : : size_t space_allowed;
477 : : int nbuckets;
478 : : int nbatch;
479 : : double rows;
480 : : int num_skew_mcvs;
481 : : int log2_nbuckets;
482 : : MemoryContext oldcxt;
483 : :
484 : : /*
485 : : * Get information about the size of the relation to be hashed (it's the
486 : : * "outer" subtree of this node, but the inner relation of the hashjoin).
487 : : * Compute the appropriate size of the hash table.
488 : : */
3058 andres@anarazel.de 489 : 18887 : node = (Hash *) state->ps.plan;
10467 bruce@momjian.us 490 : 18887 : outerNode = outerPlan(node);
491 : :
492 : : /*
493 : : * If this is shared hash table with a partial plan, then we can't use
494 : : * outerNode->plan_rows to estimate its size. We need an estimate of the
495 : : * total number of rows across all copies of the partial plan.
496 : : */
3058 andres@anarazel.de 497 [ + + ]: 18887 : rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
498 : :
499 : 18611 : ExecChooseHashTableSize(rows, outerNode->plan_width,
6254 tgl@sss.pgh.pa.us 500 : 18887 : OidIsValid(node->skewTable),
3058 andres@anarazel.de 501 : 18887 : state->parallel_state != NULL,
502 [ + + ]: 18887 : state->parallel_state != NULL ?
503 : 276 : state->parallel_state->nparticipants - 1 : 0,
504 : : &space_allowed,
505 : : &nbuckets, &nbatch, &num_skew_mcvs);
506 : :
507 : : /* nbuckets must be a power of 2 */
237 michael@paquier.xyz 508 :GNC 18887 : log2_nbuckets = pg_ceil_log2_32(nbuckets);
6913 tgl@sss.pgh.pa.us 509 [ - + ]:CBC 18887 : Assert(nbuckets == (1 << log2_nbuckets));
510 : :
511 : : /*
512 : : * Initialize the hash table control block.
513 : : *
514 : : * The hashtable control block is just palloc'd from the executor's
515 : : * per-query memory context. Everything else should be kept inside the
516 : : * subsidiary hashCxt, batchCxt or spillCxt.
517 : : */
1331 peter@eisentraut.org 518 : 18887 : hashtable = palloc_object(HashJoinTableData);
10467 bruce@momjian.us 519 : 18887 : hashtable->nbuckets = nbuckets;
4222 kgrittn@postgresql.o 520 : 18887 : hashtable->nbuckets_original = nbuckets;
521 : 18887 : hashtable->nbuckets_optimal = nbuckets;
6913 tgl@sss.pgh.pa.us 522 : 18887 : hashtable->log2_nbuckets = log2_nbuckets;
4222 kgrittn@postgresql.o 523 : 18887 : hashtable->log2_nbuckets_optimal = log2_nbuckets;
3058 andres@anarazel.de 524 : 18887 : hashtable->buckets.unshared = NULL;
6254 tgl@sss.pgh.pa.us 525 : 18887 : hashtable->skewEnabled = false;
526 : 18887 : hashtable->skewBucket = NULL;
527 : 18887 : hashtable->skewBucketLen = 0;
528 : 18887 : hashtable->nSkewBuckets = 0;
529 : 18887 : hashtable->skewBucketNums = NULL;
10467 bruce@momjian.us 530 : 18887 : hashtable->nbatch = nbatch;
531 : 18887 : hashtable->curbatch = 0;
7730 tgl@sss.pgh.pa.us 532 : 18887 : hashtable->nbatch_original = nbatch;
533 : 18887 : hashtable->nbatch_outstart = nbatch;
534 : 18887 : hashtable->growEnabled = true;
7689 535 : 18887 : hashtable->totalTuples = 0;
47 tgl@sss.pgh.pa.us 536 :GNC 18887 : hashtable->reportTuples = 0;
4222 kgrittn@postgresql.o 537 :CBC 18887 : hashtable->skewTuples = 0;
9849 tgl@sss.pgh.pa.us 538 : 18887 : hashtable->innerBatchFile = NULL;
539 : 18887 : hashtable->outerBatchFile = NULL;
7730 540 : 18887 : hashtable->spaceUsed = 0;
5937 rhaas@postgresql.org 541 : 18887 : hashtable->spacePeak = 0;
3058 andres@anarazel.de 542 : 18887 : hashtable->spaceAllowed = space_allowed;
6254 tgl@sss.pgh.pa.us 543 : 18887 : hashtable->spaceUsedSkew = 0;
544 : 18887 : hashtable->spaceAllowedSkew =
2106 pg@bowt.ie 545 : 18887 : hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
4255 heikki.linnakangas@i 546 : 18887 : hashtable->chunks = NULL;
3058 andres@anarazel.de 547 : 18887 : hashtable->current_chunk = NULL;
548 : 18887 : hashtable->parallel_state = state->parallel_state;
549 : 18887 : hashtable->area = state->ps.state->es_query_dsa;
550 : 18887 : hashtable->batches = NULL;
551 : :
552 : : #ifdef HJDEBUG
553 : : printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
554 : : hashtable, nbatch, nbuckets);
555 : : #endif
556 : :
557 : : /*
558 : : * Create temporary memory contexts in which to keep the hashtable working
559 : : * storage. See notes in executor/hashjoin.h.
560 : : */
2972 tgl@sss.pgh.pa.us 561 : 18887 : hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
562 : : "HashTableContext",
563 : : ALLOCSET_DEFAULT_SIZES);
564 : :
565 : 18887 : hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
566 : : "HashBatchContext",
567 : : ALLOCSET_DEFAULT_SIZES);
568 : :
1082 tomas.vondra@postgre 569 : 18887 : hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
570 : : "HashSpillContext",
571 : : ALLOCSET_DEFAULT_SIZES);
572 : :
573 : : /* Allocate data that will live for the life of the hashjoin */
574 : :
2972 tgl@sss.pgh.pa.us 575 : 18887 : oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
576 : :
3058 andres@anarazel.de 577 [ + + + + ]: 18887 : if (nbatch > 1 && hashtable->parallel_state == NULL)
578 : : {
579 : : MemoryContext oldctx;
580 : :
581 : : /*
582 : : * allocate and initialize the file arrays in hashCxt (not needed for
583 : : * parallel case which uses shared tuplestores instead of raw files)
584 : : */
1082 tomas.vondra@postgre 585 : 84 : oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
586 : :
1331 peter@eisentraut.org 587 : 84 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
588 : 84 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
589 : :
1082 tomas.vondra@postgre 590 : 84 : MemoryContextSwitchTo(oldctx);
591 : :
592 : : /* The files will not be opened until needed... */
593 : : /* ... but make sure we have temp tablespaces established for them */
6907 tgl@sss.pgh.pa.us 594 : 84 : PrepareTempTablespaces();
595 : : }
596 : :
3058 andres@anarazel.de 597 : 18887 : MemoryContextSwitchTo(oldcxt);
598 : :
599 [ + + ]: 18887 : if (hashtable->parallel_state)
600 : : {
601 : 276 : ParallelHashJoinState *pstate = hashtable->parallel_state;
602 : : Barrier *build_barrier;
603 : :
604 : : /*
605 : : * Attach to the build barrier. The corresponding detach operation is
606 : : * in ExecHashTableDetach. Note that we won't attach to the
607 : : * batch_barrier for batch 0 yet. We'll attach later and start it out
608 : : * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
609 : : * then loaded while hashing (the standard hybrid hash join
610 : : * algorithm), and we'll coordinate that using build_barrier.
611 : : */
612 : 276 : build_barrier = &pstate->build_barrier;
613 : 276 : BarrierAttach(build_barrier);
614 : :
615 : : /*
616 : : * So far we have no idea whether there are any other participants,
617 : : * and if so, what phase they are working on. The only thing we care
618 : : * about at this point is whether someone has already created the
619 : : * SharedHashJoinBatch objects and the hash table for batch 0. One
620 : : * backend will be elected to do that now if necessary.
621 : : */
1139 tmunro@postgresql.or 622 [ + + + - ]: 392 : if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
2180 tgl@sss.pgh.pa.us 623 : 116 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
624 : : {
3058 andres@anarazel.de 625 : 116 : pstate->nbatch = nbatch;
626 : 116 : pstate->space_allowed = space_allowed;
627 : 116 : pstate->growth = PHJ_GROWTH_OK;
628 : :
629 : : /* Set up the shared state for coordinating batches. */
630 : 116 : ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
631 : :
632 : : /*
633 : : * Allocate batch 0's hash table up front so we can load it
634 : : * directly while hashing.
635 : : */
636 : 116 : pstate->nbuckets = nbuckets;
637 : 116 : ExecParallelHashTableAlloc(hashtable, 0);
638 : : }
639 : :
640 : : /*
641 : : * The next Parallel Hash synchronization point is in
642 : : * MultiExecParallelHash(), which will progress it all the way to
643 : : * PHJ_BUILD_RUN. The caller must not return control from this
644 : : * executor node between now and then.
645 : : */
646 : : }
647 : : else
648 : : {
649 : : /*
650 : : * Prepare context for the first-scan space allocations; allocate the
651 : : * hashbucket array therein, and set each bucket "empty".
652 : : */
653 : 18611 : MemoryContextSwitchTo(hashtable->batchCxt);
654 : :
1331 peter@eisentraut.org 655 : 18611 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
656 : :
657 : : /*
658 : : * Set up for skew optimization, if possible and there's a need for
659 : : * more than one batch. (In a one-batch join, there's no point in
660 : : * it.)
661 : : */
3058 andres@anarazel.de 662 [ + + ]: 18611 : if (nbatch > 1)
623 drowley@postgresql.o 663 : 84 : ExecHashBuildSkewHash(state, hashtable, node, num_skew_mcvs);
664 : :
3058 andres@anarazel.de 665 : 18611 : MemoryContextSwitchTo(oldcxt);
666 : : }
667 : :
10108 bruce@momjian.us 668 : 18887 : return hashtable;
669 : : }
670 : :
671 : :
672 : : /*
673 : : * Compute appropriate size for hashtable given the estimated size of the
674 : : * relation to be hashed (number of rows and average row width).
675 : : *
676 : : * This is exported so that the planner's costsize.c can use it.
677 : : */
678 : :
679 : : /* Target bucket loading (tuples per bucket) */
680 : : #define NTUP_PER_BUCKET 1
681 : :
682 : : void
6254 tgl@sss.pgh.pa.us 683 : 679223 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
684 : : bool try_combined_hash_mem,
685 : : int parallel_workers,
686 : : size_t *space_allowed,
687 : : int *numbuckets,
688 : : int *numbatches,
689 : : int *num_skew_mcvs)
690 : : {
691 : : int tupsize;
692 : : double inner_rel_bytes;
693 : : size_t hash_table_bytes;
694 : : size_t bucket_bytes;
695 : : size_t max_pointers;
4253 rhaas@postgresql.org 696 : 679223 : int nbatch = 1;
697 : : int nbuckets;
698 : : double dbuckets;
699 : :
700 : : /* Force a plausible relation size if no info */
9094 tgl@sss.pgh.pa.us 701 [ + + ]: 679223 : if (ntuples <= 0.0)
702 : 119 : ntuples = 1000.0;
703 : :
704 : : /*
705 : : * Estimate tupsize based on footprint of tuple in hashtable... note this
706 : : * does not allow for any palloc overhead. The manipulations of spaceUsed
707 : : * don't count palloc overhead either.
708 : : */
7252 709 : 679223 : tupsize = HJTUPLE_OVERHEAD +
4091 710 : 679223 : MAXALIGN(SizeofMinimalTupleHeader) +
7730 711 : 679223 : MAXALIGN(tupwidth);
712 : 679223 : inner_rel_bytes = ntuples * tupsize;
713 : :
714 : : /*
715 : : * Compute in-memory hashtable size limit from GUCs.
716 : : */
1745 717 : 679223 : hash_table_bytes = get_hash_memory_limit();
718 : :
719 : : /*
720 : : * Parallel Hash tries to use the combined hash_mem of all workers to
721 : : * avoid the need to batch. If that won't work, it falls back to hash_mem
722 : : * per worker and tries to process batches in parallel.
723 : : */
2106 pg@bowt.ie 724 [ + + ]: 679223 : if (try_combined_hash_mem)
725 : : {
726 : : /* Careful, this could overflow size_t */
727 : : double newlimit;
728 : :
1745 tgl@sss.pgh.pa.us 729 : 58154 : newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
730 [ + - ]: 58154 : newlimit = Min(newlimit, (double) SIZE_MAX);
731 : 58154 : hash_table_bytes = (size_t) newlimit;
732 : : }
733 : :
3058 andres@anarazel.de 734 : 679223 : *space_allowed = hash_table_bytes;
735 : :
736 : : /*
737 : : * If skew optimization is possible, estimate the number of skew buckets
738 : : * that will fit in the memory allowed, and decrement the assumed space
739 : : * available for the main hash table accordingly.
740 : : *
741 : : * We make the optimistic assumption that each skew bucket will contain
742 : : * one inner-relation tuple. If that turns out to be low, we will recover
743 : : * at runtime by reducing the number of skew buckets.
744 : : *
745 : : * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
746 : : * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
747 : : * will round up to the next power of 2 and then multiply by 4 to reduce
748 : : * collisions.
749 : : */
6254 tgl@sss.pgh.pa.us 750 [ + + ]: 679223 : if (useskew)
751 : : {
752 : : size_t bytes_per_mcv;
753 : : size_t skew_mcvs;
754 : :
755 : : /*----------
756 : : * Compute number of MCVs we could hold in hash_table_bytes
757 : : *
758 : : * Divisor is:
759 : : * size of a hash tuple +
760 : : * worst-case size of skewBucket[] per MCV +
761 : : * size of skewBucketNums[] entry +
762 : : * size of skew bucket struct itself
763 : : *----------
764 : : */
1745 765 : 675525 : bytes_per_mcv = tupsize +
766 : : (8 * sizeof(HashSkewBucket *)) +
767 : 675525 : sizeof(int) +
768 : : SKEW_BUCKET_OVERHEAD;
769 : 675525 : skew_mcvs = hash_table_bytes / bytes_per_mcv;
770 : :
771 : : /*
772 : : * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
773 : : * not to worry about size_t overflow in the multiplication)
774 : : */
775 : 675525 : skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
776 : :
777 : : /* Now clamp to integer range */
778 : 675525 : skew_mcvs = Min(skew_mcvs, INT_MAX);
779 : :
780 : 675525 : *num_skew_mcvs = (int) skew_mcvs;
781 : :
782 : : /* Reduce hash_table_bytes by the amount needed for the skew table */
783 [ + - ]: 675525 : if (skew_mcvs > 0)
784 : 675525 : hash_table_bytes -= skew_mcvs * bytes_per_mcv;
785 : : }
786 : : else
6254 787 : 3698 : *num_skew_mcvs = 0;
788 : :
789 : : /*
790 : : * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
791 : : * memory is filled, assuming a single batch; but limit the value so that
792 : : * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
793 : : * nor MaxAllocSize.
794 : : *
795 : : * Note that both nbuckets and nbatch must be powers of 2 to make
796 : : * ExecHashGetBucketAndBatch fast.
797 : : */
1745 798 : 679223 : max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
3866 799 : 679223 : max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
800 : : /* If max_pointers isn't a power of 2, must round it down to one */
1745 801 : 679223 : max_pointers = pg_prevpower2_size_t(max_pointers);
802 : :
803 : : /* Also ensure we avoid integer overflow in nbatch and nbuckets */
804 : : /* (this step is redundant given the current value of MaxAllocSize) */
805 : 679223 : max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
806 : :
4253 rhaas@postgresql.org 807 : 679223 : dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
808 [ + + ]: 679223 : dbuckets = Min(dbuckets, max_pointers);
3866 tgl@sss.pgh.pa.us 809 : 679223 : nbuckets = (int) dbuckets;
810 : : /* don't let nbuckets be really small, though ... */
811 : 679223 : nbuckets = Max(nbuckets, 1024);
812 : : /* ... and force it to be a power of 2. */
1745 813 : 679223 : nbuckets = pg_nextpower2_32(nbuckets);
814 : :
815 : : /*
816 : : * If there's not enough space to store the projected number of tuples and
817 : : * the required bucket headers, we will need multiple batches.
818 : : */
3866 819 : 679223 : bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
4253 rhaas@postgresql.org 820 [ + + ]: 679223 : if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
821 : : {
822 : : /* We'll need multiple batches */
823 : : size_t sbuckets;
824 : : double dbatch;
825 : : int minbatch;
826 : : size_t bucket_size;
827 : :
828 : : /*
829 : : * If Parallel Hash with combined hash_mem would still need multiple
830 : : * batches, we'll have to fall back to regular hash_mem budget.
831 : : */
2106 pg@bowt.ie 832 [ + + ]: 3857 : if (try_combined_hash_mem)
833 : : {
3058 andres@anarazel.de 834 : 172 : ExecChooseHashTableSize(ntuples, tupwidth, useskew,
835 : : false, parallel_workers,
836 : : space_allowed,
837 : : numbuckets,
838 : : numbatches,
839 : : num_skew_mcvs);
840 : 172 : return;
841 : : }
842 : :
843 : : /*
844 : : * Estimate the number of buckets we'll want to have when hash_mem is
845 : : * entirely full. Each bucket will contain a bucket pointer plus
846 : : * NTUP_PER_BUCKET tuples, whose projected size already includes
847 : : * overhead for the hash code, pointer to the next tuple, etc.
848 : : */
4253 rhaas@postgresql.org 849 : 3685 : bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
1361 tgl@sss.pgh.pa.us 850 [ - + ]: 3685 : if (hash_table_bytes <= bucket_size)
1361 tgl@sss.pgh.pa.us 851 :UBC 0 : sbuckets = 1; /* avoid pg_nextpower2_size_t(0) */
852 : : else
1361 tgl@sss.pgh.pa.us 853 :CBC 3685 : sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
1745 854 : 3685 : sbuckets = Min(sbuckets, max_pointers);
855 : 3685 : nbuckets = (int) sbuckets;
856 : 3685 : nbuckets = pg_nextpower2_32(nbuckets);
4253 rhaas@postgresql.org 857 : 3685 : bucket_bytes = nbuckets * sizeof(HashJoinTuple);
858 : :
859 : : /*
860 : : * Buckets are simple pointers to hashjoin tuples, while tupsize
861 : : * includes the pointer, hash code, and MinimalTupleData. So buckets
862 : : * should never really exceed 25% of hash_mem (even for
863 : : * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
864 : : * 2^N bytes, where we might get more because of doubling. So let's
865 : : * look for 50% here.
866 : : */
867 [ - + ]: 3685 : Assert(bucket_bytes <= hash_table_bytes / 2);
868 : :
869 : : /* Calculate required number of batches. */
870 : 3685 : dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
6031 tgl@sss.pgh.pa.us 871 [ + - ]: 3685 : dbatch = Min(dbatch, max_pointers);
7730 872 : 3685 : minbatch = (int) dbatch;
2218 drowley@postgresql.o 873 : 3685 : nbatch = pg_nextpower2_32(Max(2, minbatch));
874 : : }
875 : :
876 : : /*
877 : : * Optimize the total amount of memory consumed by the hash node.
878 : : *
879 : : * The nbatch calculation above focuses on the in-memory hash table,
880 : : * assuming no per-batch overhead. But each batch may have two files, each
881 : : * with a BLCKSZ buffer. For large nbatch values these buffers may use
882 : : * significantly more memory than the hash table.
883 : : *
884 : : * The total memory usage may be expressed by this formula:
885 : : *
886 : : * (inner_rel_bytes / nbatch) + (2 * nbatch * BLCKSZ)
887 : : *
888 : : * where (inner_rel_bytes / nbatch) is the size of the in-memory hash
889 : : * table and (2 * nbatch * BLCKSZ) is the amount of memory used by file
890 : : * buffers.
891 : : *
892 : : * The nbatch calculation however ignores the second part. And for very
893 : : * large inner_rel_bytes, there may be no nbatch that keeps total memory
894 : : * usage under the budget (work_mem * hash_mem_multiplier). To deal with
895 : : * that, we will adjust nbatch to minimize total memory consumption across
896 : : * both the hashtable and file buffers.
897 : : *
898 : : * As we increase the size of the hashtable, the number of batches
899 : : * decreases, and the total memory usage follows a U-shaped curve. We find
900 : : * the minimum nbatch by "walking back" -- checking if halving nbatch
901 : : * would lower the total memory usage. We stop when it no longer helps.
902 : : *
903 : : * We only reduce the number of batches. Adding batches reduces memory
904 : : * usage only when most of the memory is used by the hash table, with
905 : : * total memory usage within the limit or not far from it. We don't want
906 : : * to start batching when not needed, even if that would reduce memory
907 : : * usage.
908 : : *
909 : : * While growing the hashtable, we also adjust the number of buckets to
910 : : * maintain a load factor of NTUP_PER_BUCKET while squeezing tuples back
911 : : * from batches into the hashtable.
912 : : *
913 : : * Note that we can only change nbuckets during initial hashtable sizing.
914 : : * Once we start building the hash, nbuckets is fixed (we may still grow
915 : : * the hash table).
916 : : *
917 : : * We double several parameters (space_allowed, nbuckets, num_skew_mcvs),
918 : : * which introduces a risk of overflow. We avoid this by exiting the loop.
919 : : * We could do something smarter (e.g. capping nbuckets and continue), but
920 : : * the complexity is not worth it. Such cases are extremely rare, and this
921 : : * is a best-effort attempt to reduce memory usage.
922 : : */
200 tomas.vondra@postgre 923 [ + + ]: 679660 : while (nbatch > 1)
924 : : {
925 : : /* Check that buckets won't overflow MaxAllocSize */
926 [ - + ]: 4294 : if (nbuckets > (MaxAllocSize / sizeof(HashJoinTuple) / 2))
200 tomas.vondra@postgre 927 :UBC 0 : break;
928 : :
929 : : /* num_skew_mcvs should be less than nbuckets */
200 tomas.vondra@postgre 930 [ - + ]:CBC 4294 : Assert((*num_skew_mcvs) < (INT_MAX / 2));
931 : :
932 : : /*
933 : : * Check that space_allowed won't overflow SIZE_MAX.
934 : : *
935 : : * We don't use hash_table_bytes here, because it does not include the
936 : : * skew buckets. And we want to limit the overall memory limit.
937 : : */
938 [ - + ]: 4294 : if ((*space_allowed) > (SIZE_MAX / 2))
200 tomas.vondra@postgre 939 :UBC 0 : break;
940 : :
941 : : /*
942 : : * Will halving the number of batches and doubling the size of the
943 : : * hashtable reduce overall memory usage?
944 : : *
945 : : * This is the same as (S = space_allowed):
946 : : *
947 : : * (S + 2 * nbatch * BLCKSZ) < (S * 2 + nbatch * BLCKSZ)
948 : : *
949 : : * but avoiding intermediate overflow.
950 : : */
200 tomas.vondra@postgre 951 [ + + ]:CBC 4294 : if (nbatch < (*space_allowed) / BLCKSZ)
440 952 : 3685 : break;
953 : :
954 : : /*
955 : : * MaxAllocSize is sufficiently small that we are not worried about
956 : : * overflowing nbuckets.
957 : : */
958 : 609 : nbuckets *= 2;
959 : :
200 960 : 609 : *num_skew_mcvs = (*num_skew_mcvs) * 2;
440 961 : 609 : *space_allowed = (*space_allowed) * 2;
962 : :
200 963 : 609 : nbatch /= 2;
964 : : }
965 : :
3932 tgl@sss.pgh.pa.us 966 [ - + ]: 679051 : Assert(nbuckets > 0);
967 [ - + ]: 679051 : Assert(nbatch > 0);
968 : :
7730 969 : 679051 : *numbuckets = nbuckets;
9094 970 : 679051 : *numbatches = nbatch;
971 : : }
972 : :
973 : :
974 : : /* ----------------------------------------------------------------
975 : : * ExecHashTableDestroy
976 : : *
977 : : * destroy a hash table
978 : : * ----------------------------------------------------------------
979 : : */
980 : : void
9849 981 : 18814 : ExecHashTableDestroy(HashJoinTable hashtable)
982 : : {
983 : : int i;
984 : :
985 : : /*
986 : : * Make sure all the temp files are closed. We skip batch 0, since it
987 : : * can't have any temp files (and the arrays might not even exist if
988 : : * nbatch is only 1). Parallel hash joins don't use these files.
989 : : */
3058 andres@anarazel.de 990 [ + + ]: 18814 : if (hashtable->innerBatchFile != NULL)
991 : : {
992 [ + + ]: 976 : for (i = 1; i < hashtable->nbatch; i++)
993 : : {
994 [ - + ]: 828 : if (hashtable->innerBatchFile[i])
3058 andres@anarazel.de 995 :UBC 0 : BufFileClose(hashtable->innerBatchFile[i]);
3058 andres@anarazel.de 996 [ - + ]:CBC 828 : if (hashtable->outerBatchFile[i])
3058 andres@anarazel.de 997 :UBC 0 : BufFileClose(hashtable->outerBatchFile[i]);
998 : : }
999 : : }
1000 : :
1001 : : /* Release working memory (batchCxt is a child, so it goes away too) */
9442 tgl@sss.pgh.pa.us 1002 :CBC 18814 : MemoryContextDelete(hashtable->hashCxt);
1003 : :
1004 : : /* And drop the control block */
9849 1005 : 18814 : pfree(hashtable);
1006 : 18814 : }
1007 : :
1008 : : /*
1009 : : * Consider adjusting the allowed hash table size, depending on the number
1010 : : * of batches, to minimize the overall memory usage (for both the hashtable
1011 : : * and batch files).
1012 : : *
1013 : : * We're adjusting the size of the hash table, not the (optimal) number of
1014 : : * buckets. We can't change that once we start building the hash, due to how
1015 : : * ExecHashGetBucketAndBatch calculates batchno/bucketno from the hash. This
1016 : : * means the load factor may not be optimal, but we're in damage control so
1017 : : * we accept slower lookups. It's still much better than batch explosion.
1018 : : *
1019 : : * Returns true if we chose to increase the batch size (and thus we don't
1020 : : * need to add batches), and false if we should increase nbatch.
1021 : : */
1022 : : static bool
440 tomas.vondra@postgre 1023 : 132 : ExecHashIncreaseBatchSize(HashJoinTable hashtable)
1024 : : {
1025 : : /*
1026 : : * How much additional memory would doubling nbatch use? Each batch may
1027 : : * require two buffered files (inner/outer), with a BLCKSZ buffer.
1028 : : */
200 1029 : 132 : size_t batchSpace = (hashtable->nbatch * 2 * (size_t) BLCKSZ);
1030 : :
1031 : : /*
1032 : : * Compare the new space needed for doubling nbatch and for enlarging the
1033 : : * in-memory hash table. If doubling the hash table needs less memory,
1034 : : * just do that. Otherwise, continue with doubling the nbatch.
1035 : : *
1036 : : * We're either doubling spaceAllowed or batchSpace, so which of those
1037 : : * increases the memory usage the least is the same as comparing the
1038 : : * values directly.
1039 : : */
440 1040 [ - + ]: 132 : if (hashtable->spaceAllowed <= batchSpace)
1041 : : {
440 tomas.vondra@postgre 1042 :UBC 0 : hashtable->spaceAllowed *= 2;
1043 : 0 : return true;
1044 : : }
1045 : :
440 tomas.vondra@postgre 1046 :CBC 132 : return false;
1047 : : }
1048 : :
1049 : : /*
1050 : : * ExecHashIncreaseNumBatches
1051 : : * increase the original number of batches in order to reduce
1052 : : * current memory consumption
1053 : : */
1054 : : static void
7730 tgl@sss.pgh.pa.us 1055 : 552772 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
1056 : : {
1057 : 552772 : int oldnbatch = hashtable->nbatch;
1058 : 552772 : int curbatch = hashtable->curbatch;
1059 : : int nbatch;
1060 : : long ninmemory;
1061 : : long nfreed;
1062 : : HashMemoryChunk oldchunks;
1063 : :
1064 : : /* do nothing if we've decided to shut off growth */
1065 [ + + ]: 552772 : if (!hashtable->growEnabled)
1066 : 552640 : return;
1067 : :
1068 : : /* safety check to avoid overflow */
6031 1069 [ - + ]: 132 : if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
7730 tgl@sss.pgh.pa.us 1070 :UBC 0 : return;
1071 : :
1072 : : /* consider increasing size of the in-memory hash table instead */
440 tomas.vondra@postgre 1073 [ - + ]:CBC 132 : if (ExecHashIncreaseBatchSize(hashtable))
440 tomas.vondra@postgre 1074 :UBC 0 : return;
1075 : :
7730 tgl@sss.pgh.pa.us 1076 :CBC 132 : nbatch = oldnbatch * 2;
1077 [ - + ]: 132 : Assert(nbatch > 1);
1078 : :
1079 : : #ifdef HJDEBUG
1080 : : printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
1081 : : hashtable, nbatch, hashtable->spaceUsed);
1082 : : #endif
1083 : :
1084 [ + + ]: 132 : if (hashtable->innerBatchFile == NULL)
1085 : : {
1082 tomas.vondra@postgre 1086 : 64 : MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
1087 : :
1088 : : /* we had no file arrays before */
1331 peter@eisentraut.org 1089 : 64 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
1090 : 64 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
1091 : :
1082 tomas.vondra@postgre 1092 : 64 : MemoryContextSwitchTo(oldcxt);
1093 : :
1094 : : /* time to establish the temp tablespaces, too */
6907 tgl@sss.pgh.pa.us 1095 : 64 : PrepareTempTablespaces();
1096 : : }
1097 : : else
1098 : : {
1099 : : /* enlarge arrays and zero out added entries */
1270 peter@eisentraut.org 1100 : 68 : hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
1101 : 68 : hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
1102 : : }
1103 : :
7730 tgl@sss.pgh.pa.us 1104 : 132 : hashtable->nbatch = nbatch;
1105 : :
1106 : : /*
1107 : : * Scan through the existing hash table entries and dump out any that are
1108 : : * no longer of the current batch.
1109 : : */
1110 : 132 : ninmemory = nfreed = 0;
1111 : :
1112 : : /* If know we need to resize nbuckets, we can do it while rebatching. */
4222 kgrittn@postgresql.o 1113 [ + + ]: 132 : if (hashtable->nbuckets_optimal != hashtable->nbuckets)
1114 : : {
1115 : : /* we never decrease the number of buckets */
1116 [ - + ]: 64 : Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
1117 : :
1118 : 64 : hashtable->nbuckets = hashtable->nbuckets_optimal;
1119 : 64 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1120 : :
3058 andres@anarazel.de 1121 : 64 : hashtable->buckets.unshared =
1331 peter@eisentraut.org 1122 : 64 : repalloc_array(hashtable->buckets.unshared,
1123 : : HashJoinTuple, hashtable->nbuckets);
1124 : : }
1125 : :
1126 : : /*
1127 : : * We will scan through the chunks directly, so that we can reset the
1128 : : * buckets now and not have to keep track which tuples in the buckets have
1129 : : * already been processed. We will free the old chunks as we go.
1130 : : */
3058 andres@anarazel.de 1131 : 132 : memset(hashtable->buckets.unshared, 0,
1132 : 132 : sizeof(HashJoinTuple) * hashtable->nbuckets);
4255 heikki.linnakangas@i 1133 : 132 : oldchunks = hashtable->chunks;
1134 : 132 : hashtable->chunks = NULL;
1135 : :
1136 : : /* so, let's scan through the old chunks, and all tuples in each chunk */
1137 [ + + ]: 660 : while (oldchunks != NULL)
1138 : : {
3058 andres@anarazel.de 1139 : 528 : HashMemoryChunk nextchunk = oldchunks->next.unshared;
1140 : :
1141 : : /* position within the buffer (up to oldchunks->used) */
4255 heikki.linnakangas@i 1142 : 528 : size_t idx = 0;
1143 : :
1144 : : /* process all tuples stored in this chunk (and then free it) */
1145 [ + + ]: 360732 : while (idx < oldchunks->used)
1146 : : {
3045 tgl@sss.pgh.pa.us 1147 : 360204 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
4255 heikki.linnakangas@i 1148 : 360204 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1149 : 360204 : int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
1150 : : int bucketno;
1151 : : int batchno;
1152 : :
7730 tgl@sss.pgh.pa.us 1153 : 360204 : ninmemory++;
4255 heikki.linnakangas@i 1154 : 360204 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1155 : : &bucketno, &batchno);
1156 : :
7730 tgl@sss.pgh.pa.us 1157 [ + + ]: 360204 : if (batchno == curbatch)
1158 : : {
1159 : : /* keep tuple in memory - copy it into the new chunk */
1160 : : HashJoinTuple copyTuple;
1161 : :
3999 1162 : 135244 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
4255 heikki.linnakangas@i 1163 : 135244 : memcpy(copyTuple, hashTuple, hashTupleSize);
1164 : :
1165 : : /* and add it back to the appropriate bucket */
3058 andres@anarazel.de 1166 : 135244 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1167 : 135244 : hashtable->buckets.unshared[bucketno] = copyTuple;
1168 : : }
1169 : : else
1170 : : {
1171 : : /* dump it out */
7730 tgl@sss.pgh.pa.us 1172 [ - + ]: 224960 : Assert(batchno > curbatch);
4255 heikki.linnakangas@i 1173 : 224960 : ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1174 : : hashTuple->hashvalue,
1082 tomas.vondra@postgre 1175 : 224960 : &hashtable->innerBatchFile[batchno],
1176 : : hashtable);
1177 : :
4255 heikki.linnakangas@i 1178 : 224960 : hashtable->spaceUsed -= hashTupleSize;
7730 tgl@sss.pgh.pa.us 1179 : 224960 : nfreed++;
1180 : : }
1181 : :
1182 : : /* next tuple in this chunk */
4255 heikki.linnakangas@i 1183 : 360204 : idx += MAXALIGN(hashTupleSize);
1184 : :
1185 : : /* allow this loop to be cancellable */
3366 tgl@sss.pgh.pa.us 1186 [ - + ]: 360204 : CHECK_FOR_INTERRUPTS();
1187 : : }
1188 : :
1189 : : /* we're done with this chunk - free it and proceed to the next one */
4255 heikki.linnakangas@i 1190 : 528 : pfree(oldchunks);
1191 : 528 : oldchunks = nextchunk;
1192 : : }
1193 : :
1194 : : #ifdef HJDEBUG
1195 : : printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1196 : : hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1197 : : #endif
1198 : :
1199 : : /*
1200 : : * If we dumped out either all or none of the tuples in the table, disable
1201 : : * further expansion of nbatch. This situation implies that we have
1202 : : * enough tuples of identical hashvalues to overflow spaceAllowed.
1203 : : * Increasing nbatch will not fix it since there's no way to subdivide the
1204 : : * group any more finely. We have to just gut it out and hope the server
1205 : : * has enough RAM.
1206 : : */
7730 tgl@sss.pgh.pa.us 1207 [ + - + + ]: 132 : if (nfreed == 0 || nfreed == ninmemory)
1208 : : {
1209 : 32 : hashtable->growEnabled = false;
1210 : : #ifdef HJDEBUG
1211 : : printf("Hashjoin %p: disabling further increase of nbatch\n",
1212 : : hashtable);
1213 : : #endif
1214 : : }
1215 : : }
1216 : :
1217 : : /*
1218 : : * ExecParallelHashIncreaseNumBatches
1219 : : * Every participant attached to grow_batches_barrier must run this
1220 : : * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1221 : : */
1222 : : static void
3058 andres@anarazel.de 1223 : 45 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1224 : : {
1225 : 45 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1226 : :
1139 tmunro@postgresql.or 1227 [ - + ]: 45 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1228 : :
1229 : : /*
1230 : : * It's unlikely, but we need to be prepared for new participants to show
1231 : : * up while we're in the middle of this operation so we need to switch on
1232 : : * barrier phase here.
1233 : : */
3058 andres@anarazel.de 1234 [ + - + - : 45 : switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
- - ]
1235 : : {
1139 tmunro@postgresql.or 1236 : 43 : case PHJ_GROW_BATCHES_ELECT:
1237 : :
1238 : : /*
1239 : : * Elect one participant to prepare to grow the number of batches.
1240 : : * This involves reallocating or resetting the buckets of batch 0
1241 : : * in preparation for all participants to begin repartitioning the
1242 : : * tuples.
1243 : : */
3058 andres@anarazel.de 1244 [ + + ]: 43 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1245 : : WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
1246 : : {
1247 : : dsa_pointer_atomic *buckets;
1248 : : ParallelHashJoinBatch *old_batch0;
1249 : : int new_nbatch;
1250 : : int i;
1251 : :
1252 : : /* Move the old batch out of the way. */
1253 : 32 : old_batch0 = hashtable->batches[0].shared;
1254 : 32 : pstate->old_batches = pstate->batches;
1255 : 32 : pstate->old_nbatch = hashtable->nbatch;
1256 : 32 : pstate->batches = InvalidDsaPointer;
1257 : :
1258 : : /* Free this backend's old accessors. */
1259 : 32 : ExecParallelHashCloseBatchAccessors(hashtable);
1260 : :
1261 : : /* Figure out how many batches to use. */
1262 [ + + ]: 32 : if (hashtable->nbatch == 1)
1263 : : {
1264 : : /*
1265 : : * We are going from single-batch to multi-batch. We need
1266 : : * to switch from one large combined memory budget to the
1267 : : * regular hash_mem budget.
1268 : : */
1745 tgl@sss.pgh.pa.us 1269 : 24 : pstate->space_allowed = get_hash_memory_limit();
1270 : :
1271 : : /*
1272 : : * The combined hash_mem of all participants wasn't
1273 : : * enough. Therefore one batch per participant would be
1274 : : * approximately equivalent and would probably also be
1275 : : * insufficient. So try two batches per participant,
1276 : : * rounded up to a power of two.
1277 : : */
1278 : 24 : new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
1279 : : }
1280 : : else
1281 : : {
1282 : : /*
1283 : : * We were already multi-batched. Try doubling the number
1284 : : * of batches.
1285 : : */
3058 andres@anarazel.de 1286 : 8 : new_nbatch = hashtable->nbatch * 2;
1287 : : }
1288 : :
1289 : : /* Allocate new larger generation of batches. */
1290 [ - + ]: 32 : Assert(hashtable->nbatch == pstate->nbatch);
1291 : 32 : ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1292 [ - + ]: 32 : Assert(hashtable->nbatch == pstate->nbatch);
1293 : :
1294 : : /* Replace or recycle batch 0's bucket array. */
1295 [ + + ]: 32 : if (pstate->old_nbatch == 1)
1296 : : {
1297 : : double dtuples;
1298 : : double dbuckets;
1299 : : int new_nbuckets;
1300 : : uint32 max_buckets;
1301 : :
1302 : : /*
1303 : : * We probably also need a smaller bucket array. How many
1304 : : * tuples do we expect per batch, assuming we have only
1305 : : * half of them so far? Normally we don't need to change
1306 : : * the bucket array's size, because the size of each batch
1307 : : * stays the same as we add more batches, but in this
1308 : : * special case we move from a large batch to many smaller
1309 : : * batches and it would be wasteful to keep the large
1310 : : * array.
1311 : : */
1312 : 24 : dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1313 : :
1314 : : /*
1315 : : * We need to calculate the maximum number of buckets to
1316 : : * stay within the MaxAllocSize boundary. Round the
1317 : : * maximum number to the previous power of 2 given that
1318 : : * later we round the number to the next power of 2.
1319 : : */
849 akorotkov@postgresql 1320 : 24 : max_buckets = pg_prevpower2_32((uint32)
1321 : : (MaxAllocSize / sizeof(dsa_pointer_atomic)));
3058 andres@anarazel.de 1322 : 24 : dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
849 akorotkov@postgresql 1323 [ + - ]: 24 : dbuckets = Min(dbuckets, max_buckets);
3058 andres@anarazel.de 1324 : 24 : new_nbuckets = (int) dbuckets;
1325 : 24 : new_nbuckets = Max(new_nbuckets, 1024);
1745 tgl@sss.pgh.pa.us 1326 : 24 : new_nbuckets = pg_nextpower2_32(new_nbuckets);
3058 andres@anarazel.de 1327 : 24 : dsa_free(hashtable->area, old_batch0->buckets);
1328 : 48 : hashtable->batches[0].shared->buckets =
1329 : 24 : dsa_allocate(hashtable->area,
1330 : : sizeof(dsa_pointer_atomic) * new_nbuckets);
1331 : : buckets = (dsa_pointer_atomic *)
1332 : 24 : dsa_get_address(hashtable->area,
1333 : 24 : hashtable->batches[0].shared->buckets);
1334 [ + + ]: 71704 : for (i = 0; i < new_nbuckets; ++i)
1335 : 71680 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1336 : 24 : pstate->nbuckets = new_nbuckets;
1337 : : }
1338 : : else
1339 : : {
1340 : : /* Recycle the existing bucket array. */
1341 : 8 : hashtable->batches[0].shared->buckets = old_batch0->buckets;
1342 : : buckets = (dsa_pointer_atomic *)
1343 : 8 : dsa_get_address(hashtable->area, old_batch0->buckets);
1344 [ + + ]: 32776 : for (i = 0; i < hashtable->nbuckets; ++i)
1345 : 32768 : dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
1346 : : }
1347 : :
1348 : : /* Move all chunks to the work queue for parallel processing. */
1349 : 32 : pstate->chunk_work_queue = old_batch0->chunks;
1350 : :
1351 : : /* Disable further growth temporarily while we're growing. */
1352 : 32 : pstate->growth = PHJ_GROWTH_DISABLED;
1353 : : }
1354 : : else
1355 : : {
1356 : : /* All other participants just flush their tuples to disk. */
1357 : 11 : ExecParallelHashCloseBatchAccessors(hashtable);
1358 : : }
1359 : : pg_fallthrough;
1360 : :
1361 : : case PHJ_GROW_BATCHES_REALLOCATE:
1362 : : /* Wait for the above to be finished. */
1363 : 43 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1364 : : WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
1365 : : pg_fallthrough;
1366 : :
1139 tmunro@postgresql.or 1367 : 45 : case PHJ_GROW_BATCHES_REPARTITION:
1368 : : /* Make sure that we have the current dimensions and buckets. */
3058 andres@anarazel.de 1369 : 45 : ExecParallelHashEnsureBatchAccessors(hashtable);
1370 : 45 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1371 : : /* Then partition, flush counters. */
1372 : 45 : ExecParallelHashRepartitionFirst(hashtable);
1373 : 45 : ExecParallelHashRepartitionRest(hashtable);
1374 : 45 : ExecParallelHashMergeCounters(hashtable);
1375 : : /* Wait for the above to be finished. */
1376 : 45 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1377 : : WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
1378 : : pg_fallthrough;
1379 : :
1139 tmunro@postgresql.or 1380 : 45 : case PHJ_GROW_BATCHES_DECIDE:
1381 : :
1382 : : /*
1383 : : * Elect one participant to clean up and decide whether further
1384 : : * repartitioning is needed, or should be disabled because it's
1385 : : * not helping.
1386 : : */
3058 andres@anarazel.de 1387 [ + + ]: 45 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1388 : : WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
1389 : : {
1390 : : ParallelHashJoinBatch *old_batches;
1391 : 32 : bool space_exhausted = false;
1392 : 32 : bool extreme_skew_detected = false;
1393 : :
1394 : : /* Make sure that we have the current dimensions and buckets. */
1395 : 32 : ExecParallelHashEnsureBatchAccessors(hashtable);
1396 : 32 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1397 : :
565 tmunro@postgresql.or 1398 : 32 : old_batches = dsa_get_address(hashtable->area, pstate->old_batches);
1399 : :
1400 : : /* Are any of the new generation of batches exhausted? */
1350 drowley@postgresql.o 1401 [ + + ]: 224 : for (int i = 0; i < hashtable->nbatch; ++i)
1402 : : {
1403 : : ParallelHashJoinBatch *batch;
1404 : : ParallelHashJoinBatch *old_batch;
1405 : : int parent;
1406 : :
565 tmunro@postgresql.or 1407 : 192 : batch = hashtable->batches[i].shared;
3058 andres@anarazel.de 1408 [ + - ]: 192 : if (batch->space_exhausted ||
1409 [ + + ]: 192 : batch->estimated_size > pstate->space_allowed)
1410 : 16 : space_exhausted = true;
1411 : :
565 tmunro@postgresql.or 1412 : 192 : parent = i % pstate->old_nbatch;
1413 : 192 : old_batch = NthParallelHashJoinBatch(old_batches, parent);
1414 [ + + ]: 192 : if (old_batch->space_exhausted ||
1415 [ - + ]: 48 : batch->estimated_size > pstate->space_allowed)
1416 : : {
1417 : : /*
1418 : : * Did this batch receive ALL of the tuples from its
1419 : : * parent batch? That would indicate that further
1420 : : * repartitioning isn't going to help (the hash values
1421 : : * are probably all the same).
1422 : : */
3058 andres@anarazel.de 1423 [ + + ]: 144 : if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1424 : 16 : extreme_skew_detected = true;
1425 : : }
1426 : : }
1427 : :
1428 : : /* Don't keep growing if it's not helping or we'd overflow. */
1429 [ + + - + ]: 32 : if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1430 : 16 : pstate->growth = PHJ_GROWTH_DISABLED;
1431 [ - + ]: 16 : else if (space_exhausted)
3058 andres@anarazel.de 1432 :UBC 0 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
1433 : : else
3058 andres@anarazel.de 1434 :CBC 16 : pstate->growth = PHJ_GROWTH_OK;
1435 : :
1436 : : /* Free the old batches in shared memory. */
1437 : 32 : dsa_free(hashtable->area, pstate->old_batches);
1438 : 32 : pstate->old_batches = InvalidDsaPointer;
1439 : : }
1440 : : pg_fallthrough;
1441 : :
1442 : : case PHJ_GROW_BATCHES_FINISH:
1443 : : /* Wait for the above to complete. */
1444 : 45 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1445 : : WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
1446 : : }
1447 : 45 : }
1448 : :
1449 : : /*
1450 : : * Repartition the tuples currently loaded into memory for inner batch 0
1451 : : * because the number of batches has been increased. Some tuples are retained
1452 : : * in memory and some are written out to a later batch.
1453 : : */
1454 : : static void
1455 : 45 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1456 : : {
1457 : : dsa_pointer chunk_shared;
1458 : : HashMemoryChunk chunk;
1459 : :
3054 1460 [ - + ]: 45 : Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1461 : :
3058 1462 [ + + ]: 282 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1463 : : {
1464 : 192 : size_t idx = 0;
1465 : :
1466 : : /* Repartition all tuples in this chunk. */
1467 [ + + ]: 146835 : while (idx < chunk->used)
1468 : : {
3045 tgl@sss.pgh.pa.us 1469 : 146643 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
3058 andres@anarazel.de 1470 : 146643 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1471 : : HashJoinTuple copyTuple;
1472 : : dsa_pointer shared;
1473 : : int bucketno;
1474 : : int batchno;
1475 : :
1476 : 146643 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1477 : : &bucketno, &batchno);
1478 : :
1479 [ - + ]: 146643 : Assert(batchno < hashtable->nbatch);
1480 [ + + ]: 146643 : if (batchno == 0)
1481 : : {
1482 : : /* It still belongs in batch 0. Copy to a new chunk. */
1483 : : copyTuple =
1484 : 33394 : ExecParallelHashTupleAlloc(hashtable,
1485 : 33394 : HJTUPLE_OVERHEAD + tuple->t_len,
1486 : : &shared);
1487 : 33394 : copyTuple->hashvalue = hashTuple->hashvalue;
1488 : 33394 : memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1489 : 33394 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1490 : : copyTuple, shared);
1491 : : }
1492 : : else
1493 : : {
1494 : 113249 : size_t tuple_size =
1082 tgl@sss.pgh.pa.us 1495 : 113249 : MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1496 : :
1497 : : /* It belongs in a later batch. */
3058 andres@anarazel.de 1498 : 113249 : hashtable->batches[batchno].estimated_size += tuple_size;
1499 : 113249 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1500 : 113249 : &hashTuple->hashvalue, tuple);
1501 : : }
1502 : :
1503 : : /* Count this tuple. */
1504 : 146643 : ++hashtable->batches[0].old_ntuples;
1505 : 146643 : ++hashtable->batches[batchno].ntuples;
1506 : :
1507 : 146643 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1508 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1509 : : }
1510 : :
1511 : : /* Free this chunk. */
1512 : 192 : dsa_free(hashtable->area, chunk_shared);
1513 : :
1514 [ - + ]: 192 : CHECK_FOR_INTERRUPTS();
1515 : : }
1516 : 45 : }
1517 : :
1518 : : /*
1519 : : * Help repartition inner batches 1..n.
1520 : : */
1521 : : static void
1522 : 45 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1523 : : {
1524 : 45 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1525 : 45 : int old_nbatch = pstate->old_nbatch;
1526 : : SharedTuplestoreAccessor **old_inner_tuples;
1527 : : ParallelHashJoinBatch *old_batches;
1528 : : int i;
1529 : :
1530 : : /* Get our hands on the previous generation of batches. */
1531 : : old_batches = (ParallelHashJoinBatch *)
1532 : 45 : dsa_get_address(hashtable->area, pstate->old_batches);
1331 peter@eisentraut.org 1533 : 45 : old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
3058 andres@anarazel.de 1534 [ + + ]: 93 : for (i = 1; i < old_nbatch; ++i)
1535 : : {
1536 : 48 : ParallelHashJoinBatch *shared =
1082 tgl@sss.pgh.pa.us 1537 : 48 : NthParallelHashJoinBatch(old_batches, i);
1538 : :
3058 andres@anarazel.de 1539 : 48 : old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1540 : : ParallelWorkerNumber + 1,
1541 : : &pstate->fileset);
1542 : : }
1543 : :
1544 : : /* Join in the effort to repartition them. */
1545 [ + + ]: 93 : for (i = 1; i < old_nbatch; ++i)
1546 : : {
1547 : : MinimalTuple tuple;
1548 : : uint32 hashvalue;
1549 : :
1550 : : /* Scan one partition from the previous generation. */
1551 : 48 : sts_begin_parallel_scan(old_inner_tuples[i]);
1552 [ + + ]: 103793 : while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1553 : : {
1554 : 103745 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1555 : : int bucketno;
1556 : : int batchno;
1557 : :
1558 : : /* Decide which partition it goes to in the new generation. */
1559 : 103745 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1560 : : &batchno);
1561 : :
1562 : 103745 : hashtable->batches[batchno].estimated_size += tuple_size;
1563 : 103745 : ++hashtable->batches[batchno].ntuples;
1564 : 103745 : ++hashtable->batches[i].old_ntuples;
1565 : :
1566 : : /* Store the tuple its new batch. */
1567 : 103745 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1568 : : &hashvalue, tuple);
1569 : :
1570 [ - + ]: 103745 : CHECK_FOR_INTERRUPTS();
1571 : : }
1572 : 48 : sts_end_parallel_scan(old_inner_tuples[i]);
1573 : : }
1574 : :
1575 : 45 : pfree(old_inner_tuples);
1576 : 45 : }
1577 : :
1578 : : /*
1579 : : * Transfer the backend-local per-batch counters to the shared totals.
1580 : : */
1581 : : static void
1582 : 281 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
1583 : : {
1584 : 281 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1585 : : int i;
1586 : :
1587 : 281 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1588 : 281 : pstate->total_tuples = 0;
1589 [ + + ]: 1583 : for (i = 0; i < hashtable->nbatch; ++i)
1590 : : {
1591 : 1302 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1592 : :
1593 : 1302 : batch->shared->size += batch->size;
1594 : 1302 : batch->shared->estimated_size += batch->estimated_size;
1595 : 1302 : batch->shared->ntuples += batch->ntuples;
1596 : 1302 : batch->shared->old_ntuples += batch->old_ntuples;
1597 : 1302 : batch->size = 0;
1598 : 1302 : batch->estimated_size = 0;
1599 : 1302 : batch->ntuples = 0;
1600 : 1302 : batch->old_ntuples = 0;
1601 : 1302 : pstate->total_tuples += batch->shared->ntuples;
1602 : : }
1603 : 281 : LWLockRelease(&pstate->lock);
1604 : 281 : }
1605 : :
1606 : : /*
1607 : : * ExecHashIncreaseNumBuckets
1608 : : * increase the original number of buckets in order to reduce
1609 : : * number of tuples per bucket
1610 : : */
1611 : : static void
4222 kgrittn@postgresql.o 1612 : 94 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1613 : : {
1614 : : HashMemoryChunk chunk;
1615 : :
1616 : : /* do nothing if not an increase (it's called increase for a reason) */
1617 [ - + ]: 94 : if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
4222 kgrittn@postgresql.o 1618 :UBC 0 : return;
1619 : :
1620 : : #ifdef HJDEBUG
1621 : : printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1622 : : hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1623 : : #endif
1624 : :
4222 kgrittn@postgresql.o 1625 :CBC 94 : hashtable->nbuckets = hashtable->nbuckets_optimal;
3866 tgl@sss.pgh.pa.us 1626 : 94 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1627 : :
4222 kgrittn@postgresql.o 1628 [ - + ]: 94 : Assert(hashtable->nbuckets > 1);
1629 [ - + ]: 94 : Assert(hashtable->nbuckets <= (INT_MAX / 2));
1630 [ - + ]: 94 : Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1631 : :
1632 : : /*
1633 : : * Just reallocate the proper number of buckets - we don't need to walk
1634 : : * through them - we can walk the dense-allocated chunks (just like in
1635 : : * ExecHashIncreaseNumBatches, but without all the copying into new
1636 : : * chunks)
1637 : : */
3058 andres@anarazel.de 1638 : 94 : hashtable->buckets.unshared =
1331 peter@eisentraut.org 1639 : 94 : repalloc_array(hashtable->buckets.unshared,
1640 : : HashJoinTuple, hashtable->nbuckets);
1641 : :
3058 andres@anarazel.de 1642 : 94 : memset(hashtable->buckets.unshared, 0,
1643 : 94 : hashtable->nbuckets * sizeof(HashJoinTuple));
1644 : :
1645 : : /* scan through all tuples in all chunks to rebuild the hash table */
1646 [ + + ]: 2022 : for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1647 : : {
1648 : : /* process all tuples stored in this chunk */
4000 bruce@momjian.us 1649 : 1928 : size_t idx = 0;
1650 : :
4222 kgrittn@postgresql.o 1651 [ + + ]: 355522 : while (idx < chunk->used)
1652 : : {
3045 tgl@sss.pgh.pa.us 1653 : 353594 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1654 : : int bucketno;
1655 : : int batchno;
1656 : :
4222 kgrittn@postgresql.o 1657 : 353594 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1658 : : &bucketno, &batchno);
1659 : :
1660 : : /* add the tuple to the proper bucket */
3058 andres@anarazel.de 1661 : 353594 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1662 : 353594 : hashtable->buckets.unshared[bucketno] = hashTuple;
1663 : :
1664 : : /* advance index past the tuple */
4222 kgrittn@postgresql.o 1665 : 353594 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1666 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1667 : : }
1668 : :
1669 : : /* allow this loop to be cancellable */
3206 andres@anarazel.de 1670 [ - + ]: 1928 : CHECK_FOR_INTERRUPTS();
1671 : : }
1672 : : }
1673 : :
1674 : : static void
3058 1675 : 47 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1676 : : {
1677 : 47 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1678 : : int i;
1679 : : HashMemoryChunk chunk;
1680 : : dsa_pointer chunk_s;
1681 : :
1139 tmunro@postgresql.or 1682 [ - + ]: 47 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1683 : :
1684 : : /*
1685 : : * It's unlikely, but we need to be prepared for new participants to show
1686 : : * up while we're in the middle of this operation so we need to switch on
1687 : : * barrier phase here.
1688 : : */
3058 andres@anarazel.de 1689 [ + - - - ]: 47 : switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
1690 : : {
1139 tmunro@postgresql.or 1691 : 47 : case PHJ_GROW_BUCKETS_ELECT:
1692 : : /* Elect one participant to prepare to increase nbuckets. */
3058 andres@anarazel.de 1693 [ + - ]: 47 : if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1694 : : WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
1695 : : {
1696 : : size_t size;
1697 : : dsa_pointer_atomic *buckets;
1698 : :
1699 : : /* Double the size of the bucket array. */
1700 : 47 : pstate->nbuckets *= 2;
1701 : 47 : size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1702 : 47 : hashtable->batches[0].shared->size += size / 2;
1703 : 47 : dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1704 : 94 : hashtable->batches[0].shared->buckets =
1705 : 47 : dsa_allocate(hashtable->area, size);
1706 : : buckets = (dsa_pointer_atomic *)
1707 : 47 : dsa_get_address(hashtable->area,
1708 : 47 : hashtable->batches[0].shared->buckets);
1709 [ + + ]: 221231 : for (i = 0; i < pstate->nbuckets; ++i)
1710 : 221184 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1711 : :
1712 : : /* Put the chunk list onto the work queue. */
1713 : 47 : pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1714 : :
1715 : : /* Clear the flag. */
1716 : 47 : pstate->growth = PHJ_GROWTH_OK;
1717 : : }
1718 : : pg_fallthrough;
1719 : :
1720 : : case PHJ_GROW_BUCKETS_REALLOCATE:
1721 : : /* Wait for the above to complete. */
1722 : 47 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1723 : : WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
1724 : : pg_fallthrough;
1725 : :
1139 tmunro@postgresql.or 1726 : 47 : case PHJ_GROW_BUCKETS_REINSERT:
1727 : : /* Reinsert all tuples into the hash table. */
3058 andres@anarazel.de 1728 : 47 : ExecParallelHashEnsureBatchAccessors(hashtable);
1729 : 47 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1730 [ + + ]: 264 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1731 : : {
1732 : 170 : size_t idx = 0;
1733 : :
1734 [ + + ]: 139230 : while (idx < chunk->used)
1735 : : {
3045 tgl@sss.pgh.pa.us 1736 : 139060 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
3058 andres@anarazel.de 1737 : 139060 : dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1738 : : int bucketno;
1739 : : int batchno;
1740 : :
1741 : 139060 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1742 : : &bucketno, &batchno);
1743 [ - + ]: 139060 : Assert(batchno == 0);
1744 : :
1745 : : /* add the tuple to the proper bucket */
1746 : 139060 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1747 : : hashTuple, shared);
1748 : :
1749 : : /* advance index past the tuple */
1750 : 139060 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1751 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1752 : : }
1753 : :
1754 : : /* allow this loop to be cancellable */
1755 [ - + ]: 170 : CHECK_FOR_INTERRUPTS();
1756 : : }
1757 : 47 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1758 : : WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
1759 : : }
1760 : 47 : }
1761 : :
1762 : : /*
1763 : : * ExecHashTableInsert
1764 : : * insert a tuple into the hash table depending on the hash value
1765 : : * it may just go to a temp file for later batches
1766 : : *
1767 : : * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1768 : : * tuple; the minimal case in particular is certain to happen while reloading
1769 : : * tuples from batch files. We could save some cycles in the regular-tuple
1770 : : * case by not forcing the slot contents into minimal form; not clear if it's
1771 : : * worth the messiness required.
1772 : : */
1773 : : void
10892 scrappy@hub.org 1774 : 8930767 : ExecHashTableInsert(HashJoinTable hashtable,
1775 : : TupleTableSlot *slot,
1776 : : uint32 hashvalue)
1777 : : {
1778 : : bool shouldFree;
2728 andres@anarazel.de 1779 : 8930767 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1780 : : int bucketno;
1781 : : int batchno;
1782 : :
7730 tgl@sss.pgh.pa.us 1783 : 8930767 : ExecHashGetBucketAndBatch(hashtable, hashvalue,
1784 : : &bucketno, &batchno);
1785 : :
1786 : : /*
1787 : : * decide whether to put the tuple in the hash table or a temp file
1788 : : */
1789 [ + + ]: 8930767 : if (batchno == hashtable->curbatch)
1790 : : {
1791 : : /*
1792 : : * put the tuple in hash table
1793 : : */
1794 : : HashJoinTuple hashTuple;
1795 : : int hashTupleSize;
1796 : :
1797 : : /* Create the HashJoinTuple */
7252 1798 : 6785751 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
4255 heikki.linnakangas@i 1799 : 6785751 : hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1800 : :
7730 tgl@sss.pgh.pa.us 1801 : 6785751 : hashTuple->hashvalue = hashvalue;
7252 1802 : 6785751 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1803 : :
1804 : : /*
1805 : : * We always reset the tuple-matched flag on insertion. This is okay
1806 : : * even when reloading a tuple from a batch file, since the tuple
1807 : : * could not possibly have been matched to an outer tuple before it
1808 : : * went into the batch file.
1809 : : */
5605 1810 : 6785751 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1811 : :
1812 : : /* Push it onto the front of the bucket's list */
3058 andres@anarazel.de 1813 : 6785751 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1814 : 6785751 : hashtable->buckets.unshared[bucketno] = hashTuple;
1815 : :
1816 : : /*
1817 : : * Increase the (optimal) number of buckets if we just exceeded the
1818 : : * NTUP_PER_BUCKET threshold, but only when there's still a single
1819 : : * batch. Note that totalTuples - skewTuples is a reliable indicator
1820 : : * of the hash table's size only as long as there's just one batch.
1821 : : */
3866 tgl@sss.pgh.pa.us 1822 [ + + ]: 6785751 : if (hashtable->nbatch == 1 &&
47 tgl@sss.pgh.pa.us 1823 :GNC 4295827 : (hashtable->totalTuples - hashtable->skewTuples) >
1824 [ + + ]: 4295827 : (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1825 : : {
1826 : : /* Guard against integer overflow and alloc size overflow */
3866 tgl@sss.pgh.pa.us 1827 [ + - ]:CBC 222 : if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1828 [ + - ]: 222 : hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1829 : : {
1830 : 222 : hashtable->nbuckets_optimal *= 2;
1831 : 222 : hashtable->log2_nbuckets_optimal += 1;
1832 : : }
1833 : : }
1834 : :
1835 : : /* Account for space used, and back off if we've used too much */
7730 1836 : 6785751 : hashtable->spaceUsed += hashTupleSize;
5937 rhaas@postgresql.org 1837 [ + + ]: 6785751 : if (hashtable->spaceUsed > hashtable->spacePeak)
1838 : 4998919 : hashtable->spacePeak = hashtable->spaceUsed;
4222 kgrittn@postgresql.o 1839 : 6785751 : if (hashtable->spaceUsed +
1840 : 6785751 : hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
4253 rhaas@postgresql.org 1841 [ + + ]: 6785751 : > hashtable->spaceAllowed)
7730 tgl@sss.pgh.pa.us 1842 : 552772 : ExecHashIncreaseNumBatches(hashtable);
1843 : : }
1844 : : else
1845 : : {
1846 : : /*
1847 : : * put the tuple into a temp file for later batches
1848 : : */
1849 [ - + ]: 2145016 : Assert(batchno > hashtable->curbatch);
6907 1850 : 2145016 : ExecHashJoinSaveTuple(tuple,
1851 : : hashvalue,
1082 tomas.vondra@postgre 1852 : 2145016 : &hashtable->innerBatchFile[batchno],
1853 : : hashtable);
1854 : : }
1855 : :
2728 andres@anarazel.de 1856 [ + + ]: 8930767 : if (shouldFree)
1857 : 6515591 : heap_free_minimal_tuple(tuple);
10892 scrappy@hub.org 1858 : 8930767 : }
1859 : :
1860 : : /*
1861 : : * ExecParallelHashTableInsert
1862 : : * insert a tuple into a shared hash table or shared batch tuplestore
1863 : : */
1864 : : void
3058 andres@anarazel.de 1865 : 1440128 : ExecParallelHashTableInsert(HashJoinTable hashtable,
1866 : : TupleTableSlot *slot,
1867 : : uint32 hashvalue)
1868 : : {
1869 : : bool shouldFree;
2728 1870 : 1440128 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1871 : : dsa_pointer shared;
1872 : : int bucketno;
1873 : : int batchno;
1874 : :
3058 1875 : 169 : retry:
1876 : 1440297 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1877 : :
1878 [ + + ]: 1440297 : if (batchno == 0)
1879 : : {
1880 : : HashJoinTuple hashTuple;
1881 : :
1882 : : /* Try to load it into memory. */
1883 [ - + ]: 832420 : Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
1884 : : PHJ_BUILD_HASH_INNER);
1885 : 832420 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1886 : 832420 : HJTUPLE_OVERHEAD + tuple->t_len,
1887 : : &shared);
1888 [ + + ]: 832420 : if (hashTuple == NULL)
1889 : 155 : goto retry;
1890 : :
1891 : : /* Store the hash value in the HashJoinTuple header. */
1892 : 832265 : hashTuple->hashvalue = hashvalue;
1893 : 832265 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1117 tmunro@postgresql.or 1894 : 832265 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1895 : :
1896 : : /* Push it onto the front of the bucket's list */
3058 andres@anarazel.de 1897 : 832265 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1898 : : hashTuple, shared);
1899 : : }
1900 : : else
1901 : : {
1902 : 607877 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1903 : :
1904 [ - + ]: 607877 : Assert(batchno > 0);
1905 : :
1906 : : /* Try to preallocate space in the batch if necessary. */
1907 [ + + ]: 607877 : if (hashtable->batches[batchno].preallocated < tuple_size)
1908 : : {
1909 [ + + ]: 1109 : if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1910 : 14 : goto retry;
1911 : : }
1912 : :
1913 [ - + ]: 607863 : Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1914 : 607863 : hashtable->batches[batchno].preallocated -= tuple_size;
1915 : 607863 : sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1916 : : tuple);
1917 : : }
1918 : 1440128 : ++hashtable->batches[batchno].ntuples;
1919 : :
2728 1920 [ + - ]: 1440128 : if (shouldFree)
1921 : 1440128 : heap_free_minimal_tuple(tuple);
3058 1922 : 1440128 : }
1923 : :
1924 : : /*
1925 : : * Insert a tuple into the current hash table. Unlike
1926 : : * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1927 : : * to other batches or to run out of memory, and should only be called with
1928 : : * tuples that belong in the current batch once growth has been disabled.
1929 : : */
1930 : : void
1931 : 721112 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1932 : : TupleTableSlot *slot,
1933 : : uint32 hashvalue)
1934 : : {
1935 : : bool shouldFree;
2728 1936 : 721112 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1937 : : HashJoinTuple hashTuple;
1938 : : dsa_pointer shared;
1939 : : int batchno;
1940 : : int bucketno;
1941 : :
3058 1942 : 721112 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1943 [ - + ]: 721112 : Assert(batchno == hashtable->curbatch);
1944 : 721112 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1945 : 721112 : HJTUPLE_OVERHEAD + tuple->t_len,
1946 : : &shared);
1947 : 721112 : hashTuple->hashvalue = hashvalue;
1948 : 721112 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1949 : 721112 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1950 : 721112 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1951 : : hashTuple, shared);
1952 : :
2728 1953 [ - + ]: 721112 : if (shouldFree)
2728 andres@anarazel.de 1954 :UBC 0 : heap_free_minimal_tuple(tuple);
3058 andres@anarazel.de 1955 :CBC 721112 : }
1956 : :
1957 : :
1958 : : /*
1959 : : * ExecHashGetBucketAndBatch
1960 : : * Determine the bucket number and batch number for a hash value
1961 : : *
1962 : : * Note: on-the-fly increases of nbatch must not change the bucket number
1963 : : * for a given hash code (since we don't move tuples to different hash
1964 : : * chains), and must only cause the batch number to remain the same or
1965 : : * increase. Our algorithm is
1966 : : * bucketno = hashvalue MOD nbuckets
1967 : : * batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
1968 : : * where nbuckets and nbatch are both expected to be powers of 2, so we can
1969 : : * do the computations by shifting and masking. (This assumes that all hash
1970 : : * functions are good about randomizing all their output bits, else we are
1971 : : * likely to have very skewed bucket or batch occupancy.)
1972 : : *
1973 : : * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1974 : : * bucket count growth. Once we start batching, the value is fixed and does
1975 : : * not change over the course of the join (making it possible to compute batch
1976 : : * number the way we do here).
1977 : : *
1978 : : * nbatch is always a power of 2; we increase it only by doubling it. This
1979 : : * effectively adds one more bit to the top of the batchno. In very large
1980 : : * joins, we might run out of bits to add, so we do this by rotating the hash
1981 : : * value. This causes batchno to steal bits from bucketno when the number of
1982 : : * virtual buckets exceeds 2^32. It's better to have longer bucket chains
1983 : : * than to lose the ability to divide batches.
1984 : : */
1985 : : void
7730 tgl@sss.pgh.pa.us 1986 : 26866173 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1987 : : uint32 hashvalue,
1988 : : int *bucketno,
1989 : : int *batchno)
1990 : : {
7507 bruce@momjian.us 1991 : 26866173 : uint32 nbuckets = (uint32) hashtable->nbuckets;
1992 : 26866173 : uint32 nbatch = (uint32) hashtable->nbatch;
1993 : :
7730 tgl@sss.pgh.pa.us 1994 [ + + ]: 26866173 : if (nbatch > 1)
1995 : : {
6913 1996 : 10453582 : *bucketno = hashvalue & (nbuckets - 1);
2324 tmunro@postgresql.or 1997 : 10453582 : *batchno = pg_rotate_right32(hashvalue,
1998 : 10453582 : hashtable->log2_nbuckets) & (nbatch - 1);
1999 : : }
2000 : : else
2001 : : {
6913 tgl@sss.pgh.pa.us 2002 : 16412591 : *bucketno = hashvalue & (nbuckets - 1);
7730 2003 : 16412591 : *batchno = 0;
2004 : : }
8527 2005 : 26866173 : }
2006 : :
2007 : : /*
2008 : : * ExecScanHashBucket
2009 : : * scan a hash bucket for matches to the current outer tuple
2010 : : *
2011 : : * The current outer tuple must be stored in econtext->ecxt_outertuple.
2012 : : *
2013 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2014 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2015 : : * for the latter.
2016 : : */
2017 : : bool
10466 bruce@momjian.us 2018 : 14913540 : ExecScanHashBucket(HashJoinState *hjstate,
2019 : : ExprContext *econtext)
2020 : : {
3339 andres@anarazel.de 2021 : 14913540 : ExprState *hjclauses = hjstate->hashclauses;
9842 bruce@momjian.us 2022 : 14913540 : HashJoinTable hashtable = hjstate->hj_HashTable;
2023 : 14913540 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
7730 tgl@sss.pgh.pa.us 2024 : 14913540 : uint32 hashvalue = hjstate->hj_CurHashValue;
2025 : :
2026 : : /*
2027 : : * hj_CurTuple is the address of the tuple last returned from the current
2028 : : * bucket, or NULL if it's time to start scanning a new bucket.
2029 : : *
2030 : : * If the tuple hashed to a skew bucket then scan the skew bucket
2031 : : * otherwise scan the standard hashtable bucket.
2032 : : */
6254 2033 [ + + ]: 14913540 : if (hashTuple != NULL)
3058 andres@anarazel.de 2034 : 3445765 : hashTuple = hashTuple->next.unshared;
6254 tgl@sss.pgh.pa.us 2035 [ + + ]: 11467775 : else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
2036 : 1600 : hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
2037 : : else
3058 andres@anarazel.de 2038 : 11466175 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2039 : :
2040 [ + + ]: 17858628 : while (hashTuple != NULL)
2041 : : {
2042 [ + + ]: 10046446 : if (hashTuple->hashvalue == hashvalue)
2043 : : {
2044 : : TupleTableSlot *inntuple;
2045 : :
2046 : : /* insert hashtable's tuple into exec slot so ExecQual sees it */
2047 : 7101370 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2048 : : hjstate->hj_HashTupleSlot,
2049 : : false); /* do not pfree */
2050 : 7101370 : econtext->ecxt_innertuple = inntuple;
2051 : :
3018 2052 [ + + ]: 7101370 : if (ExecQualAndReset(hjclauses, econtext))
2053 : : {
3058 2054 : 7101358 : hjstate->hj_CurTuple = hashTuple;
2055 : 7101358 : return true;
2056 : : }
2057 : : }
2058 : :
2059 : 2945088 : hashTuple = hashTuple->next.unshared;
2060 : : }
2061 : :
2062 : : /*
2063 : : * no match
2064 : : */
2065 : 7812182 : return false;
2066 : : }
2067 : :
2068 : : /*
2069 : : * ExecParallelScanHashBucket
2070 : : * scan a hash bucket for matches to the current outer tuple
2071 : : *
2072 : : * The current outer tuple must be stored in econtext->ecxt_outertuple.
2073 : : *
2074 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2075 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2076 : : * for the latter.
2077 : : */
2078 : : bool
2079 : 2804080 : ExecParallelScanHashBucket(HashJoinState *hjstate,
2080 : : ExprContext *econtext)
2081 : : {
2082 : 2804080 : ExprState *hjclauses = hjstate->hashclauses;
2083 : 2804080 : HashJoinTable hashtable = hjstate->hj_HashTable;
2084 : 2804080 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2085 : 2804080 : uint32 hashvalue = hjstate->hj_CurHashValue;
2086 : :
2087 : : /*
2088 : : * hj_CurTuple is the address of the tuple last returned from the current
2089 : : * bucket, or NULL if it's time to start scanning a new bucket.
2090 : : */
2091 [ + + ]: 2804080 : if (hashTuple != NULL)
2092 : 1360052 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2093 : : else
2094 : 1444028 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2095 : : hjstate->hj_CurBucketNo);
2096 : :
9849 tgl@sss.pgh.pa.us 2097 [ + + ]: 3685828 : while (hashTuple != NULL)
2098 : : {
7730 2099 [ + + ]: 2241800 : if (hashTuple->hashvalue == hashvalue)
2100 : : {
2101 : : TupleTableSlot *inntuple;
2102 : :
2103 : : /* insert hashtable's tuple into exec slot so ExecQual sees it */
7252 2104 : 1360052 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2105 : : hjstate->hj_HashTupleSlot,
2106 : : false); /* do not pfree */
7730 2107 : 1360052 : econtext->ecxt_innertuple = inntuple;
2108 : :
3018 andres@anarazel.de 2109 [ + - ]: 1360052 : if (ExecQualAndReset(hjclauses, econtext))
2110 : : {
7730 tgl@sss.pgh.pa.us 2111 : 1360052 : hjstate->hj_CurTuple = hashTuple;
5605 2112 : 1360052 : return true;
2113 : : }
2114 : : }
2115 : :
3058 andres@anarazel.de 2116 : 881748 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2117 : : }
2118 : :
2119 : : /*
2120 : : * no match
2121 : : */
5605 tgl@sss.pgh.pa.us 2122 : 1444028 : return false;
2123 : : }
2124 : :
2125 : : /*
2126 : : * ExecPrepHashTableForUnmatched
2127 : : * set up for a series of ExecScanHashTableForUnmatched calls
2128 : : */
2129 : : void
2130 : 2573 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2131 : : {
2132 : : /*----------
2133 : : * During this scan we use the HashJoinState fields as follows:
2134 : : *
2135 : : * hj_CurBucketNo: next regular bucket to scan
2136 : : * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2137 : : * hj_CurTuple: last tuple returned, or NULL to start next bucket
2138 : : *----------
2139 : : */
2140 : 2573 : hjstate->hj_CurBucketNo = 0;
2141 : 2573 : hjstate->hj_CurSkewBucketNo = 0;
2142 : 2573 : hjstate->hj_CurTuple = NULL;
2143 : 2573 : }
2144 : :
2145 : : /*
2146 : : * Decide if this process is allowed to run the unmatched scan. If so, the
2147 : : * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
2148 : : * Otherwise the batch is detached and false is returned.
2149 : : */
2150 : : bool
1131 tmunro@postgresql.or 2151 : 70 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
2152 : : {
2153 : 70 : HashJoinTable hashtable = hjstate->hj_HashTable;
2154 : 70 : int curbatch = hashtable->curbatch;
2155 : 70 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
2156 : :
2157 [ - + ]: 70 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
2158 : :
2159 : : /*
2160 : : * It would not be deadlock-free to wait on the batch barrier, because it
2161 : : * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
2162 : : * already emitted tuples. Therefore, we'll hold a wait-free election:
2163 : : * only one process can continue to the next phase, and all others detach
2164 : : * from this batch. They can still go any work on other batches, if there
2165 : : * are any.
2166 : : */
2167 [ + + ]: 70 : if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
2168 : : {
2169 : : /* This process considers the batch to be done. */
2170 : 26 : hashtable->batches[hashtable->curbatch].done = true;
2171 : :
2172 : : /* Make sure any temporary files are closed. */
2173 : 26 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
2174 : 26 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
2175 : :
2176 : : /*
2177 : : * Track largest batch we've seen, which would normally happen in
2178 : : * ExecHashTableDetachBatch().
2179 : : */
2180 : 26 : hashtable->spacePeak =
2181 : 26 : Max(hashtable->spacePeak,
2182 : : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
2183 : 26 : hashtable->curbatch = -1;
2184 : 26 : return false;
2185 : : }
2186 : :
2187 : : /* Now we are alone with this batch. */
2188 [ - + ]: 44 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
2189 : :
2190 : : /*
2191 : : * Has another process decided to give up early and command all processes
2192 : : * to skip the unmatched scan?
2193 : : */
2194 [ - + ]: 44 : if (batch->skip_unmatched)
2195 : : {
1131 tmunro@postgresql.or 2196 :UBC 0 : hashtable->batches[hashtable->curbatch].done = true;
2197 : 0 : ExecHashTableDetachBatch(hashtable);
2198 : 0 : return false;
2199 : : }
2200 : :
2201 : : /* Now prepare the process local state, just as for non-parallel join. */
1131 tmunro@postgresql.or 2202 :CBC 44 : ExecPrepHashTableForUnmatched(hjstate);
2203 : :
2204 : 44 : return true;
2205 : : }
2206 : :
2207 : : /*
2208 : : * ExecScanHashTableForUnmatched
2209 : : * scan the hash table for unmatched inner tuples
2210 : : *
2211 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2212 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2213 : : * for the latter.
2214 : : */
2215 : : bool
5605 tgl@sss.pgh.pa.us 2216 : 254855 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2217 : : {
2218 : 254855 : HashJoinTable hashtable = hjstate->hj_HashTable;
2219 : 254855 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2220 : :
2221 : : for (;;)
2222 : : {
2223 : : /*
2224 : : * hj_CurTuple is the address of the tuple last returned from the
2225 : : * current bucket, or NULL if it's time to start scanning a new
2226 : : * bucket.
2227 : : */
2228 [ + + ]: 3594783 : if (hashTuple != NULL)
3058 andres@anarazel.de 2229 : 252326 : hashTuple = hashTuple->next.unshared;
5605 tgl@sss.pgh.pa.us 2230 [ + + ]: 3342457 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2231 : : {
3058 andres@anarazel.de 2232 : 3339932 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
5605 tgl@sss.pgh.pa.us 2233 : 3339932 : hjstate->hj_CurBucketNo++;
2234 : : }
2235 [ - + ]: 2525 : else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2236 : : {
5504 bruce@momjian.us 2237 :UBC 0 : int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2238 : :
5605 tgl@sss.pgh.pa.us 2239 : 0 : hashTuple = hashtable->skewBucket[j]->tuples;
2240 : 0 : hjstate->hj_CurSkewBucketNo++;
2241 : : }
2242 : : else
5605 tgl@sss.pgh.pa.us 2243 :CBC 2525 : break; /* finished all buckets */
2244 : :
2245 [ + + ]: 3864900 : while (hashTuple != NULL)
2246 : : {
2247 [ + + ]: 524972 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2248 : : {
2249 : : TupleTableSlot *inntuple;
2250 : :
2251 : : /* insert hashtable's tuple into exec slot */
2252 : 252330 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2253 : : hjstate->hj_HashTupleSlot,
2254 : : false); /* do not pfree */
2255 : 252330 : econtext->ecxt_innertuple = inntuple;
2256 : :
2257 : : /*
2258 : : * Reset temp memory each time; although this function doesn't
2259 : : * do any qual eval, the caller will, so let's keep it
2260 : : * parallel to ExecScanHashBucket.
2261 : : */
2262 : 252330 : ResetExprContext(econtext);
2263 : :
2264 : 252330 : hjstate->hj_CurTuple = hashTuple;
2265 : 252330 : return true;
2266 : : }
2267 : :
3058 andres@anarazel.de 2268 : 272642 : hashTuple = hashTuple->next.unshared;
2269 : : }
2270 : :
2271 : : /* allow this loop to be cancellable */
3206 2272 [ - + ]: 3339928 : CHECK_FOR_INTERRUPTS();
2273 : : }
2274 : :
2275 : : /*
2276 : : * no more unmatched tuples
2277 : : */
5605 tgl@sss.pgh.pa.us 2278 : 2525 : return false;
2279 : : }
2280 : :
2281 : : /*
2282 : : * ExecParallelScanHashTableForUnmatched
2283 : : * scan the hash table for unmatched inner tuples, in parallel join
2284 : : *
2285 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2286 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2287 : : * for the latter.
2288 : : */
2289 : : bool
1131 tmunro@postgresql.or 2290 : 80048 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
2291 : : ExprContext *econtext)
2292 : : {
2293 : 80048 : HashJoinTable hashtable = hjstate->hj_HashTable;
2294 : 80048 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2295 : :
2296 : : for (;;)
2297 : : {
2298 : : /*
2299 : : * hj_CurTuple is the address of the tuple last returned from the
2300 : : * current bucket, or NULL if it's time to start scanning a new
2301 : : * bucket.
2302 : : */
2303 [ + + ]: 489648 : if (hashTuple != NULL)
2304 : 80004 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2305 [ + + ]: 409644 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2306 : 409600 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2307 : 409600 : hjstate->hj_CurBucketNo++);
2308 : : else
2309 : 44 : break; /* finished all buckets */
2310 : :
2311 [ + + ]: 649604 : while (hashTuple != NULL)
2312 : : {
2313 [ + + ]: 240004 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2314 : : {
2315 : : TupleTableSlot *inntuple;
2316 : :
2317 : : /* insert hashtable's tuple into exec slot */
2318 : 80004 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2319 : : hjstate->hj_HashTupleSlot,
2320 : : false); /* do not pfree */
2321 : 80004 : econtext->ecxt_innertuple = inntuple;
2322 : :
2323 : : /*
2324 : : * Reset temp memory each time; although this function doesn't
2325 : : * do any qual eval, the caller will, so let's keep it
2326 : : * parallel to ExecScanHashBucket.
2327 : : */
2328 : 80004 : ResetExprContext(econtext);
2329 : :
2330 : 80004 : hjstate->hj_CurTuple = hashTuple;
2331 : 80004 : return true;
2332 : : }
2333 : :
2334 : 160000 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2335 : : }
2336 : :
2337 : : /* allow this loop to be cancellable */
2338 [ - + ]: 409600 : CHECK_FOR_INTERRUPTS();
2339 : : }
2340 : :
2341 : : /*
2342 : : * no more unmatched tuples
2343 : : */
2344 : 44 : return false;
2345 : : }
2346 : :
2347 : : /*
2348 : : * ExecHashTableReset
2349 : : *
2350 : : * reset hash table header for new batch
2351 : : */
2352 : : void
7730 tgl@sss.pgh.pa.us 2353 : 828 : ExecHashTableReset(HashJoinTable hashtable)
2354 : : {
2355 : : MemoryContext oldcxt;
9849 2356 : 828 : int nbuckets = hashtable->nbuckets;
2357 : :
2358 : : /*
2359 : : * Release all the hash buckets and tuples acquired in the prior pass, and
2360 : : * reinitialize the context for a new pass.
2361 : : */
9442 2362 : 828 : MemoryContextReset(hashtable->batchCxt);
9849 2363 : 828 : oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2364 : :
2365 : : /* Reallocate and reinitialize the hash bucket headers. */
1331 peter@eisentraut.org 2366 : 828 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
2367 : :
7730 tgl@sss.pgh.pa.us 2368 : 828 : hashtable->spaceUsed = 0;
2369 : :
9849 2370 : 828 : MemoryContextSwitchTo(oldcxt);
2371 : :
2372 : : /* Forget the chunks (the memory was freed by the context reset above). */
4255 heikki.linnakangas@i 2373 : 828 : hashtable->chunks = NULL;
10892 scrappy@hub.org 2374 : 828 : }
2375 : :
2376 : : /*
2377 : : * ExecHashTableResetMatchFlags
2378 : : * Clear all the HeapTupleHeaderHasMatch flags in the table
2379 : : */
2380 : : void
5605 tgl@sss.pgh.pa.us 2381 : 44 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2382 : : {
2383 : : HashJoinTuple tuple;
2384 : : int i;
2385 : :
2386 : : /* Reset all flags in the main table ... */
2387 [ + + ]: 45100 : for (i = 0; i < hashtable->nbuckets; i++)
2388 : : {
3058 andres@anarazel.de 2389 [ + + ]: 45236 : for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2390 : 180 : tuple = tuple->next.unshared)
5605 tgl@sss.pgh.pa.us 2391 : 180 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2392 : : }
2393 : :
2394 : : /* ... and the same for the skew buckets, if any */
2395 [ - + ]: 44 : for (i = 0; i < hashtable->nSkewBuckets; i++)
2396 : : {
5504 bruce@momjian.us 2397 :UBC 0 : int j = hashtable->skewBucketNums[i];
5605 tgl@sss.pgh.pa.us 2398 : 0 : HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2399 : :
3058 andres@anarazel.de 2400 [ # # ]: 0 : for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
5605 tgl@sss.pgh.pa.us 2401 : 0 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2402 : : }
5605 tgl@sss.pgh.pa.us 2403 :CBC 44 : }
2404 : :
2405 : :
2406 : : void
5776 2407 : 1053 : ExecReScanHash(HashState *node)
2408 : : {
1398 2409 : 1053 : PlanState *outerPlan = outerPlanState(node);
2410 : :
2411 : : /*
2412 : : * if chgParam of subnode is not null then plan will be re-scanned by
2413 : : * first ExecProcNode.
2414 : : */
2415 [ + + ]: 1053 : if (outerPlan->chgParam == NULL)
2416 : 20 : ExecReScan(outerPlan);
10308 vadim4o@yahoo.com 2417 : 1053 : }
2418 : :
2419 : :
2420 : : /*
2421 : : * ExecHashBuildSkewHash
2422 : : *
2423 : : * Set up for skew optimization if we can identify the most common values
2424 : : * (MCVs) of the outer relation's join key. We make a skew hash bucket
2425 : : * for the hash value of each MCV, up to the number of slots allowed
2426 : : * based on available memory.
2427 : : */
2428 : : static void
623 drowley@postgresql.o 2429 : 84 : ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable,
2430 : : Hash *node, int mcvsToUse)
2431 : : {
2432 : : HeapTupleData *statsTuple;
2433 : : AttStatsSlot sslot;
2434 : :
2435 : : /* Do nothing if planner didn't identify the outer relation's join key */
6254 tgl@sss.pgh.pa.us 2436 [ - + ]: 84 : if (!OidIsValid(node->skewTable))
6254 tgl@sss.pgh.pa.us 2437 :LBC (36) : return;
2438 : : /* Also, do nothing if we don't have room for at least one skew bucket */
6254 tgl@sss.pgh.pa.us 2439 [ - + ]:CBC 84 : if (mcvsToUse <= 0)
6254 tgl@sss.pgh.pa.us 2440 :UBC 0 : return;
2441 : :
2442 : : /*
2443 : : * Try to find the MCV statistics for the outer relation's join key.
2444 : : */
5924 rhaas@postgresql.org 2445 :CBC 84 : statsTuple = SearchSysCache3(STATRELATTINH,
2446 : : ObjectIdGetDatum(node->skewTable),
2447 : 84 : Int16GetDatum(node->skewColumn),
2448 : 84 : BoolGetDatum(node->skewInherit));
6254 tgl@sss.pgh.pa.us 2449 [ - + ]: 84 : if (!HeapTupleIsValid(statsTuple))
6254 tgl@sss.pgh.pa.us 2450 :LBC (36) : return;
2451 : :
3279 tgl@sss.pgh.pa.us 2452 [ + + ]:CBC 84 : if (get_attstatsslot(&sslot, statsTuple,
2453 : : STATISTIC_KIND_MCV, InvalidOid,
2454 : : ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2455 : : {
2456 : : double frac;
2457 : : int nbuckets;
2458 : : int i;
2459 : :
2460 [ - + ]: 4 : if (mcvsToUse > sslot.nvalues)
3279 tgl@sss.pgh.pa.us 2461 :UBC 0 : mcvsToUse = sslot.nvalues;
2462 : :
2463 : : /*
2464 : : * Calculate the expected fraction of outer relation that will
2465 : : * participate in the skew optimization. If this isn't at least
2466 : : * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2467 : : */
6254 tgl@sss.pgh.pa.us 2468 :CBC 4 : frac = 0;
2469 [ + + ]: 88 : for (i = 0; i < mcvsToUse; i++)
3279 2470 : 84 : frac += sslot.numbers[i];
6254 2471 [ - + ]: 4 : if (frac < SKEW_MIN_OUTER_FRACTION)
2472 : : {
3279 tgl@sss.pgh.pa.us 2473 :UBC 0 : free_attstatsslot(&sslot);
6254 2474 : 0 : ReleaseSysCache(statsTuple);
2475 : 0 : return;
2476 : : }
2477 : :
2478 : : /*
2479 : : * Okay, set up the skew hashtable.
2480 : : *
2481 : : * skewBucket[] is an open addressing hashtable with a power of 2 size
2482 : : * that is greater than the number of MCV values. (This ensures there
2483 : : * will be at least one null entry, so searches will always
2484 : : * terminate.)
2485 : : *
2486 : : * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2487 : : * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2488 : : * since we limit pg_statistic entries to much less than that.
2489 : : */
2218 drowley@postgresql.o 2490 :CBC 4 : nbuckets = pg_nextpower2_32(mcvsToUse + 1);
2491 : : /* use two more bits just to help avoid collisions */
6254 tgl@sss.pgh.pa.us 2492 : 4 : nbuckets <<= 2;
2493 : :
2494 : 4 : hashtable->skewEnabled = true;
2495 : 4 : hashtable->skewBucketLen = nbuckets;
2496 : :
2497 : : /*
2498 : : * We allocate the bucket memory in the hashtable's batch context. It
2499 : : * is only needed during the first batch, and this ensures it will be
2500 : : * automatically removed once the first batch is done.
2501 : : */
2502 : 4 : hashtable->skewBucket = (HashSkewBucket **)
2503 : 4 : MemoryContextAllocZero(hashtable->batchCxt,
2504 : : nbuckets * sizeof(HashSkewBucket *));
2505 : 4 : hashtable->skewBucketNums = (int *)
2506 : 4 : MemoryContextAllocZero(hashtable->batchCxt,
2507 : : mcvsToUse * sizeof(int));
2508 : :
2509 : 4 : hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2510 : 4 : + mcvsToUse * sizeof(int);
2511 : 4 : hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2512 : 4 : + mcvsToUse * sizeof(int);
5937 rhaas@postgresql.org 2513 [ + - ]: 4 : if (hashtable->spaceUsed > hashtable->spacePeak)
2514 : 4 : hashtable->spacePeak = hashtable->spaceUsed;
2515 : :
2516 : : /*
2517 : : * Create a skew bucket for each MCV hash value.
2518 : : *
2519 : : * Note: it is very important that we create the buckets in order of
2520 : : * decreasing MCV frequency. If we have to remove some buckets, they
2521 : : * must be removed in reverse order of creation (see notes in
2522 : : * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2523 : : * be removed first.
2524 : : */
2525 : :
6254 tgl@sss.pgh.pa.us 2526 [ + + ]: 88 : for (i = 0; i < mcvsToUse; i++)
2527 : : {
2528 : : uint32 hashvalue;
2529 : : int bucket;
2530 : :
623 drowley@postgresql.o 2531 : 84 : hashvalue = DatumGetUInt32(FunctionCall1Coll(hashstate->skew_hashfunction,
2532 : : hashstate->skew_collation,
2601 peter@eisentraut.org 2533 : 84 : sslot.values[i]));
2534 : :
2535 : : /*
2536 : : * While we have not hit a hole in the hashtable and have not hit
2537 : : * the desired bucket, we have collided with some previous hash
2538 : : * value, so try the next bucket location. NB: this code must
2539 : : * match ExecHashGetSkewBucket.
2540 : : */
6254 tgl@sss.pgh.pa.us 2541 : 84 : bucket = hashvalue & (nbuckets - 1);
2542 [ - + ]: 84 : while (hashtable->skewBucket[bucket] != NULL &&
6254 tgl@sss.pgh.pa.us 2543 [ # # ]:UBC 0 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2544 : 0 : bucket = (bucket + 1) & (nbuckets - 1);
2545 : :
2546 : : /*
2547 : : * If we found an existing bucket with the same hashvalue, leave
2548 : : * it alone. It's okay for two MCVs to share a hashvalue.
2549 : : */
6254 tgl@sss.pgh.pa.us 2550 [ - + ]:CBC 84 : if (hashtable->skewBucket[bucket] != NULL)
6254 tgl@sss.pgh.pa.us 2551 :UBC 0 : continue;
2552 : :
2553 : : /* Okay, create a new skew bucket for this hashvalue. */
6254 tgl@sss.pgh.pa.us 2554 :CBC 168 : hashtable->skewBucket[bucket] = (HashSkewBucket *)
2555 : 84 : MemoryContextAlloc(hashtable->batchCxt,
2556 : : sizeof(HashSkewBucket));
2557 : 84 : hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2558 : 84 : hashtable->skewBucket[bucket]->tuples = NULL;
2559 : 84 : hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2560 : 84 : hashtable->nSkewBuckets++;
2561 : 84 : hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2562 : 84 : hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
5937 rhaas@postgresql.org 2563 [ + - ]: 84 : if (hashtable->spaceUsed > hashtable->spacePeak)
2564 : 84 : hashtable->spacePeak = hashtable->spaceUsed;
2565 : : }
2566 : :
3279 tgl@sss.pgh.pa.us 2567 : 4 : free_attstatsslot(&sslot);
2568 : : }
2569 : :
6254 2570 : 84 : ReleaseSysCache(statsTuple);
2571 : : }
2572 : :
2573 : : /*
2574 : : * ExecHashGetSkewBucket
2575 : : *
2576 : : * Returns the index of the skew bucket for this hashvalue,
2577 : : * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2578 : : * associated with any active skew bucket.
2579 : : */
2580 : : int
2581 : 20431702 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2582 : : {
2583 : : int bucket;
2584 : :
2585 : : /*
2586 : : * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2587 : : * particular, this happens after the initial batch is done).
2588 : : */
2589 [ + + ]: 20431702 : if (!hashtable->skewEnabled)
2590 : 20351702 : return INVALID_SKEW_BUCKET_NO;
2591 : :
2592 : : /*
2593 : : * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2594 : : */
2595 : 80000 : bucket = hashvalue & (hashtable->skewBucketLen - 1);
2596 : :
2597 : : /*
2598 : : * While we have not hit a hole in the hashtable and have not hit the
2599 : : * desired bucket, we have collided with some other hash value, so try the
2600 : : * next bucket location.
2601 : : */
2602 [ + + ]: 85220 : while (hashtable->skewBucket[bucket] != NULL &&
2603 [ + + ]: 7212 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2604 : 5220 : bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2605 : :
2606 : : /*
2607 : : * Found the desired bucket?
2608 : : */
2609 [ + + ]: 80000 : if (hashtable->skewBucket[bucket] != NULL)
2610 : 1992 : return bucket;
2611 : :
2612 : : /*
2613 : : * There must not be any hashtable entry for this hash value.
2614 : : */
2615 : 78008 : return INVALID_SKEW_BUCKET_NO;
2616 : : }
2617 : :
2618 : : /*
2619 : : * ExecHashSkewTableInsert
2620 : : *
2621 : : * Insert a tuple into the skew hashtable.
2622 : : *
2623 : : * This should generally match up with the current-batch case in
2624 : : * ExecHashTableInsert.
2625 : : */
2626 : : static void
2627 : 392 : ExecHashSkewTableInsert(HashJoinTable hashtable,
2628 : : TupleTableSlot *slot,
2629 : : uint32 hashvalue,
2630 : : int bucketNumber)
2631 : : {
2632 : : bool shouldFree;
2728 andres@anarazel.de 2633 : 392 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2634 : : HashJoinTuple hashTuple;
2635 : : int hashTupleSize;
2636 : :
2637 : : /* Create the HashJoinTuple */
6254 tgl@sss.pgh.pa.us 2638 : 392 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2639 : 392 : hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2640 : : hashTupleSize);
2641 : 392 : hashTuple->hashvalue = hashvalue;
2642 : 392 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
5605 2643 : 392 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2644 : :
2645 : : /* Push it onto the front of the skew bucket's list */
3058 andres@anarazel.de 2646 : 392 : hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
6254 tgl@sss.pgh.pa.us 2647 : 392 : hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
3058 andres@anarazel.de 2648 [ - + ]: 392 : Assert(hashTuple != hashTuple->next.unshared);
2649 : :
2650 : : /* Account for space used, and back off if we've used too much */
47 tgl@sss.pgh.pa.us 2651 :GNC 392 : hashtable->skewTuples += 1;
6254 tgl@sss.pgh.pa.us 2652 :CBC 392 : hashtable->spaceUsed += hashTupleSize;
2653 : 392 : hashtable->spaceUsedSkew += hashTupleSize;
5937 rhaas@postgresql.org 2654 [ + + ]: 392 : if (hashtable->spaceUsed > hashtable->spacePeak)
2655 : 288 : hashtable->spacePeak = hashtable->spaceUsed;
6254 tgl@sss.pgh.pa.us 2656 [ + + ]: 460 : while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2657 : 68 : ExecHashRemoveNextSkewBucket(hashtable);
2658 : :
2659 : : /* Check we are not over the total spaceAllowed, either */
2660 [ - + ]: 392 : if (hashtable->spaceUsed > hashtable->spaceAllowed)
6254 tgl@sss.pgh.pa.us 2661 :UBC 0 : ExecHashIncreaseNumBatches(hashtable);
2662 : :
2728 andres@anarazel.de 2663 [ + - ]:CBC 392 : if (shouldFree)
2664 : 392 : heap_free_minimal_tuple(tuple);
6254 tgl@sss.pgh.pa.us 2665 : 392 : }
2666 : :
2667 : : /*
2668 : : * ExecHashRemoveNextSkewBucket
2669 : : *
2670 : : * Remove the least valuable skew bucket by pushing its tuples into
2671 : : * the main hash table.
2672 : : */
2673 : : static void
2674 : 68 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2675 : : {
2676 : : int bucketToRemove;
2677 : : HashSkewBucket *bucket;
2678 : : uint32 hashvalue;
2679 : : int bucketno;
2680 : : int batchno;
2681 : : HashJoinTuple hashTuple;
2682 : :
2683 : : /* Locate the bucket to remove */
2684 : 68 : bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2685 : 68 : bucket = hashtable->skewBucket[bucketToRemove];
2686 : :
2687 : : /*
2688 : : * Calculate which bucket and batch the tuples belong to in the main
2689 : : * hashtable. They all have the same hash value, so it's the same for all
2690 : : * of them. Also note that it's not possible for nbatch to increase while
2691 : : * we are processing the tuples.
2692 : : */
2693 : 68 : hashvalue = bucket->hashvalue;
2694 : 68 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2695 : :
2696 : : /* Process all tuples in the bucket */
2697 : 68 : hashTuple = bucket->tuples;
2698 [ + + ]: 300 : while (hashTuple != NULL)
2699 : : {
3058 andres@anarazel.de 2700 : 232 : HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2701 : : MinimalTuple tuple;
2702 : : Size tupleSize;
2703 : :
2704 : : /*
2705 : : * This code must agree with ExecHashTableInsert. We do not use
2706 : : * ExecHashTableInsert directly as ExecHashTableInsert expects a
2707 : : * TupleTableSlot while we already have HashJoinTuples.
2708 : : */
6254 tgl@sss.pgh.pa.us 2709 : 232 : tuple = HJTUPLE_MINTUPLE(hashTuple);
2710 : 232 : tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2711 : :
2712 : : /* Decide whether to put the tuple in the hash table or a temp file */
2713 [ + + ]: 232 : if (batchno == hashtable->curbatch)
2714 : : {
2715 : : /* Move the tuple to the main hash table */
2716 : : HashJoinTuple copyTuple;
2717 : :
2718 : : /*
2719 : : * We must copy the tuple into the dense storage, else it will not
2720 : : * be found by, eg, ExecHashIncreaseNumBatches.
2721 : : */
3740 2722 : 92 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2723 : 92 : memcpy(copyTuple, hashTuple, tupleSize);
2724 : 92 : pfree(hashTuple);
2725 : :
3058 andres@anarazel.de 2726 : 92 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2727 : 92 : hashtable->buckets.unshared[bucketno] = copyTuple;
2728 : :
2729 : : /* We have reduced skew space, but overall space doesn't change */
6254 tgl@sss.pgh.pa.us 2730 : 92 : hashtable->spaceUsedSkew -= tupleSize;
2731 : : }
2732 : : else
2733 : : {
2734 : : /* Put the tuple into a temp file for later batches */
2735 [ - + ]: 140 : Assert(batchno > hashtable->curbatch);
2736 : 140 : ExecHashJoinSaveTuple(tuple, hashvalue,
1082 tomas.vondra@postgre 2737 : 140 : &hashtable->innerBatchFile[batchno],
2738 : : hashtable);
6254 tgl@sss.pgh.pa.us 2739 : 140 : pfree(hashTuple);
2740 : 140 : hashtable->spaceUsed -= tupleSize;
2741 : 140 : hashtable->spaceUsedSkew -= tupleSize;
2742 : : }
2743 : :
2744 : : /*
2745 : : * We must reduce skewTuples, but totalTuples doesn't change since it
2746 : : * counts both main and skew tuples.
2747 : : */
47 tgl@sss.pgh.pa.us 2748 :GNC 232 : hashtable->skewTuples -= 1;
2749 : :
6254 tgl@sss.pgh.pa.us 2750 :CBC 232 : hashTuple = nextHashTuple;
2751 : :
2752 : : /* allow this loop to be cancellable */
3366 2753 [ - + ]: 232 : CHECK_FOR_INTERRUPTS();
2754 : : }
2755 : :
2756 : : /*
2757 : : * Free the bucket struct itself and reset the hashtable entry to NULL.
2758 : : *
2759 : : * NOTE: this is not nearly as simple as it looks on the surface, because
2760 : : * of the possibility of collisions in the hashtable. Suppose that hash
2761 : : * values A and B collide at a particular hashtable entry, and that A was
2762 : : * entered first so B gets shifted to a different table entry. If we were
2763 : : * to remove A first then ExecHashGetSkewBucket would mistakenly start
2764 : : * reporting that B is not in the hashtable, because it would hit the NULL
2765 : : * before finding B. However, we always remove entries in the reverse
2766 : : * order of creation, so this failure cannot happen.
2767 : : */
6254 2768 : 68 : hashtable->skewBucket[bucketToRemove] = NULL;
2769 : 68 : hashtable->nSkewBuckets--;
2770 : 68 : pfree(bucket);
2771 : 68 : hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2772 : 68 : hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2773 : :
2774 : : /*
2775 : : * If we have removed all skew buckets then give up on skew optimization.
2776 : : * Release the arrays since they aren't useful any more.
2777 : : */
2778 [ - + ]: 68 : if (hashtable->nSkewBuckets == 0)
2779 : : {
6254 tgl@sss.pgh.pa.us 2780 :UBC 0 : hashtable->skewEnabled = false;
2781 : 0 : pfree(hashtable->skewBucket);
2782 : 0 : pfree(hashtable->skewBucketNums);
2783 : 0 : hashtable->skewBucket = NULL;
2784 : 0 : hashtable->skewBucketNums = NULL;
2785 : 0 : hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2786 : 0 : hashtable->spaceUsedSkew = 0;
2787 : : }
6254 tgl@sss.pgh.pa.us 2788 :CBC 68 : }
2789 : :
2790 : : /*
2791 : : * Build a tuplestore suitable for holding null-keyed input tuples.
2792 : : * (This function doesn't care whether it's for outer or inner tuples.)
2793 : : *
2794 : : * Note that in a parallel hash join, each worker has its own tuplestore(s)
2795 : : * for these. There's no need to interact with other workers to decide
2796 : : * what to do with them. So they're always in private storage.
2797 : : */
2798 : : Tuplestorestate *
47 tgl@sss.pgh.pa.us 2799 :GNC 155 : ExecHashBuildNullTupleStore(HashJoinTable hashtable)
2800 : : {
2801 : : Tuplestorestate *tstore;
2802 : : MemoryContext oldcxt;
2803 : :
2804 : : /*
2805 : : * We keep the tuplestore in the hashCxt to ensure it won't go away too
2806 : : * soon. Size it at work_mem/16 so that it doesn't bloat the node's space
2807 : : * consumption too much.
2808 : : */
2809 : 155 : oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
2810 : 155 : tstore = tuplestore_begin_heap(false, false, work_mem / 16);
2811 : 155 : MemoryContextSwitchTo(oldcxt);
2812 : 155 : return tstore;
2813 : : }
2814 : :
2815 : : /*
2816 : : * Reserve space in the DSM segment for instrumentation data.
2817 : : */
2818 : : void
3073 andres@anarazel.de 2819 :CBC 208 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2820 : : {
2821 : : size_t size;
2822 : :
2823 : : /* don't need this if not instrumenting or no workers */
3012 tgl@sss.pgh.pa.us 2824 [ + + - + ]: 208 : if (!node->ps.instrument || pcxt->nworkers == 0)
2825 : 152 : return;
2826 : :
3073 andres@anarazel.de 2827 : 56 : size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2828 : 56 : size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2829 : 56 : shm_toc_estimate_chunk(&pcxt->estimator, size);
2830 : 56 : shm_toc_estimate_keys(&pcxt->estimator, 1);
2831 : : }
2832 : :
2833 : : /*
2834 : : * Set up a space in the DSM for all workers to record instrumentation data
2835 : : * about their hash table.
2836 : : */
2837 : : void
2838 : 208 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2839 : : {
2840 : : size_t size;
2841 : :
2842 : : /* don't need this if not instrumenting or no workers */
3012 tgl@sss.pgh.pa.us 2843 [ + + - + ]: 208 : if (!node->ps.instrument || pcxt->nworkers == 0)
2844 : 152 : return;
2845 : :
3073 andres@anarazel.de 2846 : 56 : size = offsetof(SharedHashInfo, hinstrument) +
2847 : 56 : pcxt->nworkers * sizeof(HashInstrumentation);
2848 : 56 : node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2849 : :
2850 : : /* Each per-worker area must start out as zeroes. */
2851 : 56 : memset(node->shared_info, 0, size);
2852 : :
2853 : 56 : node->shared_info->num_workers = pcxt->nworkers;
2854 : 56 : shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2855 : 56 : node->shared_info);
2856 : : }
2857 : :
2858 : : /*
2859 : : * Locate the DSM space for hash table instrumentation data that we'll write
2860 : : * to at shutdown time.
2861 : : */
2862 : : void
2863 : 524 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2864 : : {
2865 : : SharedHashInfo *shared_info;
2866 : :
2867 : : /* don't need this if not instrumenting */
3012 tgl@sss.pgh.pa.us 2868 [ + + ]: 524 : if (!node->ps.instrument)
2869 : 356 : return;
2870 : :
2871 : : /*
2872 : : * Find our entry in the shared area, and set up a pointer to it so that
2873 : : * we'll accumulate stats there when shutting down or rebuilding the hash
2874 : : * table.
2875 : : */
2876 : : shared_info = (SharedHashInfo *)
2877 : 168 : shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2878 : 168 : node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2879 : : }
2880 : :
2881 : : /*
2882 : : * Collect EXPLAIN stats if needed, saving them into DSM memory if
2883 : : * ExecHashInitializeWorker was called, or local storage if not. In the
2884 : : * parallel case, this must be done in ExecShutdownHash() rather than
2885 : : * ExecEndHash() because the latter runs after we've detached from the DSM
2886 : : * segment.
2887 : : */
2888 : : void
3073 andres@anarazel.de 2889 : 24493 : ExecShutdownHash(HashState *node)
2890 : : {
2891 : : /* Allocate save space if EXPLAIN'ing and we didn't do so already */
2215 tgl@sss.pgh.pa.us 2892 [ + + + + ]: 24493 : if (node->ps.instrument && !node->hinstrument)
1331 peter@eisentraut.org 2893 : 79 : node->hinstrument = palloc0_object(HashInstrumentation);
2894 : : /* Now accumulate data for the current (final) hash table */
3073 andres@anarazel.de 2895 [ + + + + ]: 24493 : if (node->hinstrument && node->hashtable)
2215 tgl@sss.pgh.pa.us 2896 : 227 : ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
3073 andres@anarazel.de 2897 : 24493 : }
2898 : :
2899 : : /*
2900 : : * Retrieve instrumentation data from workers before the DSM segment is
2901 : : * detached, so that EXPLAIN can access it.
2902 : : */
2903 : : void
2904 : 56 : ExecHashRetrieveInstrumentation(HashState *node)
2905 : : {
2906 : 56 : SharedHashInfo *shared_info = node->shared_info;
2907 : : size_t size;
2908 : :
3012 tgl@sss.pgh.pa.us 2909 [ - + ]: 56 : if (shared_info == NULL)
3012 tgl@sss.pgh.pa.us 2910 :UBC 0 : return;
2911 : :
2912 : : /* Replace node->shared_info with a copy in backend-local memory. */
3073 andres@anarazel.de 2913 :CBC 56 : size = offsetof(SharedHashInfo, hinstrument) +
2914 : 56 : shared_info->num_workers * sizeof(HashInstrumentation);
2915 : 56 : node->shared_info = palloc(size);
2916 : 56 : memcpy(node->shared_info, shared_info, size);
2917 : : }
2918 : :
2919 : : /*
2920 : : * Accumulate instrumentation data from 'hashtable' into an
2921 : : * initially-zeroed HashInstrumentation struct.
2922 : : *
2923 : : * This is used to merge information across successive hash table instances
2924 : : * within a single plan node. We take the maximum values of each interesting
2925 : : * number. The largest nbuckets and largest nbatch values might have occurred
2926 : : * in different instances, so there's some risk of confusion from reporting
2927 : : * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
2928 : : * issue if we don't report the largest values. Similarly, we want to report
2929 : : * the largest spacePeak regardless of whether it happened in the same
2930 : : * instance as the largest nbuckets or nbatch. All the instances should have
2931 : : * the same nbuckets_original and nbatch_original; but there's little value
2932 : : * in depending on that here, so handle them the same way.
2933 : : */
2934 : : void
2215 tgl@sss.pgh.pa.us 2935 : 227 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
2936 : : HashJoinTable hashtable)
2937 : : {
2938 : 227 : instrument->nbuckets = Max(instrument->nbuckets,
2939 : : hashtable->nbuckets);
2940 : 227 : instrument->nbuckets_original = Max(instrument->nbuckets_original,
2941 : : hashtable->nbuckets_original);
2942 : 227 : instrument->nbatch = Max(instrument->nbatch,
2943 : : hashtable->nbatch);
2944 : 227 : instrument->nbatch_original = Max(instrument->nbatch_original,
2945 : : hashtable->nbatch_original);
2946 : 227 : instrument->space_peak = Max(instrument->space_peak,
2947 : : hashtable->spacePeak);
3073 andres@anarazel.de 2948 : 227 : }
2949 : :
2950 : : /*
2951 : : * Allocate 'size' bytes from the currently active HashMemoryChunk
2952 : : */
2953 : : static void *
4255 heikki.linnakangas@i 2954 : 6921087 : dense_alloc(HashJoinTable hashtable, Size size)
2955 : : {
2956 : : HashMemoryChunk newChunk;
2957 : : char *ptr;
2958 : :
2959 : : /* just in case the size is not already aligned properly */
2960 : 6921087 : size = MAXALIGN(size);
2961 : :
2962 : : /*
2963 : : * If tuple size is larger than threshold, allocate a separate chunk.
2964 : : */
2965 [ - + ]: 6921087 : if (size > HASH_CHUNK_THRESHOLD)
2966 : : {
2967 : : /* allocate new chunk and put it at the beginning of the list */
4255 heikki.linnakangas@i 2968 :UBC 0 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2969 : : HASH_CHUNK_HEADER_SIZE + size);
2970 : 0 : newChunk->maxlen = size;
3045 tgl@sss.pgh.pa.us 2971 : 0 : newChunk->used = size;
2972 : 0 : newChunk->ntuples = 1;
2973 : :
2974 : : /*
2975 : : * Add this chunk to the list after the first existing chunk, so that
2976 : : * we don't lose the remaining space in the "current" chunk.
2977 : : */
4255 heikki.linnakangas@i 2978 [ # # ]: 0 : if (hashtable->chunks != NULL)
2979 : : {
2980 : 0 : newChunk->next = hashtable->chunks->next;
3058 andres@anarazel.de 2981 : 0 : hashtable->chunks->next.unshared = newChunk;
2982 : : }
2983 : : else
2984 : : {
2985 : 0 : newChunk->next.unshared = hashtable->chunks;
4255 heikki.linnakangas@i 2986 : 0 : hashtable->chunks = newChunk;
2987 : : }
2988 : :
3045 tgl@sss.pgh.pa.us 2989 : 0 : return HASH_CHUNK_DATA(newChunk);
2990 : : }
2991 : :
2992 : : /*
2993 : : * See if we have enough space for it in the current chunk (if any). If
2994 : : * not, allocate a fresh chunk.
2995 : : */
4255 heikki.linnakangas@i 2996 [ + + ]:CBC 6921087 : if ((hashtable->chunks == NULL) ||
2997 [ + + ]: 6904563 : (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2998 : : {
2999 : : /* allocate new chunk and put it at the beginning of the list */
3000 : 27081 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
3001 : : HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
3002 : :
3003 : 27081 : newChunk->maxlen = HASH_CHUNK_SIZE;
3004 : 27081 : newChunk->used = size;
3005 : 27081 : newChunk->ntuples = 1;
3006 : :
3058 andres@anarazel.de 3007 : 27081 : newChunk->next.unshared = hashtable->chunks;
4255 heikki.linnakangas@i 3008 : 27081 : hashtable->chunks = newChunk;
3009 : :
3045 tgl@sss.pgh.pa.us 3010 : 27081 : return HASH_CHUNK_DATA(newChunk);
3011 : : }
3012 : :
3013 : : /* There is enough space in the current chunk, let's add the tuple */
3014 : 6894006 : ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
4255 heikki.linnakangas@i 3015 : 6894006 : hashtable->chunks->used += size;
3016 : 6894006 : hashtable->chunks->ntuples += 1;
3017 : :
3018 : : /* return pointer to the start of the tuple memory */
3019 : 6894006 : return ptr;
3020 : : }
3021 : :
3022 : : /*
3023 : : * Allocate space for a tuple in shared dense storage. This is equivalent to
3024 : : * dense_alloc but for Parallel Hash using shared memory.
3025 : : *
3026 : : * While loading a tuple into shared memory, we might run out of memory and
3027 : : * decide to repartition, or determine that the load factor is too high and
3028 : : * decide to expand the bucket array, or discover that another participant has
3029 : : * commanded us to help do that. Return NULL if number of buckets or batches
3030 : : * has changed, indicating that the caller must retry (considering the
3031 : : * possibility that the tuple no longer belongs in the same batch).
3032 : : */
3033 : : static HashJoinTuple
3058 andres@anarazel.de 3034 : 1586926 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
3035 : : dsa_pointer *shared)
3036 : : {
3037 : 1586926 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3038 : : dsa_pointer chunk_shared;
3039 : : HashMemoryChunk chunk;
3040 : : Size chunk_size;
3041 : : HashJoinTuple result;
3042 : 1586926 : int curbatch = hashtable->curbatch;
3043 : :
3044 : 1586926 : size = MAXALIGN(size);
3045 : :
3046 : : /*
3047 : : * Fast path: if there is enough space in this backend's current chunk,
3048 : : * then we can allocate without any locking.
3049 : : */
3050 : 1586926 : chunk = hashtable->current_chunk;
3051 [ + + + - ]: 1586926 : if (chunk != NULL &&
3044 tgl@sss.pgh.pa.us 3052 : 1586246 : size <= HASH_CHUNK_THRESHOLD &&
3058 andres@anarazel.de 3053 [ + + ]: 1586246 : chunk->maxlen - chunk->used >= size)
3054 : : {
3055 : :
3056 : 1584470 : chunk_shared = hashtable->current_chunk_shared;
3057 [ - + ]: 1584470 : Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
3058 : 1584470 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
3045 tgl@sss.pgh.pa.us 3059 : 1584470 : result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
3058 andres@anarazel.de 3060 : 1584470 : chunk->used += size;
3061 : :
3062 [ - + ]: 1584470 : Assert(chunk->used <= chunk->maxlen);
3063 [ - + ]: 1584470 : Assert(result == dsa_get_address(hashtable->area, *shared));
3064 : :
3065 : 1584470 : return result;
3066 : : }
3067 : :
3068 : : /* Slow path: try to allocate a new chunk. */
3069 : 2456 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3070 : :
3071 : : /*
3072 : : * Check if we need to help increase the number of buckets or batches.
3073 : : */
3074 [ + + ]: 2456 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3075 [ + + ]: 2422 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3076 : : {
3077 : 81 : ParallelHashGrowth growth = pstate->growth;
3078 : :
3079 : 81 : hashtable->current_chunk = NULL;
3080 : 81 : LWLockRelease(&pstate->lock);
3081 : :
3082 : : /* Another participant has commanded us to help grow. */
3083 [ + + ]: 81 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3084 : 34 : ExecParallelHashIncreaseNumBatches(hashtable);
3085 [ + - ]: 47 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3086 : 47 : ExecParallelHashIncreaseNumBuckets(hashtable);
3087 : :
3088 : : /* The caller must retry. */
3089 : 81 : return NULL;
3090 : : }
3091 : :
3092 : : /* Oversized tuples get their own chunk. */
3093 [ + + ]: 2375 : if (size > HASH_CHUNK_THRESHOLD)
3094 : 32 : chunk_size = size + HASH_CHUNK_HEADER_SIZE;
3095 : : else
3096 : 2343 : chunk_size = HASH_CHUNK_SIZE;
3097 : :
3098 : : /* Check if it's time to grow batches or buckets. */
3099 [ + + ]: 2375 : if (pstate->growth != PHJ_GROWTH_DISABLED)
3100 : : {
3101 [ - + ]: 1210 : Assert(curbatch == 0);
1139 tmunro@postgresql.or 3102 [ - + ]: 1210 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
3103 : :
3104 : : /*
3105 : : * Check if our space limit would be exceeded. To avoid choking on
3106 : : * very large tuples or very low hash_mem setting, we'll always allow
3107 : : * each backend to allocate at least one chunk.
3108 : : */
3058 andres@anarazel.de 3109 [ + + ]: 1210 : if (hashtable->batches[0].at_least_one_chunk &&
3110 : 918 : hashtable->batches[0].shared->size +
3111 [ + + ]: 918 : chunk_size > pstate->space_allowed)
3112 : : {
3113 : 27 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3114 : 27 : hashtable->batches[0].shared->space_exhausted = true;
3115 : 27 : LWLockRelease(&pstate->lock);
3116 : :
3117 : 27 : return NULL;
3118 : : }
3119 : :
3120 : : /* Check if our load factor limit would be exceeded. */
3121 [ + + ]: 1183 : if (hashtable->nbatch == 1)
3122 : : {
3123 : 1042 : hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
3124 : 1042 : hashtable->batches[0].ntuples = 0;
3125 : : /* Guard against integer overflow and alloc size overflow */
3126 : 1042 : if (hashtable->batches[0].shared->ntuples + 1 >
3127 [ + + ]: 1042 : hashtable->nbuckets * NTUP_PER_BUCKET &&
2886 tmunro@postgresql.or 3128 [ + - ]: 47 : hashtable->nbuckets < (INT_MAX / 2) &&
3129 [ + - ]: 47 : hashtable->nbuckets * 2 <=
3130 : : MaxAllocSize / sizeof(dsa_pointer_atomic))
3131 : : {
3058 andres@anarazel.de 3132 : 47 : pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
3133 : 47 : LWLockRelease(&pstate->lock);
3134 : :
3135 : 47 : return NULL;
3136 : : }
3137 : : }
3138 : : }
3139 : :
3140 : : /* We are cleared to allocate a new chunk. */
3141 : 2301 : chunk_shared = dsa_allocate(hashtable->area, chunk_size);
3142 : 2301 : hashtable->batches[curbatch].shared->size += chunk_size;
3143 : 2301 : hashtable->batches[curbatch].at_least_one_chunk = true;
3144 : :
3145 : : /* Set up the chunk. */
3146 : 2301 : chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
3147 : 2301 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
3148 : 2301 : chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
3149 : 2301 : chunk->used = size;
3150 : :
3151 : : /*
3152 : : * Push it onto the list of chunks, so that it can be found if we need to
3153 : : * increase the number of buckets or batches (batch 0 only) and later for
3154 : : * freeing the memory (all batches).
3155 : : */
3156 : 2301 : chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
3157 : 2301 : hashtable->batches[curbatch].shared->chunks = chunk_shared;
3158 : :
3159 [ + + ]: 2301 : if (size <= HASH_CHUNK_THRESHOLD)
3160 : : {
3161 : : /*
3162 : : * Make this the current chunk so that we can use the fast path to
3163 : : * fill the rest of it up in future calls.
3164 : : */
3165 : 2277 : hashtable->current_chunk = chunk;
3166 : 2277 : hashtable->current_chunk_shared = chunk_shared;
3167 : : }
3168 : 2301 : LWLockRelease(&pstate->lock);
3169 : :
3045 tgl@sss.pgh.pa.us 3170 [ - + ]: 2301 : Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
3171 : 2301 : result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
3172 : :
3058 andres@anarazel.de 3173 : 2301 : return result;
3174 : : }
3175 : :
3176 : : /*
3177 : : * One backend needs to set up the shared batch state including tuplestores.
3178 : : * Other backends will ensure they have correctly configured accessors by
3179 : : * called ExecParallelHashEnsureBatchAccessors().
3180 : : */
3181 : : static void
3182 : 148 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
3183 : : {
3184 : 148 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3185 : : ParallelHashJoinBatch *batches;
3186 : : MemoryContext oldcxt;
3187 : : int i;
3188 : :
3189 [ - + ]: 148 : Assert(hashtable->batches == NULL);
3190 : :
3191 : : /* Allocate space. */
3192 : 148 : pstate->batches =
3193 : 148 : dsa_allocate0(hashtable->area,
3194 : : EstimateParallelHashJoinBatch(hashtable) * nbatch);
3195 : 148 : pstate->nbatch = nbatch;
3196 : 148 : batches = dsa_get_address(hashtable->area, pstate->batches);
3197 : :
3198 : : /*
3199 : : * Use hash join spill memory context to allocate accessors, including
3200 : : * buffers for the temporary files.
3201 : : */
1082 tomas.vondra@postgre 3202 : 148 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3203 : :
3204 : : /* Allocate this backend's accessor array. */
3058 andres@anarazel.de 3205 : 148 : hashtable->nbatch = nbatch;
1331 peter@eisentraut.org 3206 : 148 : hashtable->batches =
3207 : 148 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3208 : :
3209 : : /* Set up the shared state, tuplestores and backend-local accessors. */
3058 andres@anarazel.de 3210 [ + + ]: 708 : for (i = 0; i < hashtable->nbatch; ++i)
3211 : : {
3212 : 560 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3213 : 560 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3214 : : char name[MAXPGPATH];
3215 : :
3216 : : /*
3217 : : * All members of shared were zero-initialized. We just need to set
3218 : : * up the Barrier.
3219 : : */
3220 : 560 : BarrierInit(&shared->batch_barrier, 0);
3221 [ + + ]: 560 : if (i == 0)
3222 : : {
3223 : : /* Batch 0 doesn't need to be loaded. */
3224 : 148 : BarrierAttach(&shared->batch_barrier);
1139 tmunro@postgresql.or 3225 [ + + ]: 592 : while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
3058 andres@anarazel.de 3226 : 444 : BarrierArriveAndWait(&shared->batch_barrier, 0);
3227 : 148 : BarrierDetach(&shared->batch_barrier);
3228 : : }
3229 : :
3230 : : /* Initialize accessor state. All members were zero-initialized. */
3231 : 560 : accessor->shared = shared;
3232 : :
3233 : : /* Initialize the shared tuplestores. */
3234 : 560 : snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
3235 : 560 : accessor->inner_tuples =
3236 : 560 : sts_initialize(ParallelHashJoinBatchInner(shared),
3237 : : pstate->nparticipants,
3238 : : ParallelWorkerNumber + 1,
3239 : : sizeof(uint32),
3240 : : SHARED_TUPLESTORE_SINGLE_PASS,
3241 : : &pstate->fileset,
3242 : : name);
3243 : 560 : snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
3244 : 560 : accessor->outer_tuples =
3245 : 560 : sts_initialize(ParallelHashJoinBatchOuter(shared,
3246 : : pstate->nparticipants),
3247 : : pstate->nparticipants,
3248 : : ParallelWorkerNumber + 1,
3249 : : sizeof(uint32),
3250 : : SHARED_TUPLESTORE_SINGLE_PASS,
3251 : : &pstate->fileset,
3252 : : name);
3253 : : }
3254 : :
3255 : 148 : MemoryContextSwitchTo(oldcxt);
3256 : 148 : }
3257 : :
3258 : : /*
3259 : : * Free the current set of ParallelHashJoinBatchAccessor objects.
3260 : : */
3261 : : static void
3262 : 43 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
3263 : : {
3264 : : int i;
3265 : :
3266 [ + + ]: 134 : for (i = 0; i < hashtable->nbatch; ++i)
3267 : : {
3268 : : /* Make sure no files are left open. */
3269 : 91 : sts_end_write(hashtable->batches[i].inner_tuples);
3270 : 91 : sts_end_write(hashtable->batches[i].outer_tuples);
3271 : 91 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3272 : 91 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3273 : : }
3274 : 43 : pfree(hashtable->batches);
3275 : 43 : hashtable->batches = NULL;
3276 : 43 : }
3277 : :
3278 : : /*
3279 : : * Make sure this backend has up-to-date accessors for the current set of
3280 : : * batches.
3281 : : */
3282 : : static void
3283 : 636 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
3284 : : {
3285 : 636 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3286 : : ParallelHashJoinBatch *batches;
3287 : : MemoryContext oldcxt;
3288 : : int i;
3289 : :
3290 [ + + ]: 636 : if (hashtable->batches != NULL)
3291 : : {
3292 [ + - ]: 465 : if (hashtable->nbatch == pstate->nbatch)
3293 : 465 : return;
3058 andres@anarazel.de 3294 :UBC 0 : ExecParallelHashCloseBatchAccessors(hashtable);
3295 : : }
3296 : :
3297 : : /*
3298 : : * We should never see a state where the batch-tracking array is freed,
3299 : : * because we should have given up sooner if we join when the build
3300 : : * barrier has reached the PHJ_BUILD_FREE phase.
3301 : : */
1141 tmunro@postgresql.or 3302 [ - + ]:CBC 171 : Assert(DsaPointerIsValid(pstate->batches));
3303 : :
3304 : : /*
3305 : : * Use hash join spill memory context to allocate accessors, including
3306 : : * buffers for the temporary files.
3307 : : */
1082 tomas.vondra@postgre 3308 : 171 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3309 : :
3310 : : /* Allocate this backend's accessor array. */
3058 andres@anarazel.de 3311 : 171 : hashtable->nbatch = pstate->nbatch;
1331 peter@eisentraut.org 3312 : 171 : hashtable->batches =
3313 : 171 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3314 : :
3315 : : /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3316 : : batches = (ParallelHashJoinBatch *)
3058 andres@anarazel.de 3317 : 171 : dsa_get_address(hashtable->area, pstate->batches);
3318 : :
3319 : : /* Set up the accessor array and attach to the tuplestores. */
3320 [ + + ]: 894 : for (i = 0; i < hashtable->nbatch; ++i)
3321 : : {
3322 : 723 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3323 : 723 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3324 : :
3325 : 723 : accessor->shared = shared;
3326 : 723 : accessor->preallocated = 0;
3327 : 723 : accessor->done = false;
1131 tmunro@postgresql.or 3328 : 723 : accessor->outer_eof = false;
3058 andres@anarazel.de 3329 : 723 : accessor->inner_tuples =
3330 : 723 : sts_attach(ParallelHashJoinBatchInner(shared),
3331 : : ParallelWorkerNumber + 1,
3332 : : &pstate->fileset);
3333 : 723 : accessor->outer_tuples =
3334 : 723 : sts_attach(ParallelHashJoinBatchOuter(shared,
3335 : : pstate->nparticipants),
3336 : : ParallelWorkerNumber + 1,
3337 : : &pstate->fileset);
3338 : : }
3339 : :
3340 : 171 : MemoryContextSwitchTo(oldcxt);
3341 : : }
3342 : :
3343 : : /*
3344 : : * Allocate an empty shared memory hash table for a given batch.
3345 : : */
3346 : : void
3347 : 504 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3348 : : {
3349 : 504 : ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3350 : : dsa_pointer_atomic *buckets;
3351 : 504 : int nbuckets = hashtable->parallel_state->nbuckets;
3352 : : int i;
3353 : :
3354 : 504 : batch->buckets =
3355 : 504 : dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3356 : : buckets = (dsa_pointer_atomic *)
3357 : 504 : dsa_get_address(hashtable->area, batch->buckets);
3358 [ + + ]: 2656760 : for (i = 0; i < nbuckets; ++i)
3359 : 2656256 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3360 : 504 : }
3361 : :
3362 : : /*
3363 : : * If we are currently attached to a shared hash join batch, detach. If we
3364 : : * are last to detach, clean up.
3365 : : */
3366 : : void
3367 : 18527 : ExecHashTableDetachBatch(HashJoinTable hashtable)
3368 : : {
3369 [ + + ]: 18527 : if (hashtable->parallel_state != NULL &&
3370 [ + + ]: 888 : hashtable->curbatch >= 0)
3371 : : {
3372 : 612 : int curbatch = hashtable->curbatch;
3373 : 612 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
1131 tmunro@postgresql.or 3374 : 612 : bool attached = true;
3375 : :
3376 : : /* Make sure any temporary files are closed. */
3058 andres@anarazel.de 3377 : 612 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3378 : 612 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3379 : :
3380 : : /* After attaching we always get at least to PHJ_BATCH_PROBE. */
1131 tmunro@postgresql.or 3381 [ + + - + ]: 612 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
3382 : : BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
3383 : :
3384 : : /*
3385 : : * If we're abandoning the PHJ_BATCH_PROBE phase early without having
3386 : : * reached the end of it, it means the plan doesn't want any more
3387 : : * tuples, and it is happy to abandon any tuples buffered in this
3388 : : * process's subplans. For correctness, we can't allow any process to
3389 : : * execute the PHJ_BATCH_SCAN phase, because we will never have the
3390 : : * complete set of match bits. Therefore we skip emitting unmatched
3391 : : * tuples in all backends (if this is a full/right join), as if those
3392 : : * tuples were all due to be emitted by this process and it has
3393 : : * abandoned them too.
3394 : : */
3395 [ + + ]: 612 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
3396 [ - + ]: 567 : !hashtable->batches[curbatch].outer_eof)
3397 : : {
3398 : : /*
3399 : : * This flag may be written to by multiple backends during
3400 : : * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
3401 : : * phase so requires no extra locking.
3402 : : */
1131 tmunro@postgresql.or 3403 :UBC 0 : batch->skip_unmatched = true;
3404 : : }
3405 : :
3406 : : /*
3407 : : * Even if we aren't doing a full/right outer join, we'll step through
3408 : : * the PHJ_BATCH_SCAN phase just to maintain the invariant that
3409 : : * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
3410 : : */
1131 tmunro@postgresql.or 3411 [ + + ]:CBC 612 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
3412 : 567 : attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
3413 [ + + + + ]: 612 : if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
3414 : : {
3415 : : /*
3416 : : * We are not longer attached to the batch barrier, but we're the
3417 : : * process that was chosen to free resources and it's safe to
3418 : : * assert the current phase. The ParallelHashJoinBatch can't go
3419 : : * away underneath us while we are attached to the build barrier,
3420 : : * making this access safe.
3421 : : */
1139 3422 [ - + ]: 504 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
3423 : :
3424 : : /* Free shared chunks and buckets. */
3058 andres@anarazel.de 3425 [ + + ]: 2613 : while (DsaPointerIsValid(batch->chunks))
3426 : : {
3427 : : HashMemoryChunk chunk =
1082 tgl@sss.pgh.pa.us 3428 : 2109 : dsa_get_address(hashtable->area, batch->chunks);
3058 andres@anarazel.de 3429 : 2109 : dsa_pointer next = chunk->next.shared;
3430 : :
3431 : 2109 : dsa_free(hashtable->area, batch->chunks);
3432 : 2109 : batch->chunks = next;
3433 : : }
3434 [ + - ]: 504 : if (DsaPointerIsValid(batch->buckets))
3435 : : {
3436 : 504 : dsa_free(hashtable->area, batch->buckets);
3437 : 504 : batch->buckets = InvalidDsaPointer;
3438 : : }
3439 : : }
3440 : :
3441 : : /*
3442 : : * Track the largest batch we've been attached to. Though each
3443 : : * backend might see a different subset of batches, explain.c will
3444 : : * scan the results from all backends to find the largest value.
3445 : : */
3046 3446 : 612 : hashtable->spacePeak =
3447 : 612 : Max(hashtable->spacePeak,
3448 : : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3449 : :
3450 : : /* Remember that we are not attached to a batch. */
3058 3451 : 612 : hashtable->curbatch = -1;
3452 : : }
3453 : 18527 : }
3454 : :
3455 : : /*
3456 : : * Detach from all shared resources. If we are last to detach, clean up.
3457 : : */
3458 : : void
3459 : 17915 : ExecHashTableDetach(HashJoinTable hashtable)
3460 : : {
1141 tmunro@postgresql.or 3461 : 17915 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3462 : :
3463 : : /*
3464 : : * If we're involved in a parallel query, we must either have gotten all
3465 : : * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
3466 : : */
3467 [ + + - + ]: 17915 : Assert(!pstate ||
3468 : : BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
3469 : :
1139 3470 [ + + + - ]: 17915 : if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
3471 : : {
3472 : : int i;
3473 : :
3474 : : /* Make sure any temporary files are closed. */
3058 andres@anarazel.de 3475 [ + - ]: 276 : if (hashtable->batches)
3476 : : {
3477 [ + + ]: 1468 : for (i = 0; i < hashtable->nbatch; ++i)
3478 : : {
3479 : 1192 : sts_end_write(hashtable->batches[i].inner_tuples);
3480 : 1192 : sts_end_write(hashtable->batches[i].outer_tuples);
3481 : 1192 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3482 : 1192 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3483 : : }
3484 : : }
3485 : :
3486 : : /* If we're last to detach, clean up shared memory. */
1141 tmunro@postgresql.or 3487 [ + + ]: 276 : if (BarrierArriveAndDetach(&pstate->build_barrier))
3488 : : {
3489 : : /*
3490 : : * Late joining processes will see this state and give up
3491 : : * immediately.
3492 : : */
1139 3493 [ - + ]: 116 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
3494 : :
3058 andres@anarazel.de 3495 [ + - ]: 116 : if (DsaPointerIsValid(pstate->batches))
3496 : : {
3497 : 116 : dsa_free(hashtable->area, pstate->batches);
3498 : 116 : pstate->batches = InvalidDsaPointer;
3499 : : }
3500 : : }
3501 : : }
1141 tmunro@postgresql.or 3502 : 17915 : hashtable->parallel_state = NULL;
3058 andres@anarazel.de 3503 : 17915 : }
3504 : :
3505 : : /*
3506 : : * Get the first tuple in a given bucket identified by number.
3507 : : */
3508 : : static inline HashJoinTuple
3509 : 1853628 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3510 : : {
3511 : : HashJoinTuple tuple;
3512 : : dsa_pointer p;
3513 : :
3514 [ - + ]: 1853628 : Assert(hashtable->parallel_state);
3515 : 1853628 : p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3516 : 1853628 : tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3517 : :
3518 : 1853628 : return tuple;
3519 : : }
3520 : :
3521 : : /*
3522 : : * Get the next tuple in the same bucket as 'tuple'.
3523 : : */
3524 : : static inline HashJoinTuple
3525 : 2481804 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3526 : : {
3527 : : HashJoinTuple next;
3528 : :
3529 [ - + ]: 2481804 : Assert(hashtable->parallel_state);
3530 : 2481804 : next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3531 : :
3532 : 2481804 : return next;
3533 : : }
3534 : :
3535 : : /*
3536 : : * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3537 : : */
3538 : : static inline void
3539 : 1725831 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3540 : : HashJoinTuple tuple,
3541 : : dsa_pointer tuple_shared)
3542 : : {
3543 : : for (;;)
3544 : : {
3545 : 1725919 : tuple->next.shared = dsa_pointer_atomic_read(head);
3546 [ + + ]: 1725919 : if (dsa_pointer_atomic_compare_exchange(head,
3547 : 1725919 : &tuple->next.shared,
3548 : : tuple_shared))
3549 : 1725831 : break;
3550 : : }
3551 : 1725831 : }
3552 : :
3553 : : /*
3554 : : * Prepare to work on a given batch.
3555 : : */
3556 : : void
3557 : 1416 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3558 : : {
3559 [ - + ]: 1416 : Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3560 : :
3561 : 1416 : hashtable->curbatch = batchno;
3562 : 1416 : hashtable->buckets.shared = (dsa_pointer_atomic *)
3563 : 1416 : dsa_get_address(hashtable->area,
3564 : 1416 : hashtable->batches[batchno].shared->buckets);
3565 : 1416 : hashtable->nbuckets = hashtable->parallel_state->nbuckets;
237 michael@paquier.xyz 3566 :GNC 1416 : hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
3058 andres@anarazel.de 3567 :CBC 1416 : hashtable->current_chunk = NULL;
3568 : 1416 : hashtable->current_chunk_shared = InvalidDsaPointer;
3569 : 1416 : hashtable->batches[batchno].at_least_one_chunk = false;
3570 : 1416 : }
3571 : :
3572 : : /*
3573 : : * Take the next available chunk from the queue of chunks being worked on in
3574 : : * parallel. Return NULL if there are none left. Otherwise return a pointer
3575 : : * to the chunk, and set *shared to the DSA pointer to the chunk.
3576 : : */
3577 : : static HashMemoryChunk
3578 : 454 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3579 : : {
3580 : 454 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3581 : : HashMemoryChunk chunk;
3582 : :
3583 : 454 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3584 [ + + ]: 454 : if (DsaPointerIsValid(pstate->chunk_work_queue))
3585 : : {
3586 : 362 : *shared = pstate->chunk_work_queue;
3587 : : chunk = (HashMemoryChunk)
3588 : 362 : dsa_get_address(hashtable->area, *shared);
3589 : 362 : pstate->chunk_work_queue = chunk->next.shared;
3590 : : }
3591 : : else
3592 : 92 : chunk = NULL;
3593 : 454 : LWLockRelease(&pstate->lock);
3594 : :
3595 : 454 : return chunk;
3596 : : }
3597 : :
3598 : : /*
3599 : : * Increase the space preallocated in this backend for a given inner batch by
3600 : : * at least a given amount. This allows us to track whether a given batch
3601 : : * would fit in memory when loaded back in. Also increase the number of
3602 : : * batches or buckets if required.
3603 : : *
3604 : : * This maintains a running estimation of how much space will be taken when we
3605 : : * load the batch back into memory by simulating the way chunks will be handed
3606 : : * out to workers. It's not perfectly accurate because the tuples will be
3607 : : * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3608 : : * it should be pretty close. It tends to overestimate by a fraction of a
3609 : : * chunk per worker since all workers gang up to preallocate during hashing,
3610 : : * but workers tend to reload batches alone if there are enough to go around,
3611 : : * leaving fewer partially filled chunks. This effect is bounded by
3612 : : * nparticipants.
3613 : : *
3614 : : * Return false if the number of batches or buckets has changed, and the
3615 : : * caller should reconsider which batch a given tuple now belongs in and call
3616 : : * again.
3617 : : */
3618 : : static bool
3619 : 1109 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3620 : : {
3621 : 1109 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3622 : 1109 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3623 : 1109 : size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3624 : :
3625 [ - + ]: 1109 : Assert(batchno > 0);
3626 [ - + ]: 1109 : Assert(batchno < hashtable->nbatch);
3044 tgl@sss.pgh.pa.us 3627 [ - + ]: 1109 : Assert(size == MAXALIGN(size));
3628 : :
3058 andres@anarazel.de 3629 : 1109 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3630 : :
3631 : : /* Has another participant commanded us to help grow? */
3632 [ + + ]: 1109 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3633 [ - + ]: 1100 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3634 : : {
3635 : 9 : ParallelHashGrowth growth = pstate->growth;
3636 : :
3637 : 9 : LWLockRelease(&pstate->lock);
3638 [ + - ]: 9 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3639 : 9 : ExecParallelHashIncreaseNumBatches(hashtable);
3058 andres@anarazel.de 3640 [ # # ]:UBC 0 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3641 : 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
3642 : :
3058 andres@anarazel.de 3643 :CBC 9 : return false;
3644 : : }
3645 : :
3646 [ + + ]: 1100 : if (pstate->growth != PHJ_GROWTH_DISABLED &&
3647 [ + + ]: 946 : batch->at_least_one_chunk &&
3044 tgl@sss.pgh.pa.us 3648 : 212 : (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3649 [ + + ]: 212 : > pstate->space_allowed))
3650 : : {
3651 : : /*
3652 : : * We have determined that this batch would exceed the space budget if
3653 : : * loaded into memory. Command all participants to help repartition.
3654 : : */
3058 andres@anarazel.de 3655 : 5 : batch->shared->space_exhausted = true;
3656 : 5 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3657 : 5 : LWLockRelease(&pstate->lock);
3658 : :
3659 : 5 : return false;
3660 : : }
3661 : :
3662 : 1095 : batch->at_least_one_chunk = true;
3663 : 1095 : batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3664 : 1095 : batch->preallocated = want;
3665 : 1095 : LWLockRelease(&pstate->lock);
3666 : :
3667 : 1095 : return true;
3668 : : }
3669 : :
3670 : : /*
3671 : : * Calculate the limit on how much memory can be used by Hash and similar
3672 : : * plan types. This is work_mem times hash_mem_multiplier, and is
3673 : : * expressed in bytes.
3674 : : *
3675 : : * Exported for use by the planner, as well as other hash-like executor
3676 : : * nodes. This is a rather random place for this, but there is no better
3677 : : * place.
3678 : : */
3679 : : size_t
1745 tgl@sss.pgh.pa.us 3680 : 1300451 : get_hash_memory_limit(void)
3681 : : {
3682 : : double mem_limit;
3683 : :
3684 : : /* Do initial calculation in double arithmetic */
3685 : 1300451 : mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
3686 : :
3687 : : /* Clamp in case it doesn't fit in size_t */
3688 [ + - ]: 1300451 : mem_limit = Min(mem_limit, (double) SIZE_MAX);
3689 : :
3690 : 1300451 : return (size_t) mem_limit;
3691 : : }
|