/*-------------------------------------------------------------------------
 *
 * nodeGatherMerge.c
 *		Scan a plan in multiple workers, and do order-preserving merge.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeGatherMerge.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "executor/executor.h"
#include "executor/execParallel.h"
#include "executor/nodeGatherMerge.h"
#include "executor/tqueue.h"
#include "lib/binaryheap.h"
#include "miscadmin.h"
#include "optimizer/optimizer.h"

/*
 * When we read tuples from workers, it's a good idea to read several at once
 * for efficiency when possible: this minimizes context-switching overhead.
 * But reading too many at a time wastes memory without improving performance.
 * We'll read up to MAX_TUPLE_STORE tuples (in addition to the first one).
 */
#define MAX_TUPLE_STORE 10

/*
 * Pending-tuple array for each worker.  This holds additional tuples that
 * we were able to fetch from the worker, but can't process yet.  In addition,
 * this struct holds the "done" flag indicating the worker is known to have
 * no more tuples.  (We do not use this struct for the leader; we don't keep
 * any pending tuples for the leader, and the need_to_scan_locally flag serves
 * as its "done" indicator.)
 */
typedef struct GMReaderTupleBuffer
{
    MinimalTuple *tuple;        /* array of length MAX_TUPLE_STORE */
    int          nTuples;       /* number of tuples currently stored */
    int          readCounter;   /* index of next tuple to extract */
    bool         done;          /* true if reader is known exhausted */
} GMReaderTupleBuffer;
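
/*
 * An illustrative sketch, not actual executor code: the struct above acts as
 * a simple FIFO.  load_tuple_array() appends entries at index nTuples, and
 * consumers take the entry at readCounter; the buffer is logically empty when
 * the two counters meet, at which point both can be reset to zero for reuse.
 * Here "buf" is a hypothetical, already-filled GMReaderTupleBuffer:
 *
 *     while (buf->readCounter < buf->nTuples)
 *     {
 *         MinimalTuple tup = buf->tuple[buf->readCounter++];
 *
 *         process(tup);            hypothetical consumer
 *         pfree(tup);              each buffered tuple is separately palloc'd
 *     }
 *     buf->nTuples = buf->readCounter = 0;
 */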

static TupleTableSlot *ExecGatherMerge(PlanState *pstate);
static int32 heap_compare_slots(Datum a, Datum b, void *arg);
static TupleTableSlot *gather_merge_getnext(GatherMergeState *gm_state);
static MinimalTuple gm_readnext_tuple(GatherMergeState *gm_state, int nreader,
                                      bool nowait, bool *done);
static void ExecShutdownGatherMergeWorkers(GatherMergeState *node);
static void gather_merge_setup(GatherMergeState *gm_state);
static void gather_merge_init(GatherMergeState *gm_state);
static void gather_merge_clear_tuples(GatherMergeState *gm_state);
static bool gather_merge_readnext(GatherMergeState *gm_state, int reader,
                                  bool nowait);
static void load_tuple_array(GatherMergeState *gm_state, int reader);

/* ----------------------------------------------------------------
 *		ExecInitGatherMerge
 * ----------------------------------------------------------------
 */
GatherMergeState *
ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags)
{
    GatherMergeState *gm_state;
    Plan       *outerNode;
    TupleDesc   tupDesc;

    /* Gather merge node doesn't have innerPlan node. */
    Assert(innerPlan(node) == NULL);

    /*
     * create state structure
     */
    gm_state = makeNode(GatherMergeState);
    gm_state->ps.plan = (Plan *) node;
    gm_state->ps.state = estate;
    gm_state->ps.ExecProcNode = ExecGatherMerge;

    gm_state->initialized = false;
    gm_state->gm_initialized = false;
    gm_state->tuples_needed = -1;

    /*
     * Miscellaneous initialization
     *
     * create expression context for node
     */
    ExecAssignExprContext(estate, &gm_state->ps);

    /*
     * GatherMerge doesn't support checking a qual (it's always more efficient
     * to do it in the child node).
     */
    Assert(!node->plan.qual);

    /*
     * now initialize outer plan
     */
    outerNode = outerPlan(node);
    outerPlanState(gm_state) = ExecInitNode(outerNode, estate, eflags);

    /*
     * Leader may access ExecProcNode result directly (if
     * need_to_scan_locally), or from workers via tuple queue.  So we can't
     * trivially rely on the slot type being fixed for expressions evaluated
     * within this node.
     */
    gm_state->ps.outeropsset = true;
    gm_state->ps.outeropsfixed = false;

    /*
     * Store the tuple descriptor into gather merge state, so we can use it
     * while initializing the gather merge slots.
     */
    tupDesc = ExecGetResultType(outerPlanState(gm_state));
    gm_state->tupDesc = tupDesc;

    /*
     * Initialize result type and projection.
     */
    ExecInitResultTypeTL(&gm_state->ps);
    ExecConditionalAssignProjectionInfo(&gm_state->ps, tupDesc, OUTER_VAR);

    /*
     * Without projections result slot type is not trivially known, see
     * comment above.
     */
    if (gm_state->ps.ps_ProjInfo == NULL)
    {
        gm_state->ps.resultopsset = true;
        gm_state->ps.resultopsfixed = false;
    }

    /*
     * initialize sort-key information
     */
    if (node->numCols)
    {
        int         i;

        gm_state->gm_nkeys = node->numCols;
        gm_state->gm_sortkeys =
            palloc0(sizeof(SortSupportData) * node->numCols);

        for (i = 0; i < node->numCols; i++)
        {
            SortSupport sortKey = gm_state->gm_sortkeys + i;

            sortKey->ssup_cxt = CurrentMemoryContext;
            sortKey->ssup_collation = node->collations[i];
            sortKey->ssup_nulls_first = node->nullsFirst[i];
            sortKey->ssup_attno = node->sortColIdx[i];

            /*
             * We don't perform abbreviated key conversion here, for the same
             * reasons that it isn't used in MergeAppend
             */
            sortKey->abbreviate = false;

            PrepareSortSupportFromOrderingOp(node->sortOperators[i], sortKey);
        }
    }

    /* Now allocate the workspace for gather merge */
    gather_merge_setup(gm_state);

    return gm_state;
}

/* ----------------------------------------------------------------
 *		ExecGatherMerge(node)
 *
 *		Scans the relation via multiple workers and returns
 *		the next qualifying tuple.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecGatherMerge(PlanState *pstate)
{
    GatherMergeState *node = castNode(GatherMergeState, pstate);
    TupleTableSlot *slot;
    ExprContext *econtext;

    CHECK_FOR_INTERRUPTS();

    /*
     * As with Gather, we don't launch workers until this node is actually
     * executed.
     */
    if (!node->initialized)
    {
        EState     *estate = node->ps.state;
        GatherMerge *gm = castNode(GatherMerge, node->ps.plan);

        /*
         * Sometimes we might have to run without parallelism; but if parallel
         * mode is active then we can try to fire up some workers.
         */
        if (gm->num_workers > 0 && estate->es_use_parallel_mode)
        {
            ParallelContext *pcxt;

            /* Initialize, or re-initialize, shared state needed by workers. */
            if (!node->pei)
                node->pei = ExecInitParallelPlan(outerPlanState(node),
                                                 estate,
                                                 gm->initParam,
                                                 gm->num_workers,
                                                 node->tuples_needed);
            else
                ExecParallelReinitialize(outerPlanState(node),
                                         node->pei,
                                         gm->initParam);

            /* Try to launch workers. */
            pcxt = node->pei->pcxt;
            LaunchParallelWorkers(pcxt);
            /* We save # workers launched for the benefit of EXPLAIN */
            node->nworkers_launched = pcxt->nworkers_launched;

            /*
             * Count number of workers originally wanted and actually
             * launched.
             */
            estate->es_parallel_workers_to_launch += pcxt->nworkers_to_launch;
            estate->es_parallel_workers_launched += pcxt->nworkers_launched;

            /* Set up tuple queue readers to read the results. */
            if (pcxt->nworkers_launched > 0)
            {
                ExecParallelCreateReaders(node->pei);
                /* Make a working array showing the active readers */
                node->nreaders = pcxt->nworkers_launched;
                node->reader = (TupleQueueReader **)
                    palloc(node->nreaders * sizeof(TupleQueueReader *));
                memcpy(node->reader, node->pei->reader,
                       node->nreaders * sizeof(TupleQueueReader *));
            }
            else
            {
                /* No workers?  Then never mind. */
                node->nreaders = 0;
                node->reader = NULL;
            }
        }

        /* allow leader to participate if enabled or no choice */
        if (parallel_leader_participation || node->nreaders == 0)
            node->need_to_scan_locally = true;
        node->initialized = true;
    }

    /*
     * Reset per-tuple memory context to free any expression evaluation
     * storage allocated in the previous tuple cycle.
     */
    econtext = node->ps.ps_ExprContext;
    ResetExprContext(econtext);

    /*
     * Get next tuple, either from one of our workers, or by running the plan
     * ourselves.
     */
    slot = gather_merge_getnext(node);
    if (TupIsNull(slot))
        return NULL;

    /* If no projection is required, we're done. */
    if (node->ps.ps_ProjInfo == NULL)
        return slot;

    /*
     * Form the result tuple using ExecProject(), and return it.
     */
    econtext->ecxt_outertuple = slot;
    return ExecProject(node->ps.ps_ProjInfo);
}

/* ----------------------------------------------------------------
 *		ExecEndGatherMerge
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndGatherMerge(GatherMergeState *node)
{
    ExecEndNode(outerPlanState(node));  /* let children clean up first */
    ExecShutdownGatherMerge(node);
}

/* ----------------------------------------------------------------
 *		ExecShutdownGatherMerge
 *
 *		Destroy the setup for parallel workers including parallel context.
 * ----------------------------------------------------------------
 */
void
ExecShutdownGatherMerge(GatherMergeState *node)
{
    ExecShutdownGatherMergeWorkers(node);

    /* Now destroy the parallel context. */
    if (node->pei != NULL)
    {
        ExecParallelCleanup(node->pei);
        node->pei = NULL;
    }
}

/* ----------------------------------------------------------------
 *		ExecShutdownGatherMergeWorkers
 *
 *		Stop all the parallel workers.
 * ----------------------------------------------------------------
 */
static void
ExecShutdownGatherMergeWorkers(GatherMergeState *node)
{
    if (node->pei != NULL)
        ExecParallelFinish(node->pei);

    /* Flush local copy of reader array */
    if (node->reader)
        pfree(node->reader);
    node->reader = NULL;
}

/* ----------------------------------------------------------------
 *		ExecReScanGatherMerge
 *
 *		Prepare to re-scan the result of a GatherMerge.
 * ----------------------------------------------------------------
 */
void
ExecReScanGatherMerge(GatherMergeState *node)
{
    GatherMerge *gm = (GatherMerge *) node->ps.plan;
    PlanState  *outerPlan = outerPlanState(node);

    /* Make sure any existing workers are gracefully shut down */
    ExecShutdownGatherMergeWorkers(node);

    /* Free any unused tuples, so we don't leak memory across rescans */
    gather_merge_clear_tuples(node);

    /* Mark node so that shared state will be rebuilt at next call */
    node->initialized = false;
    node->gm_initialized = false;

    /*
     * Set child node's chgParam to tell it that the next scan might deliver a
     * different set of rows within the leader process.  (The overall rowset
     * shouldn't change, but the leader process's subset might; hence nodes
     * between here and the parallel table scan node mustn't optimize on the
     * assumption of an unchanging rowset.)
     */
    if (gm->rescan_param >= 0)
        outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
                                             gm->rescan_param);

    /*
     * If chgParam of subnode is not null then plan will be re-scanned by
     * first ExecProcNode.  Note: because this does nothing if we have a
     * rescan_param, it's currently guaranteed that parallel-aware child nodes
     * will not see a ReScan call until after they get a ReInitializeDSM call.
     * That ordering might not be something to rely on, though.  A good rule
     * of thumb is that ReInitializeDSM should reset only shared state, ReScan
     * should reset only local state, and anything that depends on both of
     * those steps being finished must wait until the first ExecProcNode call.
     */
    if (outerPlan->chgParam == NULL)
        ExecReScan(outerPlan);
}

/*
 * Set up the data structures that we'll need for Gather Merge.
 *
 * We allocate these once on the basis of gm->num_workers, which is an
 * upper bound for the number of workers we'll actually have.  During
 * a rescan, we reset the structures to empty.  This approach simplifies
 * not leaking memory across rescans.
 *
 * In the gm_slots[] array, index 0 is for the leader, and indexes 1 to n
 * are for workers.  The values placed into gm_heap correspond to indexes
 * in gm_slots[].  The gm_tuple_buffers[] array, however, is indexed from
 * 0 to n-1; it has no entry for the leader.
 */
static void
gather_merge_setup(GatherMergeState *gm_state)
{
    GatherMerge *gm = castNode(GatherMerge, gm_state->ps.plan);
    int         nreaders = gm->num_workers;
    int         i;

    /*
     * Allocate gm_slots for the number of workers + one more slot for leader.
     * Slot 0 is always for the leader.  Leader always calls ExecProcNode() to
     * read the tuple, and then stores it directly into its gm_slots entry.
     * For other slots, code below will call ExecInitExtraTupleSlot() to
     * create a slot for the worker's results.  Note that during any single
     * scan, we might have fewer than num_workers available workers, in which
     * case the extra array entries go unused.
     */
    gm_state->gm_slots = (TupleTableSlot **)
        palloc0((nreaders + 1) * sizeof(TupleTableSlot *));

    /* Allocate the tuple slot and tuple array for each worker */
    gm_state->gm_tuple_buffers = (GMReaderTupleBuffer *)
        palloc0(nreaders * sizeof(GMReaderTupleBuffer));

    for (i = 0; i < nreaders; i++)
    {
        /* Allocate the tuple array with length MAX_TUPLE_STORE */
        gm_state->gm_tuple_buffers[i].tuple =
            (MinimalTuple *) palloc0(sizeof(MinimalTuple) * MAX_TUPLE_STORE);

        /* Initialize tuple slot for worker */
        gm_state->gm_slots[i + 1] =
            ExecInitExtraTupleSlot(gm_state->ps.state, gm_state->tupDesc,
                                   &TTSOpsMinimalTuple);
    }

    /* Allocate the resources for the merge */
    gm_state->gm_heap = binaryheap_allocate(nreaders + 1,
                                            heap_compare_slots,
                                            gm_state);
}
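
/*
 * An illustrative sketch, not actual executor code: the indexing convention
 * described above can be read as a pair of hypothetical accessors.
 * Participant 0 is the leader; participants 1..n are workers, and a worker's
 * pending-tuple buffer lives at index i - 1 because the leader has none.
 *
 *     static inline TupleTableSlot *
 *     participant_slot(GatherMergeState *gm_state, int i)
 *     {
 *         return gm_state->gm_slots[i];
 *     }
 *
 *     static inline GMReaderTupleBuffer *
 *     worker_buffer(GatherMergeState *gm_state, int i)
 *     {
 *         Assert(i >= 1);
 *         return &gm_state->gm_tuple_buffers[i - 1];
 *     }
 */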

/*
 * Initialize the Gather Merge.
 *
 * Reset data structures to ensure they're empty.  Then pull at least one
 * tuple from leader + each worker (or set its "done" indicator), and set up
 * the heap.
 */
static void
gather_merge_init(GatherMergeState *gm_state)
{
    int         nreaders = gm_state->nreaders;
    bool        nowait = true;
    int         i;

    /* Assert that gather_merge_setup made enough space */
    Assert(nreaders <= castNode(GatherMerge, gm_state->ps.plan)->num_workers);

    /* Reset leader's tuple slot to empty */
    gm_state->gm_slots[0] = NULL;

    /* Reset the tuple slot and tuple array for each worker */
    for (i = 0; i < nreaders; i++)
    {
        /* Reset tuple array to empty */
        gm_state->gm_tuple_buffers[i].nTuples = 0;
        gm_state->gm_tuple_buffers[i].readCounter = 0;
        /* Reset done flag to not-done */
        gm_state->gm_tuple_buffers[i].done = false;
        /* Ensure output slot is empty */
        ExecClearTuple(gm_state->gm_slots[i + 1]);
    }

    /* Reset binary heap to empty */
    binaryheap_reset(gm_state->gm_heap);

    /*
     * First, try to read a tuple from each worker (including leader) in
     * nowait mode.  After this, if not all workers were able to produce a
     * tuple (or a "done" indication), then re-read from remaining workers,
     * this time using wait mode.  Add all live readers (those producing at
     * least one tuple) to the heap.
     */
reread:
    for (i = 0; i <= nreaders; i++)
    {
        CHECK_FOR_INTERRUPTS();

        /* skip this source if already known done */
        if ((i == 0) ? gm_state->need_to_scan_locally :
            !gm_state->gm_tuple_buffers[i - 1].done)
        {
            if (TupIsNull(gm_state->gm_slots[i]))
            {
                /* Don't have a tuple yet, try to get one */
                if (gather_merge_readnext(gm_state, i, nowait))
                    binaryheap_add_unordered(gm_state->gm_heap,
                                             Int32GetDatum(i));
            }
            else
            {
                /*
                 * We already got at least one tuple from this worker, but
                 * might as well see if it has any more ready by now.
                 */
                load_tuple_array(gm_state, i);
            }
        }
    }

    /* need not recheck leader, since nowait doesn't matter for it */
    for (i = 1; i <= nreaders; i++)
    {
        if (!gm_state->gm_tuple_buffers[i - 1].done &&
            TupIsNull(gm_state->gm_slots[i]))
        {
            nowait = false;
            goto reread;
        }
    }

    /* Now heapify the heap. */
    binaryheap_build(gm_state->gm_heap);

    gm_state->gm_initialized = true;
}
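
/*
 * An illustrative sketch, not actual executor code: the priming strategy
 * above is a general pattern for loading a merge heap from several sources.
 * Poll every source once without blocking, so fast sources are picked up in
 * a single pass, then go back and block only on the sources that have
 * produced neither a tuple nor a "done" indication.  With hypothetical
 * helpers read_one(i, nowait), has_tuple[] and is_done[], the shape is:
 *
 *     bool    nowait = true;
 *
 * retry:
 *     for (int i = 0; i <= nreaders; i++)
 *     {
 *         if (!is_done[i] && !has_tuple[i])
 *             has_tuple[i] = read_one(i, nowait);
 *     }
 *     for (int i = 1; i <= nreaders; i++)
 *     {
 *         if (!is_done[i] && !has_tuple[i])
 *         {
 *             nowait = false;
 *             goto retry;
 *         }
 *     }
 */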

/*
 * Clear out the tuple table slot, and any unused pending tuples,
 * for each gather merge input.
 */
static void
gather_merge_clear_tuples(GatherMergeState *gm_state)
{
    int         i;

    for (i = 0; i < gm_state->nreaders; i++)
    {
        GMReaderTupleBuffer *tuple_buffer = &gm_state->gm_tuple_buffers[i];

        while (tuple_buffer->readCounter < tuple_buffer->nTuples)
            pfree(tuple_buffer->tuple[tuple_buffer->readCounter++]);

        ExecClearTuple(gm_state->gm_slots[i + 1]);
    }
}

/*
 * Read the next tuple for gather merge.
 *
 * Fetch the sorted tuple out of the heap.
 */
static TupleTableSlot *
gather_merge_getnext(GatherMergeState *gm_state)
{
    int         i;

    if (!gm_state->gm_initialized)
    {
        /*
         * First time through: pull the first tuple from each participant, and
         * set up the heap.
         */
        gather_merge_init(gm_state);
    }
    else
    {
        /*
         * Otherwise, pull the next tuple from whichever participant we
         * returned from last time, and reinsert that participant's index into
         * the heap, because it might now compare differently against the
         * other elements of the heap.
         */
        i = DatumGetInt32(binaryheap_first(gm_state->gm_heap));

        if (gather_merge_readnext(gm_state, i, false))
            binaryheap_replace_first(gm_state->gm_heap, Int32GetDatum(i));
        else
        {
            /* reader exhausted, remove it from heap */
            (void) binaryheap_remove_first(gm_state->gm_heap);
        }
    }

    if (binaryheap_empty(gm_state->gm_heap))
    {
        /* All the queues are exhausted, and so is the heap */
        gather_merge_clear_tuples(gm_state);
        return NULL;
    }
    else
    {
        /* Return next tuple from whichever participant has the leading one */
        i = DatumGetInt32(binaryheap_first(gm_state->gm_heap));
        return gm_state->gm_slots[i];
    }
}
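
/*
 * An illustrative sketch, not actual executor code: stripped of the
 * tuple-queue machinery, gather_merge_getnext() is a standard k-way merge
 * driven by a binary heap.  The heap holds one index per still-live source,
 * ordered by that source's current tuple; after a source's tuple is
 * returned, the source is advanced and its index is re-sifted (or removed
 * when exhausted).  The loop below shows the same control flow for plain
 * int streams; NSOURCES, current[], emit() and next_value() are hypothetical
 * stand-ins for the participant count, gm_slots[], returning a slot, and
 * gather_merge_readnext():
 *
 *     int     current[NSOURCES];
 *
 *     (prime the heap: read one value per source, push the source's index)
 *
 *     while (!binaryheap_empty(heap))
 *     {
 *         int     i = DatumGetInt32(binaryheap_first(heap));
 *
 *         emit(current[i]);
 *         if (next_value(i, &current[i]))
 *             binaryheap_replace_first(heap, Int32GetDatum(i));
 *         else
 *             (void) binaryheap_remove_first(heap);
 *     }
 */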

/*
 * Read tuple(s) for given reader in nowait mode, and load into its tuple
 * array, until we have MAX_TUPLE_STORE of them or would have to block.
 */
static void
load_tuple_array(GatherMergeState *gm_state, int reader)
{
    GMReaderTupleBuffer *tuple_buffer;
    int         i;

    /* Don't do anything if this is the leader. */
    if (reader == 0)
        return;

    tuple_buffer = &gm_state->gm_tuple_buffers[reader - 1];

    /* If there's nothing in the array, reset the counters to zero. */
    if (tuple_buffer->nTuples == tuple_buffer->readCounter)
        tuple_buffer->nTuples = tuple_buffer->readCounter = 0;

    /* Try to fill additional slots in the array. */
    for (i = tuple_buffer->nTuples; i < MAX_TUPLE_STORE; i++)
    {
        MinimalTuple tuple;

        tuple = gm_readnext_tuple(gm_state,
                                  reader,
                                  true,
                                  &tuple_buffer->done);
        if (!tuple)
            break;
        tuple_buffer->tuple[i] = tuple;
        tuple_buffer->nTuples++;
    }
}

/*
 * Store the next tuple for a given reader into the appropriate slot.
 *
 * Returns true if successful, false if not (either reader is exhausted,
 * or we didn't want to wait for a tuple).  Sets done flag if reader
 * is found to be exhausted.
 */
static bool
gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
{
    GMReaderTupleBuffer *tuple_buffer;
    MinimalTuple tup;

    /*
     * If we're being asked to generate a tuple from the leader, then we just
     * call ExecProcNode as normal to produce one.
     */
    if (reader == 0)
    {
        if (gm_state->need_to_scan_locally)
        {
            PlanState  *outerPlan = outerPlanState(gm_state);
            TupleTableSlot *outerTupleSlot;
            EState     *estate = gm_state->ps.state;

            /* Install our DSA area while executing the plan. */
            estate->es_query_dsa = gm_state->pei ? gm_state->pei->area : NULL;
            outerTupleSlot = ExecProcNode(outerPlan);
            estate->es_query_dsa = NULL;

            if (!TupIsNull(outerTupleSlot))
            {
                gm_state->gm_slots[0] = outerTupleSlot;
                return true;
            }
            /* need_to_scan_locally serves as "done" flag for leader */
            gm_state->need_to_scan_locally = false;
        }
        return false;
    }

    /* Otherwise, check the state of the relevant tuple buffer. */
    tuple_buffer = &gm_state->gm_tuple_buffers[reader - 1];

    if (tuple_buffer->nTuples > tuple_buffer->readCounter)
    {
        /* Return any tuple previously read that is still buffered. */
        tup = tuple_buffer->tuple[tuple_buffer->readCounter++];
    }
    else if (tuple_buffer->done)
    {
        /* Reader is known to be exhausted. */
        return false;
    }
    else
    {
        /* Read and buffer next tuple. */
        tup = gm_readnext_tuple(gm_state,
                                reader,
                                nowait,
                                &tuple_buffer->done);
        if (!tup)
            return false;

        /*
         * Attempt to read more tuples in nowait mode and store them in the
         * pending-tuple array for the reader.
         */
        load_tuple_array(gm_state, reader);
    }

    Assert(tup);

    /* Build the TupleTableSlot for the given tuple */
    ExecStoreMinimalTuple(tup,  /* tuple to store */
                          gm_state->gm_slots[reader],   /* slot in which to
                                                         * store the tuple */
                          true);    /* pfree tuple when done with it */

    return true;
}

/*
 * Attempt to read a tuple from given worker.
 */
static MinimalTuple
gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
                  bool *done)
{
    TupleQueueReader *reader;
    MinimalTuple tup;

    /* Check for async events, particularly messages from workers. */
    CHECK_FOR_INTERRUPTS();

    /*
     * Attempt to read a tuple.
     *
     * Note that TupleQueueReaderNext will just return NULL for a worker which
     * fails to initialize.  We'll treat that worker as having produced no
     * tuples; WaitForParallelWorkersToFinish will error out when we get
     * there.
     */
    reader = gm_state->reader[nreader - 1];
    tup = TupleQueueReaderNext(reader, nowait, done);

    /*
     * Since we'll be buffering these across multiple calls, we need to make a
     * copy.
     */
    return tup ? heap_copy_minimal_tuple(tup, 0) : NULL;
}

/*
 * We have one slot for each item in the heap array.  We use SlotNumber
 * to store slot indexes.  This doesn't actually provide any formal
 * type-safety, but it makes the code more self-documenting.
 */
typedef int32 SlotNumber;

/*
 * Compare the tuples in the two given slots.
 */
static int32
heap_compare_slots(Datum a, Datum b, void *arg)
{
    GatherMergeState *node = (GatherMergeState *) arg;
    SlotNumber  slot1 = DatumGetInt32(a);
    SlotNumber  slot2 = DatumGetInt32(b);

    TupleTableSlot *s1 = node->gm_slots[slot1];
    TupleTableSlot *s2 = node->gm_slots[slot2];
    int         nkey;

    Assert(!TupIsNull(s1));
    Assert(!TupIsNull(s2));

    for (nkey = 0; nkey < node->gm_nkeys; nkey++)
    {
        SortSupport sortKey = node->gm_sortkeys + nkey;
        AttrNumber  attno = sortKey->ssup_attno;
        Datum       datum1,
                    datum2;
        bool        isNull1,
                    isNull2;
        int         compare;

        datum1 = slot_getattr(s1, attno, &isNull1);
        datum2 = slot_getattr(s2, attno, &isNull2);

        compare = ApplySortComparator(datum1, isNull1,
                                      datum2, isNull2,
                                      sortKey);
        if (compare != 0)
        {
            INVERT_COMPARE_RESULT(compare);
            return compare;
        }
    }
    return 0;
}
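
/*
 * An illustrative note, not actual executor code: lib/binaryheap keeps the
 * element that the comparator ranks largest at the top.  Gather Merge wants
 * the smallest sort key on top, so heap_compare_slots() flips the sign of
 * the ordinary comparator result with INVERT_COMPARE_RESULT.  The same trick
 * in miniature, for a heap of plain integers (min_heap_compare is a
 * hypothetical name):
 *
 *     static int32
 *     min_heap_compare(Datum a, Datum b, void *arg)
 *     {
 *         int     va = DatumGetInt32(a);
 *         int     vb = DatumGetInt32(b);
 *         int     compare = (va > vb) - (va < vb);
 *
 *         INVERT_COMPARE_RESULT(compare);
 *         return compare;
 *     }
 *
 * With this comparator, binaryheap_first() returns the Datum holding the
 * smallest integer, which is how the slot indexes above surface the next
 * tuple in sort order.
 */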