Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * array_typanalyze.c
4 : : * Functions for gathering statistics from array columns
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/utils/adt/array_typanalyze.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : : #include "postgres.h"
16 : :
17 : : #include "access/detoast.h"
18 : : #include "commands/vacuum.h"
19 : : #include "utils/array.h"
20 : : #include "utils/datum.h"
21 : : #include "utils/fmgrprotos.h"
22 : : #include "utils/lsyscache.h"
23 : : #include "utils/typcache.h"
24 : :
25 : :
26 : : /*
27 : : * To avoid consuming too much memory, IO and CPU load during analysis, and/or
28 : : * too much space in the resulting pg_statistic rows, we ignore arrays that
29 : : * are wider than ARRAY_WIDTH_THRESHOLD (after detoasting!). Note that this
30 : : * number is considerably more than the similar WIDTH_THRESHOLD limit used
31 : : * in analyze.c's standard typanalyze code.
32 : : */
33 : : #define ARRAY_WIDTH_THRESHOLD 0x10000
34 : :
/*
 * Extra data for compute_array_stats function: built by array_typanalyze()
 * and hung off stats->extra_data for the duration of the ANALYZE run.
 */
typedef struct
{
	/* Information about array element type */
	Oid			type_id;		/* element type's OID */
	Oid			eq_opr;			/* default equality operator's OID */
	Oid			coll_id;		/* collation to use */
	bool		typbyval;		/* physical properties of element type */
	int16		typlen;
	char		typalign;

	/*
	 * Lookup data for element type's comparison and hash functions (these are
	 * in the type's typcache entry, which we expect to remain valid over the
	 * lifespan of the ANALYZE run)
	 */
	FmgrInfo   *cmp;
	FmgrInfo   *hash;

	/* Saved state from std_typanalyze(), restored around its invocation */
	AnalyzeAttrComputeStatsFunc std_compute_stats;
	void	   *std_extra_data;
} ArrayAnalyzeExtraData;
58 : :
59 : : /*
60 : : * While compute_array_stats is running, we keep a pointer to the extra data
61 : : * here for use by assorted subroutines. compute_array_stats doesn't
62 : : * currently need to be re-entrant, so avoiding this is not worth the extra
63 : : * notational cruft that would be needed.
64 : : */
65 : : static ArrayAnalyzeExtraData *array_extra_data;
66 : :
/*
 * A hash table entry for the Lossy Counting algorithm: one entry per distinct
 * element value currently tracked (the set "D" in the LC paper's notation).
 */
typedef struct
{
	Datum		key;			/* This is 'e' from the LC algorithm. */
	int			frequency;		/* This is 'f'. */
	int			delta;			/* And this is 'delta'. */
	int			last_container; /* For de-duplication of array elements. */
} TrackItem;
75 : :
/*
 * A hash table entry for distinct-elements counts: maps a per-array count of
 * distinct non-null elements to the number of sample arrays with that count.
 */
typedef struct
{
	int			count;			/* Count of distinct elements in an array */
	int			frequency;		/* Number of arrays seen with this count */
} DECountItem;
82 : :
83 : : static void compute_array_stats(VacAttrStats *stats,
84 : : AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows);
85 : : static void prune_element_hashtable(HTAB *elements_tab, int b_current);
86 : : static uint32 element_hash(const void *key, Size keysize);
87 : : static int element_match(const void *key1, const void *key2, Size keysize);
88 : : static int element_compare(const void *key1, const void *key2);
89 : : static int trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg);
90 : : static int trackitem_compare_element(const void *e1, const void *e2, void *arg);
91 : : static int countitem_compare_count(const void *e1, const void *e2, void *arg);
92 : :
93 : :
94 : : /*
95 : : * array_typanalyze -- typanalyze function for array columns
96 : : */
97 : : Datum
5037 tgl@sss.pgh.pa.us 98 :CBC 3878 : array_typanalyze(PG_FUNCTION_ARGS)
99 : : {
100 : 3878 : VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);
101 : : Oid element_typeid;
102 : : TypeCacheEntry *typentry;
103 : : ArrayAnalyzeExtraData *extra_data;
104 : :
105 : : /*
106 : : * Call the standard typanalyze function. It may fail to find needed
107 : : * operators, in which case we also can't do anything, so just fail.
108 : : */
109 [ - + ]: 3878 : if (!std_typanalyze(stats))
5037 tgl@sss.pgh.pa.us 110 :UBC 0 : PG_RETURN_BOOL(false);
111 : :
112 : : /*
113 : : * Check attribute data type is a varlena array (or a domain over one).
114 : : */
4838 tgl@sss.pgh.pa.us 115 :CBC 3878 : element_typeid = get_base_element_type(stats->attrtypid);
116 [ - + ]: 3878 : if (!OidIsValid(element_typeid))
5037 tgl@sss.pgh.pa.us 117 [ # # ]:UBC 0 : elog(ERROR, "array_typanalyze was invoked for non-array type %u",
118 : : stats->attrtypid);
119 : :
120 : : /*
121 : : * Gather information about the element type. If we fail to find
122 : : * something, return leaving the state from std_typanalyze() in place.
123 : : */
5037 tgl@sss.pgh.pa.us 124 :CBC 3878 : typentry = lookup_type_cache(element_typeid,
125 : : TYPECACHE_EQ_OPR |
126 : : TYPECACHE_CMP_PROC_FINFO |
127 : : TYPECACHE_HASH_PROC_FINFO);
128 : :
129 [ + + ]: 3878 : if (!OidIsValid(typentry->eq_opr) ||
130 [ + + ]: 3789 : !OidIsValid(typentry->cmp_proc_finfo.fn_oid) ||
131 [ - + ]: 2540 : !OidIsValid(typentry->hash_proc_finfo.fn_oid))
132 : 1338 : PG_RETURN_BOOL(true);
133 : :
134 : : /* Store our findings for use by compute_array_stats() */
7 michael@paquier.xyz 135 :GNC 2540 : extra_data = palloc_object(ArrayAnalyzeExtraData);
5037 tgl@sss.pgh.pa.us 136 :CBC 2540 : extra_data->type_id = typentry->type_id;
137 : 2540 : extra_data->eq_opr = typentry->eq_opr;
2560 138 : 2540 : extra_data->coll_id = stats->attrcollid; /* collation we should use */
5037 139 : 2540 : extra_data->typbyval = typentry->typbyval;
140 : 2540 : extra_data->typlen = typentry->typlen;
141 : 2540 : extra_data->typalign = typentry->typalign;
142 : 2540 : extra_data->cmp = &typentry->cmp_proc_finfo;
143 : 2540 : extra_data->hash = &typentry->hash_proc_finfo;
144 : :
145 : : /* Save old compute_stats and extra_data for scalar statistics ... */
146 : 2540 : extra_data->std_compute_stats = stats->compute_stats;
147 : 2540 : extra_data->std_extra_data = stats->extra_data;
148 : :
149 : : /* ... and replace with our info */
150 : 2540 : stats->compute_stats = compute_array_stats;
151 : 2540 : stats->extra_data = extra_data;
152 : :
153 : : /*
154 : : * Note we leave stats->minrows set as std_typanalyze set it. Should it
155 : : * be increased for array analysis purposes?
156 : : */
157 : :
158 : 2540 : PG_RETURN_BOOL(true);
159 : : }
160 : :
161 : : /*
162 : : * compute_array_stats() -- compute statistics for an array column
163 : : *
164 : : * This function computes statistics useful for determining selectivity of
165 : : * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
166 : : * compute_stats hook after sample rows have been collected.
167 : : *
168 : : * We also invoke the standard compute_stats function, which will compute
169 : : * "scalar" statistics relevant to the btree-style array comparison operators.
170 : : * However, exact duplicates of an entire array may be rare despite many
171 : : * arrays sharing individual elements. This especially afflicts long arrays,
172 : : * which are also liable to lack all scalar statistics due to the low
173 : : * WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats,
174 : : * we find the most common array elements and compute a histogram of distinct
175 : : * element counts.
176 : : *
177 : : * The algorithm used is Lossy Counting, as proposed in the paper "Approximate
178 : : * frequency counts over data streams" by G. S. Manku and R. Motwani, in
179 : : * Proceedings of the 28th International Conference on Very Large Data Bases,
180 : : * Hong Kong, China, August 2002, section 4.2. The paper is available at
181 : : * http://www.vldb.org/conf/2002/S10P03.pdf
182 : : *
183 : : * The Lossy Counting (aka LC) algorithm goes like this:
184 : : * Let s be the threshold frequency for an item (the minimum frequency we
185 : : * are interested in) and epsilon the error margin for the frequency. Let D
186 : : * be a set of triples (e, f, delta), where e is an element value, f is that
187 : : * element's frequency (actually, its current occurrence count) and delta is
188 : : * the maximum error in f. We start with D empty and process the elements in
189 : : * batches of size w. (The batch size is also known as "bucket size" and is
190 : : * equal to 1/epsilon.) Let the current batch number be b_current, starting
191 : : * with 1. For each element e we either increment its f count, if it's
192 : : * already in D, or insert a new triple into D with values (e, 1, b_current
193 : : * - 1). After processing each batch we prune D, by removing from it all
194 : : * elements with f + delta <= b_current. After the algorithm finishes we
195 : : * suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
196 : : * where N is the total number of elements in the input. We emit the
197 : : * remaining elements with estimated frequency f/N. The LC paper proves
198 : : * that this algorithm finds all elements with true frequency at least s,
199 : : * and that no frequency is overestimated or is underestimated by more than
200 : : * epsilon. Furthermore, given reasonable assumptions about the input
201 : : * distribution, the required table size is no more than about 7 times w.
202 : : *
203 : : * In the absence of a principled basis for other particular values, we
204 : : * follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
205 : : * But we leave out the correction for stopwords, which do not apply to
206 : : * arrays. These parameters give bucket width w = K/0.007 and maximum
207 : : * expected hashtable size of about 1000 * K.
208 : : *
209 : : * Elements may repeat within an array. Since duplicates do not change the
210 : : * behavior of <@, && or @>, we want to count each element only once per
211 : : * array. Therefore, we store in the finished pg_statistic entry each
212 : : * element's frequency as the fraction of all non-null rows that contain it.
213 : : * We divide the raw counts by nonnull_cnt to get those figures.
214 : : */
static void
compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
					int samplerows, double totalrows)
{
	ArrayAnalyzeExtraData *extra_data;
	int			num_mcelem;
	int			null_elem_cnt = 0;	/* # of sampled arrays containing a NULL element */
	int			analyzed_rows = 0;	/* # of non-null, not-too-wide arrays examined */

	/* This is D from the LC algorithm. */
	HTAB	   *elements_tab;
	HASHCTL		elem_hash_ctl;
	HASH_SEQ_STATUS scan_status;

	/* This is the current bucket number from the LC algorithm */
	int			b_current;

	/* This is 'w' from the LC algorithm */
	int			bucket_width;
	int			array_no;
	int64		element_no;		/* N: total distinct-per-array element occurrences */
	TrackItem  *item;
	int			slot_idx;
	HTAB	   *count_tab;
	HASHCTL		count_hash_ctl;
	DECountItem *count_item;

	extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;

	/*
	 * Invoke analyze.c's standard analysis function to create scalar-style
	 * stats for the column.  It will expect its own extra_data pointer, so
	 * temporarily install that.
	 */
	stats->extra_data = extra_data->std_extra_data;
	extra_data->std_compute_stats(stats, fetchfunc, samplerows, totalrows);
	stats->extra_data = extra_data;

	/*
	 * Set up static pointer for use by subroutines.  We wait till here in
	 * case std_compute_stats somehow recursively invokes us (probably not
	 * possible, but ...)
	 */
	array_extra_data = extra_data;

	/*
	 * We want statistics_target * 10 elements in the MCELEM array. This
	 * multiplier is pretty arbitrary, but is meant to reflect the fact that
	 * the number of individual elements tracked in pg_statistic ought to be
	 * more than the number of values for a simple scalar column.
	 */
	num_mcelem = stats->attstattarget * 10;

	/*
	 * We set bucket width equal to num_mcelem / 0.007 as per the comment
	 * above.
	 */
	bucket_width = num_mcelem * 1000 / 7;

	/*
	 * Create the hashtable. It will be in local memory, so we don't need to
	 * worry about overflowing the initial size. Also we don't need to pay any
	 * attention to locking and memory management.
	 */
	elem_hash_ctl.keysize = sizeof(Datum);
	elem_hash_ctl.entrysize = sizeof(TrackItem);
	elem_hash_ctl.hash = element_hash;
	elem_hash_ctl.match = element_match;
	elem_hash_ctl.hcxt = CurrentMemoryContext;
	elements_tab = hash_create("Analyzed elements table",
							   num_mcelem,
							   &elem_hash_ctl,
							   HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);

	/* hashtable for array distinct elements counts */
	count_hash_ctl.keysize = sizeof(int);
	count_hash_ctl.entrysize = sizeof(DECountItem);
	count_hash_ctl.hcxt = CurrentMemoryContext;
	count_tab = hash_create("Array distinct element count table",
							64,
							&count_hash_ctl,
							HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	/* Initialize counters. */
	b_current = 1;
	element_no = 0;

	/* Loop over the arrays. */
	for (array_no = 0; array_no < samplerows; array_no++)
	{
		Datum		value;
		bool		isnull;
		ArrayType  *array;
		int			num_elems;
		Datum	   *elem_values;
		bool	   *elem_nulls;
		bool		null_present;
		int			j;
		int64		prev_element_no = element_no;
		int			distinct_count;
		bool		count_item_found;

		vacuum_delay_point(true);

		value = fetchfunc(stats, array_no, &isnull);
		if (isnull)
		{
			/* ignore arrays that are null overall */
			continue;
		}

		/* Skip too-large values. */
		if (toast_raw_datum_size(value) > ARRAY_WIDTH_THRESHOLD)
			continue;
		else
			analyzed_rows++;

		/*
		 * Now detoast the array if needed, and deconstruct into datums.
		 */
		array = DatumGetArrayTypeP(value);

		Assert(ARR_ELEMTYPE(array) == extra_data->type_id);
		deconstruct_array(array,
						  extra_data->type_id,
						  extra_data->typlen,
						  extra_data->typbyval,
						  extra_data->typalign,
						  &elem_values, &elem_nulls, &num_elems);

		/*
		 * We loop through the elements in the array and add them to our
		 * tracking hashtable.
		 */
		null_present = false;
		for (j = 0; j < num_elems; j++)
		{
			Datum		elem_value;
			bool		found;

			/* No null element processing other than flag setting here */
			if (elem_nulls[j])
			{
				null_present = true;
				continue;
			}

			/* Lookup current element in hashtable, adding it if new */
			elem_value = elem_values[j];
			item = (TrackItem *) hash_search(elements_tab,
											 &elem_value,
											 HASH_ENTER, &found);

			if (found)
			{
				/* The element value is already on the tracking list */

				/*
				 * The operators we assist ignore duplicate array elements, so
				 * count a given distinct element only once per array.
				 */
				if (item->last_container == array_no)
					continue;

				item->frequency++;
				item->last_container = array_no;
			}
			else
			{
				/* Initialize new tracking list element */

				/*
				 * If element type is pass-by-reference, we must copy it into
				 * palloc'd space, so that we can release the array below. (We
				 * do this so that the space needed for element values is
				 * limited by the size of the hashtable; if we kept all the
				 * array values around, it could be much more.)
				 */
				item->key = datumCopy(elem_value,
									  extra_data->typbyval,
									  extra_data->typlen);

				item->frequency = 1;
				item->delta = b_current - 1;
				item->last_container = array_no;
			}

			/* element_no is the number of elements processed (ie N) */
			element_no++;

			/* We prune the D structure after processing each bucket */
			if (element_no % bucket_width == 0)
			{
				prune_element_hashtable(elements_tab, b_current);
				b_current++;
			}
		}

		/* Count null element presence once per array. */
		if (null_present)
			null_elem_cnt++;

		/* Update frequency of the particular array distinct element count. */
		distinct_count = (int) (element_no - prev_element_no);
		count_item = (DECountItem *) hash_search(count_tab, &distinct_count,
												 HASH_ENTER,
												 &count_item_found);

		if (count_item_found)
			count_item->frequency++;
		else
			count_item->frequency = 1;

		/* Free memory allocated while detoasting. */
		if (PointerGetDatum(array) != value)
			pfree(array);
		pfree(elem_values);
		pfree(elem_nulls);
	}

	/*
	 * Skip pg_statistic slots occupied by standard statistics.  We need two
	 * free slots below (MCELEM and DECHIST), hence the NUM_SLOTS - 2 check.
	 */
	slot_idx = 0;
	while (slot_idx < STATISTIC_NUM_SLOTS && stats->stakind[slot_idx] != 0)
		slot_idx++;
	if (slot_idx > STATISTIC_NUM_SLOTS - 2)
		elog(ERROR, "insufficient pg_statistic slots for array stats");

	/* We can only compute real stats if we found some non-null values. */
	if (analyzed_rows > 0)
	{
		int			nonnull_cnt = analyzed_rows;
		int			count_items_count;
		int			i;
		TrackItem **sort_table;
		int			track_len;
		int64		cutoff_freq;
		int64		minfreq,
					maxfreq;

		/*
		 * We assume the standard stats code already took care of setting
		 * stats_valid, stanullfrac, stawidth, stadistinct.  We'd have to
		 * re-compute those values if we wanted to not store the standard
		 * stats.
		 */

		/*
		 * Construct an array of the interesting hashtable items, that is,
		 * those meeting the cutoff frequency (s - epsilon)*N.  Also identify
		 * the maximum frequency among these items.
		 *
		 * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
		 * frequency is 9*N / bucket_width.
		 */
		cutoff_freq = 9 * element_no / bucket_width;

		i = hash_get_num_entries(elements_tab); /* surely enough space */
		sort_table = palloc_array(TrackItem *, i);

		hash_seq_init(&scan_status, elements_tab);
		track_len = 0;
		maxfreq = 0;
		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
		{
			if (item->frequency > cutoff_freq)
			{
				sort_table[track_len++] = item;
				maxfreq = Max(maxfreq, item->frequency);
			}
		}
		Assert(track_len <= i);

		/* emit some statistics for debug purposes */
		elog(DEBUG3, "compute_array_stats: target # mces = %d, "
			 "bucket width = %d, "
			 "# elements = " INT64_FORMAT ", hashtable size = %d, "
			 "usable entries = %d",
			 num_mcelem, bucket_width, element_no, i, track_len);

		/*
		 * If we obtained more elements than we really want, get rid of those
		 * with least frequencies.  The easiest way is to qsort the array into
		 * descending frequency order and truncate the array.
		 *
		 * If we did not find more elements than we want, then it is safe to
		 * assume that the stored MCE array will contain every element with
		 * frequency above the cutoff.  In that case, rather than storing the
		 * smallest frequency we are keeping, we want to store the minimum
		 * frequency that would have been accepted as a valid MCE.  The
		 * selectivity functions can assume that that is an upper bound on the
		 * frequency of elements not present in the array.
		 *
		 * If we found no candidate MCEs at all, we still want to record the
		 * cutoff frequency, since it's still valid to assume that no element
		 * has frequency more than that.
		 */
		if (num_mcelem < track_len)
		{
			qsort_interruptible(sort_table, track_len, sizeof(TrackItem *),
								trackitem_compare_frequencies_desc, NULL);
			/* set minfreq to the smallest frequency we're keeping */
			minfreq = sort_table[num_mcelem - 1]->frequency;
		}
		else
		{
			num_mcelem = track_len;
			/* set minfreq to the minimum frequency above the cutoff */
			minfreq = cutoff_freq + 1;
			/* ensure maxfreq is nonzero, too */
			if (track_len == 0)
				maxfreq = minfreq;
		}

		/*
		 * Generate MCELEM slot entry.  (This test is intentionally always
		 * true: per the comment above, even with zero qualifying MCEs we
		 * store the slot so the minfreq/maxfreq bounds get recorded.)
		 */
		if (num_mcelem >= 0)
		{
			MemoryContext old_context;
			Datum	   *mcelem_values;
			float4	   *mcelem_freqs;

			/*
			 * We want to store statistics sorted on the element value using
			 * the element type's default comparison function.  This permits
			 * fast binary searches in selectivity estimation functions.
			 */
			qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *),
								trackitem_compare_element, NULL);

			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);

			/*
			 * We sorted statistics on the element value, but we want to be
			 * able to find the minimal and maximal frequencies without going
			 * through all the values.  We also want the frequency of null
			 * elements.  Store these three values at the end of mcelem_freqs.
			 */
			mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
			mcelem_freqs = (float4 *) palloc((num_mcelem + 3) * sizeof(float4));

			/*
			 * See comments above about use of nonnull_cnt as the divisor for
			 * the final frequency estimates.
			 */
			for (i = 0; i < num_mcelem; i++)
			{
				TrackItem  *titem = sort_table[i];

				mcelem_values[i] = datumCopy(titem->key,
											 extra_data->typbyval,
											 extra_data->typlen);
				mcelem_freqs[i] = (double) titem->frequency /
					(double) nonnull_cnt;
			}
			mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
			mcelem_freqs[i++] = (double) maxfreq / (double) nonnull_cnt;
			mcelem_freqs[i++] = (double) null_elem_cnt / (double) nonnull_cnt;

			MemoryContextSwitchTo(old_context);

			stats->stakind[slot_idx] = STATISTIC_KIND_MCELEM;
			stats->staop[slot_idx] = extra_data->eq_opr;
			stats->stacoll[slot_idx] = extra_data->coll_id;
			stats->stanumbers[slot_idx] = mcelem_freqs;
			/* See above comment about extra stanumber entries */
			stats->numnumbers[slot_idx] = num_mcelem + 3;
			stats->stavalues[slot_idx] = mcelem_values;
			stats->numvalues[slot_idx] = num_mcelem;
			/* We are storing values of element type */
			stats->statypid[slot_idx] = extra_data->type_id;
			stats->statyplen[slot_idx] = extra_data->typlen;
			stats->statypbyval[slot_idx] = extra_data->typbyval;
			stats->statypalign[slot_idx] = extra_data->typalign;
			slot_idx++;
		}

		/* Generate DECHIST slot entry */
		count_items_count = hash_get_num_entries(count_tab);
		if (count_items_count > 0)
		{
			int			num_hist = stats->attstattarget;
			DECountItem **sorted_count_items;
			int			j;
			int			delta;
			int64		frac;
			float4	   *hist;

			/* num_hist must be at least 2 for the loop below to work */
			num_hist = Max(num_hist, 2);

			/*
			 * Create an array of DECountItem pointers, and sort them into
			 * increasing count order.
			 */
			sorted_count_items = palloc_array(DECountItem *, count_items_count);
			hash_seq_init(&scan_status, count_tab);
			j = 0;
			while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
			{
				sorted_count_items[j++] = count_item;
			}
			qsort_interruptible(sorted_count_items, count_items_count,
								sizeof(DECountItem *),
								countitem_compare_count, NULL);

			/*
			 * Prepare to fill stanumbers with the histogram, followed by the
			 * average count.  This array must be stored in anl_context.
			 */
			hist = (float4 *)
				MemoryContextAlloc(stats->anl_context,
								   sizeof(float4) * (num_hist + 1));
			hist[num_hist] = (double) element_no / (double) nonnull_cnt;

			/*----------
			 * Construct the histogram of distinct-element counts (DECs).
			 *
			 * The object of this loop is to copy the min and max DECs to
			 * hist[0] and hist[num_hist - 1], along with evenly-spaced DECs
			 * in between (where "evenly-spaced" is with reference to the
			 * whole input population of arrays).  If we had a complete sorted
			 * array of DECs, one per analyzed row, the i'th hist value would
			 * come from DECs[i * (analyzed_rows - 1) / (num_hist - 1)]
			 * (compare the histogram-making loop in compute_scalar_stats()).
			 * But instead of that we have the sorted_count_items[] array,
			 * which holds unique DEC values with their frequencies (that is,
			 * a run-length-compressed version of the full array).  So we
			 * control advancing through sorted_count_items[] with the
			 * variable "frac", which is defined as (x - y) * (num_hist - 1),
			 * where x is the index in the notional DECs array corresponding
			 * to the start of the next sorted_count_items[] element's run,
			 * and y is the index in DECs from which we should take the next
			 * histogram value.  We have to advance whenever x <= y, that is
			 * frac <= 0.  The x component is the sum of the frequencies seen
			 * so far (up through the current sorted_count_items[] element),
			 * and of course y * (num_hist - 1) = i * (analyzed_rows - 1),
			 * per the subscript calculation above.  (The subscript calculation
			 * implies dropping any fractional part of y; in this formulation
			 * that's handled by not advancing until frac reaches 1.)
			 *
			 * Even though frac has a bounded range, it could overflow int32
			 * when working with very large statistics targets, so we do that
			 * math in int64.
			 *----------
			 */
			delta = analyzed_rows - 1;
			j = 0;				/* current index in sorted_count_items */
			/* Initialize frac for sorted_count_items[0]; y is initially 0 */
			frac = (int64) sorted_count_items[0]->frequency * (num_hist - 1);
			for (i = 0; i < num_hist; i++)
			{
				while (frac <= 0)
				{
					/* Advance, and update x component of frac */
					j++;
					frac += (int64) sorted_count_items[j]->frequency * (num_hist - 1);
				}
				hist[i] = sorted_count_items[j]->count;
				frac -= delta;	/* update y for upcoming i increment */
			}
			Assert(j == count_items_count - 1);

			stats->stakind[slot_idx] = STATISTIC_KIND_DECHIST;
			stats->staop[slot_idx] = extra_data->eq_opr;
			stats->stacoll[slot_idx] = extra_data->coll_id;
			stats->stanumbers[slot_idx] = hist;
			stats->numnumbers[slot_idx] = num_hist + 1;
			slot_idx++;
		}
	}

	/*
	 * We don't need to bother cleaning up any of our temporary palloc's. The
	 * hashtable should also go away, as it used a child memory context.
	 */
}
691 : :
692 : : /*
693 : : * A function to prune the D structure from the Lossy Counting algorithm.
694 : : * Consult compute_tsvector_stats() for wider explanation.
695 : : */
696 : : static void
5037 tgl@sss.pgh.pa.us 697 :UBC 0 : prune_element_hashtable(HTAB *elements_tab, int b_current)
698 : : {
699 : : HASH_SEQ_STATUS scan_status;
700 : : TrackItem *item;
701 : :
702 : 0 : hash_seq_init(&scan_status, elements_tab);
703 [ # # ]: 0 : while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
704 : : {
705 [ # # ]: 0 : if (item->frequency + item->delta <= b_current)
706 : : {
707 : 0 : Datum value = item->key;
708 : :
1045 peter@eisentraut.org 709 [ # # ]: 0 : if (hash_search(elements_tab, &item->key,
710 : : HASH_REMOVE, NULL) == NULL)
5037 tgl@sss.pgh.pa.us 711 [ # # ]: 0 : elog(ERROR, "hash table corrupted");
712 : : /* We should free memory if element is not passed by value */
713 [ # # ]: 0 : if (!array_extra_data->typbyval)
714 : 0 : pfree(DatumGetPointer(value));
715 : : }
716 : : }
717 : 0 : }
718 : :
719 : : /*
720 : : * Hash function for elements.
721 : : *
722 : : * We use the element type's default hash opclass, and the column collation
723 : : * if the type is collation-sensitive.
724 : : */
725 : : static uint32
5037 tgl@sss.pgh.pa.us 726 :CBC 841454 : element_hash(const void *key, Size keysize)
727 : : {
728 : 841454 : Datum d = *((const Datum *) key);
729 : : Datum h;
730 : :
2560 731 : 841454 : h = FunctionCall1Coll(array_extra_data->hash,
732 : 841454 : array_extra_data->coll_id,
733 : : d);
5037 734 : 841454 : return DatumGetUInt32(h);
735 : : }
736 : :
737 : : /*
738 : : * Matching function for elements, to be used in hashtable lookups.
739 : : */
740 : : static int
741 : 667215 : element_match(const void *key1, const void *key2, Size keysize)
742 : : {
743 : : /* The keysize parameter is superfluous here */
744 : 667215 : return element_compare(key1, key2);
745 : : }
746 : :
747 : : /*
748 : : * Comparison function for elements.
749 : : *
750 : : * We use the element type's default btree opclass, and the column collation
751 : : * if the type is collation-sensitive.
752 : : *
753 : : * XXX consider using SortSupport infrastructure
754 : : */
755 : : static int
756 : 1192447 : element_compare(const void *key1, const void *key2)
757 : : {
758 : 1192447 : Datum d1 = *((const Datum *) key1);
759 : 1192447 : Datum d2 = *((const Datum *) key2);
760 : : Datum c;
761 : :
2560 762 : 1192447 : c = FunctionCall2Coll(array_extra_data->cmp,
763 : 1192447 : array_extra_data->coll_id,
764 : : d1, d2);
5037 765 : 1192447 : return DatumGetInt32(c);
766 : : }
767 : :
768 : : /*
769 : : * Comparator for sorting TrackItems by frequencies (descending sort)
770 : : */
771 : : static int
1254 772 : 18297 : trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg)
773 : : {
3101 774 : 18297 : const TrackItem *const *t1 = (const TrackItem *const *) e1;
775 : 18297 : const TrackItem *const *t2 = (const TrackItem *const *) e2;
776 : :
5037 777 : 18297 : return (*t2)->frequency - (*t1)->frequency;
778 : : }
779 : :
780 : : /*
781 : : * Comparator for sorting TrackItems by element values
782 : : */
783 : : static int
1254 784 : 525232 : trackitem_compare_element(const void *e1, const void *e2, void *arg)
785 : : {
3101 786 : 525232 : const TrackItem *const *t1 = (const TrackItem *const *) e1;
787 : 525232 : const TrackItem *const *t2 = (const TrackItem *const *) e2;
788 : :
5037 789 : 525232 : return element_compare(&(*t1)->key, &(*t2)->key);
790 : : }
791 : :
792 : : /*
793 : : * Comparator for sorting DECountItems by count
794 : : */
795 : : static int
1254 796 : 10955 : countitem_compare_count(const void *e1, const void *e2, void *arg)
797 : : {
3101 798 : 10955 : const DECountItem *const *t1 = (const DECountItem *const *) e1;
799 : 10955 : const DECountItem *const *t2 = (const DECountItem *const *) e2;
800 : :
5037 801 [ + + ]: 10955 : if ((*t1)->count < (*t2)->count)
802 : 5174 : return -1;
803 [ - + ]: 5781 : else if ((*t1)->count == (*t2)->count)
5037 tgl@sss.pgh.pa.us 804 :UBC 0 : return 0;
805 : : else
5037 tgl@sss.pgh.pa.us 806 :CBC 5781 : return 1;
807 : : }
|