Example #1
Datum
textne(PG_FUNCTION_ARGS)
{
	Datum		arg1 = PG_GETARG_DATUM(0);
	Datum		arg2 = PG_GETARG_DATUM(1);
	bool		result;
	Size		len1,
				len2;

	/* See comment in texteq() */
	len1 = toast_raw_datum_size(arg1);
	len2 = toast_raw_datum_size(arg2);
	if (len1 != len2)
		result = true;
	else
	{
		text	   *targ1 = DatumGetTextPP(arg1);
		text	   *targ2 = DatumGetTextPP(arg2);

		result = (memcmp(VARDATA_ANY(targ1), VARDATA_ANY(targ2),
						 len1 - VARHDRSZ) != 0);

		PG_FREE_IF_COPY(targ1, 0);
		PG_FREE_IF_COPY(targ2, 1);
	}

	PG_RETURN_BOOL(result);
}
Example #2
Datum
texteq(PG_FUNCTION_ARGS)
{
	Datum		arg1 = PG_GETARG_DATUM(0);
	Datum		arg2 = PG_GETARG_DATUM(1);
	bool		result;
	Size		len1,
				len2;

	/*
	 * Since we only care about equality or not-equality, we can avoid all the
	 * expense of strcoll() here, and just do bitwise comparison.  In fact, we
	 * don't even have to do a bitwise comparison if we can show the lengths
	 * of the strings are unequal; which might save us from having to detoast
	 * one or both values.
	 */
	len1 = toast_raw_datum_size(arg1);
	len2 = toast_raw_datum_size(arg2);
	if (len1 != len2)
		result = false;
	else
	{
		text	   *targ1 = DatumGetTextPP(arg1);
		text	   *targ2 = DatumGetTextPP(arg2);

		result = (memcmp(VARDATA_ANY(targ1), VARDATA_ANY(targ2),
						 len1 - VARHDRSZ) == 0);

		PG_FREE_IF_COPY(targ1, 0);
		PG_FREE_IF_COPY(targ2, 1);
	}

	PG_RETURN_BOOL(result);
}
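The two listings above show only the function bodies. If you write a comparable function in a loadable extension module, you also need the version-1 fmgr boilerplate that the listings omit. Below is a minimal sketch under that assumption; the module layout, the name my_texteq, and the header path are illustrative (toast_raw_datum_size() is declared in access/tuptoaster.h in the PostgreSQL versions these examples come from, access/detoast.h in newer ones), and the body simply repeats the texteq() size-check trick.

#include "postgres.h"
#include "fmgr.h"
#include "access/tuptoaster.h"	/* toast_raw_datum_size(); access/detoast.h on newer versions */

PG_MODULE_MAGIC;				/* required once per loadable module */

PG_FUNCTION_INFO_V1(my_texteq);	/* exposes the version-1 calling convention */

Datum
my_texteq(PG_FUNCTION_ARGS)
{
	Datum		arg1 = PG_GETARG_DATUM(0);
	Datum		arg2 = PG_GETARG_DATUM(1);
	bool		result;

	/* Same trick as texteq(): unequal raw sizes cannot compare equal. */
	if (toast_raw_datum_size(arg1) != toast_raw_datum_size(arg2))
		result = false;
	else
	{
		text	   *t1 = DatumGetTextPP(arg1);
		text	   *t2 = DatumGetTextPP(arg2);

		result = (memcmp(VARDATA_ANY(t1), VARDATA_ANY(t2),
						 VARSIZE_ANY_EXHDR(t1)) == 0);

		PG_FREE_IF_COPY(t1, 0);
		PG_FREE_IF_COPY(t2, 1);
	}

	PG_RETURN_BOOL(result);
}

/*
 * Registration from SQL would look roughly like:
 *   CREATE FUNCTION my_texteq(text, text) RETURNS bool
 *     AS 'MODULE_PATHNAME', 'my_texteq' LANGUAGE C IMMUTABLE STRICT;
 */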
Example #3
File: rowtypes.c  Project: PJMODOS/postgres
/*
 * record_image_eq :
 *		  compares two records for identical contents, based on byte images
 * result :
 *		  returns true if the records are identical, false otherwise.
 *
 * Note: we do not use record_image_cmp here, since we can avoid
 * de-toasting for unequal lengths this way.
 */
Datum
record_image_eq(PG_FUNCTION_ARGS)
{
	HeapTupleHeader record1 = PG_GETARG_HEAPTUPLEHEADER(0);
	HeapTupleHeader record2 = PG_GETARG_HEAPTUPLEHEADER(1);
	bool		result = true;
	Oid			tupType1;
	Oid			tupType2;
	int32		tupTypmod1;
	int32		tupTypmod2;
	TupleDesc	tupdesc1;
	TupleDesc	tupdesc2;
	HeapTupleData tuple1;
	HeapTupleData tuple2;
	int			ncolumns1;
	int			ncolumns2;
	RecordCompareData *my_extra;
	int			ncols;
	Datum	   *values1;
	Datum	   *values2;
	bool	   *nulls1;
	bool	   *nulls2;
	int			i1;
	int			i2;
	int			j;

	/* Extract type info from the tuples */
	tupType1 = HeapTupleHeaderGetTypeId(record1);
	tupTypmod1 = HeapTupleHeaderGetTypMod(record1);
	tupdesc1 = lookup_rowtype_tupdesc(tupType1, tupTypmod1);
	ncolumns1 = tupdesc1->natts;
	tupType2 = HeapTupleHeaderGetTypeId(record2);
	tupTypmod2 = HeapTupleHeaderGetTypMod(record2);
	tupdesc2 = lookup_rowtype_tupdesc(tupType2, tupTypmod2);
	ncolumns2 = tupdesc2->natts;

	/* Build temporary HeapTuple control structures */
	tuple1.t_len = HeapTupleHeaderGetDatumLength(record1);
	ItemPointerSetInvalid(&(tuple1.t_self));
	tuple1.t_tableOid = InvalidOid;
	tuple1.t_data = record1;
	tuple2.t_len = HeapTupleHeaderGetDatumLength(record2);
	ItemPointerSetInvalid(&(tuple2.t_self));
	tuple2.t_tableOid = InvalidOid;
	tuple2.t_data = record2;

	/*
	 * We arrange to look up the needed comparison info just once per series
	 * of calls, assuming the record types don't change underneath us.
	 */
	ncols = Max(ncolumns1, ncolumns2);
	my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL ||
		my_extra->ncolumns < ncols)
	{
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   offsetof(RecordCompareData, columns) +
							   ncols * sizeof(ColumnCompareData));
		my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
		my_extra->ncolumns = ncols;
		my_extra->record1_type = InvalidOid;
		my_extra->record1_typmod = 0;
		my_extra->record2_type = InvalidOid;
		my_extra->record2_typmod = 0;
	}

	if (my_extra->record1_type != tupType1 ||
		my_extra->record1_typmod != tupTypmod1 ||
		my_extra->record2_type != tupType2 ||
		my_extra->record2_typmod != tupTypmod2)
	{
		MemSet(my_extra->columns, 0, ncols * sizeof(ColumnCompareData));
		my_extra->record1_type = tupType1;
		my_extra->record1_typmod = tupTypmod1;
		my_extra->record2_type = tupType2;
		my_extra->record2_typmod = tupTypmod2;
	}

	/* Break down the tuples into fields */
	values1 = (Datum *) palloc(ncolumns1 * sizeof(Datum));
	nulls1 = (bool *) palloc(ncolumns1 * sizeof(bool));
	heap_deform_tuple(&tuple1, tupdesc1, values1, nulls1);
	values2 = (Datum *) palloc(ncolumns2 * sizeof(Datum));
	nulls2 = (bool *) palloc(ncolumns2 * sizeof(bool));
	heap_deform_tuple(&tuple2, tupdesc2, values2, nulls2);

	/*
	 * Scan corresponding columns, allowing for dropped columns in different
	 * places in the two rows.  i1 and i2 are physical column indexes, j is
	 * the logical column index.
	 */
	i1 = i2 = j = 0;
	while (i1 < ncolumns1 || i2 < ncolumns2)
	{
		/*
		 * Skip dropped columns
		 */
		if (i1 < ncolumns1 && tupdesc1->attrs[i1]->attisdropped)
		{
			i1++;
			continue;
		}
		if (i2 < ncolumns2 && tupdesc2->attrs[i2]->attisdropped)
		{
			i2++;
			continue;
		}
		if (i1 >= ncolumns1 || i2 >= ncolumns2)
			break;				/* we'll deal with mismatch below loop */

		/*
		 * Have two matching columns, they must be same type
		 */
		if (tupdesc1->attrs[i1]->atttypid !=
			tupdesc2->attrs[i2]->atttypid)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("cannot compare dissimilar column types %s and %s at record column %d",
							format_type_be(tupdesc1->attrs[i1]->atttypid),
							format_type_be(tupdesc2->attrs[i2]->atttypid),
							j + 1)));

		/*
		 * We consider two NULLs equal; NULL > not-NULL.
		 */
		if (!nulls1[i1] || !nulls2[i2])
		{
			if (nulls1[i1] || nulls2[i2])
			{
				result = false;
				break;
			}

			/* Compare the pair of elements */
			if (tupdesc1->attrs[i1]->attlen == -1)
			{
				Size		len1,
							len2;

				len1 = toast_raw_datum_size(values1[i1]);
				len2 = toast_raw_datum_size(values2[i2]);
				/* No need to de-toast if lengths don't match. */
				if (len1 != len2)
					result = false;
				else
				{
					struct varlena *arg1val;
					struct varlena *arg2val;

					arg1val = PG_DETOAST_DATUM_PACKED(values1[i1]);
					arg2val = PG_DETOAST_DATUM_PACKED(values2[i2]);

					result = (memcmp(VARDATA_ANY(arg1val),
									 VARDATA_ANY(arg2val),
									 len1 - VARHDRSZ) == 0);

					/* Only free memory if it's a copy made here. */
					if ((Pointer) arg1val != (Pointer) values1[i1])
						pfree(arg1val);
					if ((Pointer) arg2val != (Pointer) values2[i2])
						pfree(arg2val);
				}
			}
			else if (tupdesc1->attrs[i1]->attbyval)
			{
				switch (tupdesc1->attrs[i1]->attlen)
				{
					case 1:
						result = (GET_1_BYTE(values1[i1]) ==
								  GET_1_BYTE(values2[i2]));
						break;
					case 2:
						result = (GET_2_BYTES(values1[i1]) ==
								  GET_2_BYTES(values2[i2]));
						break;
					case 4:
						result = (GET_4_BYTES(values1[i1]) ==
								  GET_4_BYTES(values2[i2]));
						break;
#if SIZEOF_DATUM == 8
					case 8:
						result = (GET_8_BYTES(values1[i1]) ==
								  GET_8_BYTES(values2[i2]));
						break;
#endif
					default:
						Assert(false);	/* cannot happen */
				}
			}
			else
			{
				result = (memcmp(DatumGetPointer(values1[i1]),
								 DatumGetPointer(values2[i2]),
								 tupdesc1->attrs[i1]->attlen) == 0);
			}
			if (!result)
				break;
		}

		/* equal, so continue to next column */
		i1++, i2++, j++;
	}

	/*
	 * If we didn't break out of the loop early, check for column count
	 * mismatch.  (We do not report such mismatch if we found unequal column
	 * values; is that a feature or a bug?)
	 */
	if (result)
	{
		if (i1 != ncolumns1 || i2 != ncolumns2)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("cannot compare record types with different numbers of columns")));
	}

	pfree(values1);
	pfree(nulls1);
	pfree(values2);
	pfree(nulls2);
	ReleaseTupleDesc(tupdesc1);
	ReleaseTupleDesc(tupdesc2);

	/* Avoid leaking memory when handed toasted input. */
	PG_FREE_IF_COPY(record1, 0);
	PG_FREE_IF_COPY(record2, 1);

	PG_RETURN_BOOL(result);
}
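The fn_extra logic above is the standard fmgr pattern for caching expensive lookups across a series of calls to the same function (for example, once per query rather than once per row). A stripped-down sketch of just that pattern follows; MyFuncCache and my_cached_func are hypothetical names, only the fn_extra/fn_mcxt mechanics are real PostgreSQL API.

/* Hypothetical per-call-series cache; the fields are illustrative only. */
typedef struct MyFuncCache
{
	Oid			cached_type;	/* InvalidOid means "not set up yet" */
	/* ... whatever per-type lookup results are expensive to recompute ... */
} MyFuncCache;

PG_FUNCTION_INFO_V1(my_cached_func);

Datum
my_cached_func(PG_FUNCTION_ARGS)
{
	MyFuncCache *cache = (MyFuncCache *) fcinfo->flinfo->fn_extra;

	if (cache == NULL)
	{
		/*
		 * Allocate in fn_mcxt so the cache lives exactly as long as the
		 * FmgrInfo itself, i.e. for the whole series of calls, and goes
		 * away automatically with it.
		 */
		cache = (MyFuncCache *)
			MemoryContextAllocZero(fcinfo->flinfo->fn_mcxt,
								   sizeof(MyFuncCache));
		cache->cached_type = InvalidOid;
		fcinfo->flinfo->fn_extra = cache;
	}

	/* ... refresh the cached data if the input type changed, then use it ... */

	PG_RETURN_BOOL(true);
}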
Example #4
/*
 * compute_array_stats() -- compute statistics for an array column
 *
 * This function computes statistics useful for determining selectivity of
 * the array operators <@, &&, and @>.  It is invoked by ANALYZE via the
 * compute_stats hook after sample rows have been collected.
 *
 * We also invoke the standard compute_stats function, which will compute
 * "scalar" statistics relevant to the btree-style array comparison operators.
 * However, exact duplicates of an entire array may be rare despite many
 * arrays sharing individual elements.  This especially afflicts long arrays,
 * which are also liable to lack all scalar statistics due to the low
 * WIDTH_THRESHOLD used in analyze.c.  So, in addition to the standard stats,
 * we find the most common array elements and compute a histogram of distinct
 * element counts.
 *
 * The algorithm used is Lossy Counting, as proposed in the paper "Approximate
 * frequency counts over data streams" by G. S. Manku and R. Motwani, in
 * Proceedings of the 28th International Conference on Very Large Data Bases,
 * Hong Kong, China, August 2002, section 4.2. The paper is available at
 * http://www.vldb.org/conf/2002/S10P03.pdf
 *
 * The Lossy Counting (aka LC) algorithm goes like this:
 * Let s be the threshold frequency for an item (the minimum frequency we
 * are interested in) and epsilon the error margin for the frequency. Let D
 * be a set of triples (e, f, delta), where e is an element value, f is that
 * element's frequency (actually, its current occurrence count) and delta is
 * the maximum error in f. We start with D empty and process the elements in
 * batches of size w. (The batch size is also known as "bucket size" and is
 * equal to 1/epsilon.) Let the current batch number be b_current, starting
 * with 1. For each element e we either increment its f count, if it's
 * already in D, or insert a new triple into D with values (e, 1, b_current
 * - 1). After processing each batch we prune D, by removing from it all
 * elements with f + delta <= b_current.  After the algorithm finishes we
 * suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
 * where N is the total number of elements in the input.  We emit the
 * remaining elements with estimated frequency f/N.  The LC paper proves
 * that this algorithm finds all elements with true frequency at least s,
 * and that no frequency is overestimated or is underestimated by more than
 * epsilon.  Furthermore, given reasonable assumptions about the input
 * distribution, the required table size is no more than about 7 times w.
 *
 * In the absence of a principled basis for other particular values, we
 * follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
 * But we leave out the correction for stopwords, which do not apply to
 * arrays.  These parameters give bucket width w = K/0.007 and maximum
 * expected hashtable size of about 1000 * K.
 *
 * Elements may repeat within an array.  Since duplicates do not change the
 * behavior of <@, && or @>, we want to count each element only once per
 * array.  Therefore, we store in the finished pg_statistic entry each
 * element's frequency as the fraction of all non-null rows that contain it.
 * We divide the raw counts by nonnull_cnt to get those figures.
 */
static void
compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
					int samplerows, double totalrows)
{
	ArrayAnalyzeExtraData *extra_data;
	int			num_mcelem;
	int			null_cnt = 0;
	int			null_elem_cnt = 0;
	int			analyzed_rows = 0;

	/* This is D from the LC algorithm. */
	HTAB	   *elements_tab;
	HASHCTL		elem_hash_ctl;
	HASH_SEQ_STATUS scan_status;

	/* This is the current bucket number from the LC algorithm */
	int			b_current;

	/* This is 'w' from the LC algorithm */
	int			bucket_width;
	int			array_no;
	int64		element_no;
	TrackItem  *item;
	int			slot_idx;
	HTAB	   *count_tab;
	HASHCTL		count_hash_ctl;
	DECountItem *count_item;

	extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;

	/*
	 * Invoke analyze.c's standard analysis function to create scalar-style
	 * stats for the column.  It will expect its own extra_data pointer, so
	 * temporarily install that.
	 */
	stats->extra_data = extra_data->std_extra_data;
	(*extra_data->std_compute_stats) (stats, fetchfunc, samplerows, totalrows);
	stats->extra_data = extra_data;

	/*
	 * Set up static pointer for use by subroutines.  We wait till here in
	 * case std_compute_stats somehow recursively invokes us (probably not
	 * possible, but ...)
	 */
	array_extra_data = extra_data;

	/*
	 * We want statistics_target * 10 elements in the MCELEM array. This
	 * multiplier is pretty arbitrary, but is meant to reflect the fact that
	 * the number of individual elements tracked in pg_statistic ought to be
	 * more than the number of values for a simple scalar column.
	 */
	num_mcelem = stats->attr->attstattarget * 10;

	/*
	 * We set bucket width equal to num_mcelem / 0.007 as per the comment
	 * above.
	 */
	bucket_width = num_mcelem * 1000 / 7;

	/*
	 * Create the hashtable. It will be in local memory, so we don't need to
	 * worry about overflowing the initial size. Also we don't need to pay any
	 * attention to locking and memory management.
	 */
	MemSet(&elem_hash_ctl, 0, sizeof(elem_hash_ctl));
	elem_hash_ctl.keysize = sizeof(Datum);
	elem_hash_ctl.entrysize = sizeof(TrackItem);
	elem_hash_ctl.hash = element_hash;
	elem_hash_ctl.match = element_match;
	elem_hash_ctl.hcxt = CurrentMemoryContext;
	elements_tab = hash_create("Analyzed elements table",
							   num_mcelem,
							   &elem_hash_ctl,
					HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);

	/* hashtable for array distinct elements counts */
	MemSet(&count_hash_ctl, 0, sizeof(count_hash_ctl));
	count_hash_ctl.keysize = sizeof(int);
	count_hash_ctl.entrysize = sizeof(DECountItem);
	count_hash_ctl.hcxt = CurrentMemoryContext;
	count_tab = hash_create("Array distinct element count table",
							64,
							&count_hash_ctl,
							HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	/* Initialize counters. */
	b_current = 1;
	element_no = 0;

	/* Loop over the arrays. */
	for (array_no = 0; array_no < samplerows; array_no++)
	{
		Datum		value;
		bool		isnull;
		ArrayType  *array;
		int			num_elems;
		Datum	   *elem_values;
		bool	   *elem_nulls;
		bool		null_present;
		int			j;
		int64		prev_element_no = element_no;
		int			distinct_count;
		bool		count_item_found;

		vacuum_delay_point();

		value = fetchfunc(stats, array_no, &isnull);
		if (isnull)
		{
			/* array is null, just count that */
			null_cnt++;
			continue;
		}

		/* Skip too-large values. */
		if (toast_raw_datum_size(value) > ARRAY_WIDTH_THRESHOLD)
			continue;
		else
			analyzed_rows++;

		/*
		 * Now detoast the array if needed, and deconstruct into datums.
		 */
		array = DatumGetArrayTypeP(value);

		Assert(ARR_ELEMTYPE(array) == extra_data->type_id);
		deconstruct_array(array,
						  extra_data->type_id,
						  extra_data->typlen,
						  extra_data->typbyval,
						  extra_data->typalign,
						  &elem_values, &elem_nulls, &num_elems);

		/*
		 * We loop through the elements in the array and add them to our
		 * tracking hashtable.
		 */
		null_present = false;
		for (j = 0; j < num_elems; j++)
		{
			Datum		elem_value;
			bool		found;

			/* No null element processing other than flag setting here */
			if (elem_nulls[j])
			{
				null_present = true;
				continue;
			}

			/* Lookup current element in hashtable, adding it if new */
			elem_value = elem_values[j];
			item = (TrackItem *) hash_search(elements_tab,
											 (const void *) &elem_value,
											 HASH_ENTER, &found);

			if (found)
			{
				/* The element value is already on the tracking list */

				/*
				 * The operators we assist ignore duplicate array elements, so
				 * count a given distinct element only once per array.
				 */
				if (item->last_container == array_no)
					continue;

				item->frequency++;
				item->last_container = array_no;
			}
			else
			{
				/* Initialize new tracking list element */

				/*
				 * If element type is pass-by-reference, we must copy it into
				 * palloc'd space, so that we can release the array below. (We
				 * do this so that the space needed for element values is
				 * limited by the size of the hashtable; if we kept all the
				 * array values around, it could be much more.)
				 */
				item->key = datumCopy(elem_value,
									  extra_data->typbyval,
									  extra_data->typlen);

				item->frequency = 1;
				item->delta = b_current - 1;
				item->last_container = array_no;
			}

			/* element_no is the number of elements processed (ie N) */
			element_no++;

			/* We prune the D structure after processing each bucket */
			if (element_no % bucket_width == 0)
			{
				prune_element_hashtable(elements_tab, b_current);
				b_current++;
			}
		}

		/* Count null element presence once per array. */
		if (null_present)
			null_elem_cnt++;

		/* Update frequency of the particular array distinct element count. */
		distinct_count = (int) (element_no - prev_element_no);
		count_item = (DECountItem *) hash_search(count_tab, &distinct_count,
												 HASH_ENTER,
												 &count_item_found);

		if (count_item_found)
			count_item->frequency++;
		else
			count_item->frequency = 1;

		/* Free memory allocated while detoasting. */
		if (PointerGetDatum(array) != value)
			pfree(array);
		pfree(elem_values);
		pfree(elem_nulls);
	}

	/* Skip pg_statistic slots occupied by standard statistics */
	slot_idx = 0;
	while (slot_idx < STATISTIC_NUM_SLOTS && stats->stakind[slot_idx] != 0)
		slot_idx++;
	if (slot_idx > STATISTIC_NUM_SLOTS - 2)
		elog(ERROR, "insufficient pg_statistic slots for array stats");

	/* We can only compute real stats if we found some non-null values. */
	if (analyzed_rows > 0)
	{
		int			nonnull_cnt = analyzed_rows;
		int			count_items_count;
		int			i;
		TrackItem **sort_table;
		int			track_len;
		int64		cutoff_freq;
		int64		minfreq,
					maxfreq;

		/*
		 * We assume the standard stats code already took care of setting
		 * stats_valid, stanullfrac, stawidth, stadistinct.  We'd have to
		 * re-compute those values if we wanted to not store the standard
		 * stats.
		 */

		/*
		 * Construct an array of the interesting hashtable items, that is,
		 * those meeting the cutoff frequency (s - epsilon)*N.  Also identify
		 * the minimum and maximum frequencies among these items.
		 *
		 * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
		 * frequency is 9*N / bucket_width.
		 */
		cutoff_freq = 9 * element_no / bucket_width;

		i = hash_get_num_entries(elements_tab); /* surely enough space */
		sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);

		hash_seq_init(&scan_status, elements_tab);
		track_len = 0;
		minfreq = element_no;
		maxfreq = 0;
		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
		{
			if (item->frequency > cutoff_freq)
			{
				sort_table[track_len++] = item;
				minfreq = Min(minfreq, item->frequency);
				maxfreq = Max(maxfreq, item->frequency);
			}
		}
		Assert(track_len <= i);

		/* emit some statistics for debug purposes */
		elog(DEBUG3, "compute_array_stats: target # mces = %d, "
			 "bucket width = %d, "
			 "# elements = " INT64_FORMAT ", hashtable size = %d, "
			 "usable entries = %d",
			 num_mcelem, bucket_width, element_no, i, track_len);

		/*
		 * If we obtained more elements than we really want, get rid of those
		 * with least frequencies.  The easiest way is to qsort the array into
		 * descending frequency order and truncate the array.
		 */
		if (num_mcelem < track_len)
		{
			qsort(sort_table, track_len, sizeof(TrackItem *),
				  trackitem_compare_frequencies_desc);
			/* reset minfreq to the smallest frequency we're keeping */
			minfreq = sort_table[num_mcelem - 1]->frequency;
		}
		else
			num_mcelem = track_len;

		/* Generate MCELEM slot entry */
		if (num_mcelem > 0)
		{
			MemoryContext old_context;
			Datum	   *mcelem_values;
			float4	   *mcelem_freqs;

			/*
			 * We want to store statistics sorted on the element value using
			 * the element type's default comparison function.  This permits
			 * fast binary searches in selectivity estimation functions.
			 */
			qsort(sort_table, num_mcelem, sizeof(TrackItem *),
				  trackitem_compare_element);

			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);

			/*
			 * We sorted statistics on the element value, but we want to be
			 * able to find the minimal and maximal frequencies without going
			 * through all the values.  We also want the frequency of null
			 * elements.  Store these three values at the end of mcelem_freqs.
			 */
			mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
			mcelem_freqs = (float4 *) palloc((num_mcelem + 3) * sizeof(float4));

			/*
			 * See comments above about use of nonnull_cnt as the divisor for
			 * the final frequency estimates.
			 */
			for (i = 0; i < num_mcelem; i++)
			{
				TrackItem  *item = sort_table[i];

				mcelem_values[i] = datumCopy(item->key,
											 extra_data->typbyval,
											 extra_data->typlen);
				mcelem_freqs[i] = (double) item->frequency /
					(double) nonnull_cnt;
			}
			mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
			mcelem_freqs[i++] = (double) maxfreq / (double) nonnull_cnt;
			mcelem_freqs[i++] = (double) null_elem_cnt / (double) nonnull_cnt;

			MemoryContextSwitchTo(old_context);

			stats->stakind[slot_idx] = STATISTIC_KIND_MCELEM;
			stats->staop[slot_idx] = extra_data->eq_opr;
			stats->stanumbers[slot_idx] = mcelem_freqs;
			/* See above comment about extra stanumber entries */
			stats->numnumbers[slot_idx] = num_mcelem + 3;
			stats->stavalues[slot_idx] = mcelem_values;
			stats->numvalues[slot_idx] = num_mcelem;
			/* We are storing values of element type */
			stats->statypid[slot_idx] = extra_data->type_id;
			stats->statyplen[slot_idx] = extra_data->typlen;
			stats->statypbyval[slot_idx] = extra_data->typbyval;
			stats->statypalign[slot_idx] = extra_data->typalign;
			slot_idx++;
		}

		/* Generate DECHIST slot entry */
		count_items_count = hash_get_num_entries(count_tab);
		if (count_items_count > 0)
		{
			int			num_hist = stats->attr->attstattarget;
			DECountItem **sorted_count_items;
			int			j;
			int			delta;
			int64		frac;
			float4	   *hist;

			/* num_hist must be at least 2 for the loop below to work */
			num_hist = Max(num_hist, 2);

			/*
			 * Create an array of DECountItem pointers, and sort them into
			 * increasing count order.
			 */
			sorted_count_items = (DECountItem **)
				palloc(sizeof(DECountItem *) * count_items_count);
			hash_seq_init(&scan_status, count_tab);
			j = 0;
			while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
			{
				sorted_count_items[j++] = count_item;
			}
			qsort(sorted_count_items, count_items_count,
				  sizeof(DECountItem *), countitem_compare_count);

			/*
			 * Prepare to fill stanumbers with the histogram, followed by the
			 * average count.  This array must be stored in anl_context.
			 */
			hist = (float4 *)
				MemoryContextAlloc(stats->anl_context,
								   sizeof(float4) * (num_hist + 1));
			hist[num_hist] = (double) element_no / (double) nonnull_cnt;

			/*----------
			 * Construct the histogram of distinct-element counts (DECs).
			 *
			 * The object of this loop is to copy the min and max DECs to
			 * hist[0] and hist[num_hist - 1], along with evenly-spaced DECs
			 * in between (where "evenly-spaced" is with reference to the
			 * whole input population of arrays).  If we had a complete sorted
			 * array of DECs, one per analyzed row, the i'th hist value would
			 * come from DECs[i * (analyzed_rows - 1) / (num_hist - 1)]
			 * (compare the histogram-making loop in compute_scalar_stats()).
			 * But instead of that we have the sorted_count_items[] array,
			 * which holds unique DEC values with their frequencies (that is,
			 * a run-length-compressed version of the full array).  So we
			 * control advancing through sorted_count_items[] with the
			 * variable "frac", which is defined as (x - y) * (num_hist - 1),
			 * where x is the index in the notional DECs array corresponding
			 * to the start of the next sorted_count_items[] element's run,
			 * and y is the index in DECs from which we should take the next
			 * histogram value.  We have to advance whenever x <= y, that is
			 * frac <= 0.  The x component is the sum of the frequencies seen
			 * so far (up through the current sorted_count_items[] element),
			 * and of course y * (num_hist - 1) = i * (analyzed_rows - 1),
			 * per the subscript calculation above.  (The subscript calculation
			 * implies dropping any fractional part of y; in this formulation
			 * that's handled by not advancing until frac reaches 1.)
			 *
			 * Even though frac has a bounded range, it could overflow int32
			 * when working with very large statistics targets, so we do that
			 * math in int64.
			 *----------
			 */
			delta = analyzed_rows - 1;
			j = 0;				/* current index in sorted_count_items */
			/* Initialize frac for sorted_count_items[0]; y is initially 0 */
			frac = (int64) sorted_count_items[0]->frequency * (num_hist - 1);
			for (i = 0; i < num_hist; i++)
			{
				while (frac <= 0)
				{
					/* Advance, and update x component of frac */
					j++;
					frac += (int64) sorted_count_items[j]->frequency * (num_hist - 1);
				}
				hist[i] = sorted_count_items[j]->count;
				frac -= delta;	/* update y for upcoming i increment */
			}
			Assert(j == count_items_count - 1);

			stats->stakind[slot_idx] = STATISTIC_KIND_DECHIST;
			stats->staop[slot_idx] = extra_data->eq_opr;
			stats->stanumbers[slot_idx] = hist;
			stats->numnumbers[slot_idx] = num_hist + 1;
			slot_idx++;
		}
	}

	/*
	 * We don't need to bother cleaning up any of our temporary palloc's. The
	 * hashtable should also go away, as it used a child memory context.
	 */
}