Example #1
/*
 * Costing function.
 */
Datum
tsm_system_time_cost(PG_FUNCTION_ARGS)
{
	PlannerInfo	   *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Path		   *path = (Path *) PG_GETARG_POINTER(1);
	RelOptInfo	   *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
	List		   *args = (List *) PG_GETARG_POINTER(3);
	BlockNumber	   *pages = (BlockNumber *) PG_GETARG_POINTER(4);
	double		   *tuples = (double *) PG_GETARG_POINTER(5);
	Node		   *limitnode;
	int32			time;
	BlockNumber		relpages;
	double			reltuples;
	double			density;
	double			spc_random_page_cost;

	/* Try to extract an estimate for the time limit argument */
	limitnode = linitial(args);
	limitnode = estimate_expression_value(root, limitnode);

	if (IsA(limitnode, RelabelType))
		limitnode = (Node *) ((RelabelType *) limitnode)->arg;

	if (IsA(limitnode, Const))
		time = DatumGetInt32(((Const *) limitnode)->constvalue);
	else
	{
		/* Default to 1000 ms (1 s) if the estimation didn't return a Const. */
		time = 1000;
	}

	relpages = baserel->pages;
	reltuples = baserel->tuples;

	/* estimate the tuple density */
	if (relpages > 0)
		density = reltuples / (double) relpages;
	else
	{
		/* No page estimate; derive the density from the average tuple width */
		density = (BLCKSZ - SizeOfPageHeaderData) / baserel->width;
	}

	/*
	 * We equate the random page cost with the number of milliseconds it
	 * takes to read a random page.  That is far from accurate, but we have
	 * nothing better on which to base the predicted number of page reads.
	 */
	get_tablespace_page_costs(baserel->reltablespace,
							  &spc_random_page_cost,
							  NULL);

	/*
	 * Assume we will never read less than 1% of the table's pages.  This
	 * floor exists mainly because overestimating is much less harmful than
	 * underestimating, and relying on spc_random_page_cost alone would tend
	 * to underestimate.
	 */
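	/*
	 * Illustrative numbers (not part of the original code): with the default
	 * 1000 ms limit and spc_random_page_cost = 4.0, time / spc_random_page_cost
	 * predicts 250 page reads; for a 100000-page table the 1% floor raises
	 * that to 1000 pages, and Min() keeps the result within the table size.
	 */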
	*pages = Min(baserel->pages,
				 Max(time / spc_random_page_cost, baserel->pages / 100));
	*tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
	path->rows = *tuples;

	PG_RETURN_VOID();
}
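The costing function above uses the fcinfo-style (PG_FUNCTION_ARGS) calling convention, so the module exposing it would also carry the usual fmgr boilerplate. A minimal sketch, not part of the original example:

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

/* V1 calling-convention declaration for the costing function above */
PG_FUNCTION_INFO_V1(tsm_system_time_cost);
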
Example #2
/*
 * Sample size estimation.
 */
static void
system_time_samplescangetsamplesize(PlannerInfo *root,
									RelOptInfo *baserel,
									List *paramexprs,
									BlockNumber *pages,
									double *tuples)
{
	Node	   *limitnode;
	double		millis;
	double		spc_random_page_cost;
	double		npages;
	double		ntuples;

	/* Try to extract an estimate for the limit time spec */
	limitnode = (Node *) linitial(paramexprs);
	limitnode = estimate_expression_value(root, limitnode);

	if (IsA(limitnode, Const) &&
		!((Const *) limitnode)->constisnull)
	{
		millis = DatumGetFloat8(((Const *) limitnode)->constvalue);
		if (millis < 0 || isnan(millis))
		{
			/* Default millis if the value is bogus */
			millis = 1000;
		}
	}
	else
	{
		/* Default millis if we didn't obtain a non-null Const */
		millis = 1000;
	}

	/* Get the planner's idea of cost per page read */
	get_tablespace_page_costs(baserel->reltablespace,
							  &spc_random_page_cost,
							  NULL);

	/*
	 * Estimate the number of pages we can read by assuming that the cost
	 * figure is expressed in milliseconds.  This is completely, unmistakably
	 * bogus, but we have to do something to produce an estimate and there's
	 * no better answer.
	 */
	if (spc_random_page_cost > 0)
		npages = millis / spc_random_page_cost;
	else
		npages = millis;		/* even more bogus, but whatcha gonna do? */

	/* Clamp to sane value */
	npages = clamp_row_est(Min((double) baserel->pages, npages));

	if (baserel->tuples > 0 && baserel->pages > 0)
	{
		/* Estimate number of tuples returned based on tuple density */
		double		density = baserel->tuples / (double) baserel->pages;

		ntuples = npages * density;
	}
	else
	{
		/* For lack of data, assume one tuple per page */
		ntuples = npages;
	}
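
	/*
	 * Illustrative numbers (assumed, not from the original): npages = 250
	 * with a density of 120 tuples per page gives ntuples = 30000, which
	 * the clamp below keeps within the relation's estimated tuple count.
	 */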

	/* Clamp to the estimated relation size */
	ntuples = clamp_row_est(Min(baserel->tuples, ntuples));

	*pages = npages;
	*tuples = ntuples;
}
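
The estimator in Example #2 is not SQL-callable itself; the planner reaches it through the sampling method's TsmRoutine handler. Below is a minimal sketch of such a handler, assuming the TsmRoutine interface from access/tsmapi.h; the include list is approximate and the other callback names (system_time_initsamplescan and friends) are placeholders standing in for the rest of the method, defined elsewhere in the module.

#include "postgres.h"

#include "access/tsmapi.h"
#include "catalog/pg_type.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(tsm_system_time_handler);

Datum
tsm_system_time_handler(PG_FUNCTION_ARGS)
{
	TsmRoutine *tsm = makeNode(TsmRoutine);

	/* TABLESAMPLE argument: the time limit in milliseconds, as float8 */
	tsm->parameterTypes = list_make1_oid(FLOAT8OID);

	/* A time-bounded sample cannot be reproduced exactly */
	tsm->repeatable_across_queries = false;
	tsm->repeatable_across_scans = false;

	/* The sample-size estimator shown in Example #2 */
	tsm->SampleScanGetSampleSize = system_time_samplescangetsamplesize;

	/* Remaining callbacks; assumed names for the rest of the method */
	tsm->InitSampleScan = system_time_initsamplescan;
	tsm->BeginSampleScan = system_time_beginsamplescan;
	tsm->NextSampleBlock = system_time_nextsampleblock;
	tsm->NextSampleTuple = system_time_nextsampletuple;
	tsm->EndSampleScan = NULL;	/* optional callback, omitted */

	PG_RETURN_POINTER(tsm);
}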