/*
 * Costing function.
 */
Datum
tsm_system_time_cost(PG_FUNCTION_ARGS)
{
	PlannerInfo	   *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Path		   *path = (Path *) PG_GETARG_POINTER(1);
	RelOptInfo	   *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
	List		   *args = (List *) PG_GETARG_POINTER(3);
	BlockNumber	   *pages = (BlockNumber *) PG_GETARG_POINTER(4);
	double		   *tuples = (double *) PG_GETARG_POINTER(5);
	Node		   *limitnode;
	int32			time;
	BlockNumber		relpages;
	double			reltuples;
	double			density;
	double			spc_random_page_cost;

	limitnode = linitial(args);
	limitnode = estimate_expression_value(root, limitnode);

	if (IsA(limitnode, RelabelType))
		limitnode = (Node *) ((RelabelType *) limitnode)->arg;

	if (IsA(limitnode, Const))
		time = DatumGetInt32(((Const *) limitnode)->constvalue);
	else
	{
		/* Default time (1s) if the estimation didn't return a Const. */
		time = 1000;
	}

	relpages = baserel->pages;
	reltuples = baserel->tuples;

	/* estimate the tuple density */
	if (relpages > 0)
		density = reltuples / (double) relpages;
	else
		density = (BLCKSZ - SizeOfPageHeaderData) / baserel->width;

	/*
	 * We equate the random page cost with the number of milliseconds it
	 * takes to read a random page.  This is far from accurate, but we have
	 * nothing better on which to base the predicted page reads.
	 */
	get_tablespace_page_costs(baserel->reltablespace,
							  &spc_random_page_cost,
							  NULL);

	/*
	 * The assumption here is that we'll never read less than 1% of the
	 * table's pages.  This is mainly because it is much less bad to
	 * overestimate than to underestimate, and using just
	 * spc_random_page_cost would probably lead to underestimates in
	 * general.
	 */
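	/*
	 * Hypothetical example: with time = 100 ms, spc_random_page_cost = 4.0
	 * and a 10000-page relation, this evaluates to
	 * Min(10000, Max(100 / 4.0, 10000 / 100)) = 100 pages.
	 */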
	*pages = Min(baserel->pages,
				 Max(time / spc_random_page_cost, baserel->pages / 100));
	*tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
	path->rows = *tuples;

	PG_RETURN_VOID();
}
/*
 * Sample size estimation.
 */
static void
system_rows_samplescangetsamplesize(PlannerInfo *root,
									RelOptInfo *baserel,
									List *paramexprs,
									BlockNumber *pages,
									double *tuples)
{
	Node	   *limitnode;
	int64		ntuples;
	double		npages;

	/* Try to extract an estimate for the limit rowcount */
	limitnode = (Node *) linitial(paramexprs);
	limitnode = estimate_expression_value(root, limitnode);

	if (IsA(limitnode, Const) &&
		!((Const *) limitnode)->constisnull)
	{
		ntuples = DatumGetInt64(((Const *) limitnode)->constvalue);
		if (ntuples < 0)
		{
			/* Default ntuples if the value is bogus */
			ntuples = 1000;
		}
	}
	else
	{
		/* Default ntuples if we didn't obtain a non-null Const */
		ntuples = 1000;
	}

	/* Clamp to the estimated relation size */
	if (ntuples > baserel->tuples)
		ntuples = (int64) baserel->tuples;
	ntuples = clamp_row_est(ntuples);

	if (baserel->tuples > 0 && baserel->pages > 0)
	{
		/* Estimate number of pages visited based on tuple density */
		double		density = baserel->tuples / (double) baserel->pages;

		npages = ntuples / density;
	}
	else
	{
		/* For lack of data, assume one tuple per page */
		npages = ntuples;
	}

	/* Clamp to sane value */
	npages = clamp_row_est(Min((double) baserel->pages, npages));

	*pages = npages;
	*tuples = ntuples;
}
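/*
 * Worked example for system_rows_samplescangetsamplesize (hypothetical
 * figures): a request for 500 rows against a relation with
 * reltuples = 100000 and relpages = 1000 gives a density of 100 tuples per
 * page, so the function reports *tuples = 500 and *pages = 5.
 */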
/*
 * Sample size estimation.
 */
static void
system_samplescangetsamplesize(PlannerInfo *root,
							   RelOptInfo *baserel,
							   List *paramexprs,
							   BlockNumber *pages,
							   double *tuples)
{
	Node	   *pctnode;
	float4		samplefract;

	/* Try to extract an estimate for the sample percentage */
	pctnode = (Node *) linitial(paramexprs);
	pctnode = estimate_expression_value(root, pctnode);

	if (IsA(pctnode, Const) &&
		!((Const *) pctnode)->constisnull)
	{
		samplefract = DatumGetFloat4(((Const *) pctnode)->constvalue);
		if (samplefract >= 0 && samplefract <= 100 && !isnan(samplefract))
			samplefract /= 100.0f;
		else
		{
			/* Default samplefract if the value is bogus */
			samplefract = 0.1f;
		}
	}
	else
	{
		/* Default samplefract if we didn't obtain a non-null Const */
		samplefract = 0.1f;
	}

	/* We'll visit a sample of the pages ... */
	*pages = clamp_row_est(baserel->pages * samplefract);

	/* ... and hopefully get a representative number of tuples from them */
	*tuples = clamp_row_est(baserel->tuples * samplefract);
}
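/*
 * Worked example for system_samplescangetsamplesize (hypothetical figures):
 * a sample percentage of 10 against a relation with relpages = 1000 and
 * reltuples = 100000 yields samplefract = 0.1, so the reported estimates
 * are *pages = 100 and *tuples = 10000.
 */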
/*
 * Costing function.
 */
Datum
tsm_bernoulli_cost(PG_FUNCTION_ARGS)
{
	PlannerInfo	   *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Path		   *path = (Path *) PG_GETARG_POINTER(1);
	RelOptInfo	   *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
	List		   *args = (List *) PG_GETARG_POINTER(3);
	BlockNumber	   *pages = (BlockNumber *) PG_GETARG_POINTER(4);
	double		   *tuples = (double *) PG_GETARG_POINTER(5);
	Node		   *pctnode;
	float4			samplesize;

	*pages = baserel->pages;

	pctnode = linitial(args);
	pctnode = estimate_expression_value(root, pctnode);

	if (IsA(pctnode, RelabelType))
		pctnode = (Node *) ((RelabelType *) pctnode)->arg;

	if (IsA(pctnode, Const))
	{
		samplesize = DatumGetFloat4(((Const *) pctnode)->constvalue);
		samplesize /= 100.0;
	}
	else
	{
		/* Default samplesize if the estimation didn't return a Const. */
		samplesize = 0.1f;
	}

	*tuples = path->rows * samplesize;
	path->rows = *tuples;

	PG_RETURN_VOID();
}
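/*
 * Worked example for tsm_bernoulli_cost (hypothetical figures): a sample
 * percentage of 10 and path->rows = 100000 give samplesize = 0.1, so
 * *tuples (and the adjusted path->rows) becomes 10000, while *pages stays
 * at the full baserel->pages because Bernoulli sampling examines every
 * page.
 */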
/*
 * Sample size estimation.
 */
static void
system_time_samplescangetsamplesize(PlannerInfo *root,
									RelOptInfo *baserel,
									List *paramexprs,
									BlockNumber *pages,
									double *tuples)
{
	Node	   *limitnode;
	double		millis;
	double		spc_random_page_cost;
	double		npages;
	double		ntuples;

	/* Try to extract an estimate for the limit time spec */
	limitnode = (Node *) linitial(paramexprs);
	limitnode = estimate_expression_value(root, limitnode);

	if (IsA(limitnode, Const) &&
		!((Const *) limitnode)->constisnull)
	{
		millis = DatumGetFloat8(((Const *) limitnode)->constvalue);
		if (millis < 0 || isnan(millis))
		{
			/* Default millis if the value is bogus */
			millis = 1000;
		}
	}
	else
	{
		/* Default millis if we didn't obtain a non-null Const */
		millis = 1000;
	}

	/* Get the planner's idea of cost per page read */
	get_tablespace_page_costs(baserel->reltablespace,
							  &spc_random_page_cost,
							  NULL);

	/*
	 * Estimate the number of pages we can read by assuming that the cost
	 * figure is expressed in milliseconds.  This is completely, unmistakably
	 * bogus, but we have to do something to produce an estimate and there's
	 * no better answer.
	 */
	if (spc_random_page_cost > 0)
		npages = millis / spc_random_page_cost;
	else
		npages = millis;		/* even more bogus, but whatcha gonna do? */

	/* Clamp to sane value */
	npages = clamp_row_est(Min((double) baserel->pages, npages));

	if (baserel->tuples > 0 && baserel->pages > 0)
	{
		/* Estimate number of tuples returned based on tuple density */
		double		density = baserel->tuples / (double) baserel->pages;

		ntuples = npages * density;
	}
	else
	{
		/* For lack of data, assume one tuple per page */
		ntuples = npages;
	}

	/* Clamp to the estimated relation size */
	ntuples = clamp_row_est(Min(baserel->tuples, ntuples));

	*pages = npages;
	*tuples = ntuples;
}
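/*
 * Worked example for system_time_samplescangetsamplesize (hypothetical
 * figures): millis = 1000 and spc_random_page_cost = 4.0 give npages = 250;
 * with relpages = 10000 and reltuples = 500000 (density 50 tuples/page),
 * the reported estimates are *pages = 250 and *tuples = 12500.
 */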