/*
 * Sample size estimation.
 */
static void
system_rows_samplescangetsamplesize(PlannerInfo *root,
									RelOptInfo *baserel,
									List *paramexprs,
									BlockNumber *pages,
									double *tuples)
{
	Node	   *rowcount_expr;
	int64		est_tuples = 1000;	/* default when the limit is unknown or bogus */
	double		est_pages;

	/*
	 * Reduce the first parameter expression to a constant if possible, and
	 * take it as the requested row limit.  A NULL or negative value leaves
	 * the default above in place.
	 */
	rowcount_expr = estimate_expression_value(root,
											  (Node *) linitial(paramexprs));

	if (IsA(rowcount_expr, Const) &&
		!((Const *) rowcount_expr)->constisnull)
	{
		int64		requested = DatumGetInt64(((Const *) rowcount_expr)->constvalue);

		if (requested >= 0)
			est_tuples = requested;
	}

	/* Clamp to the estimated relation size */
	if (est_tuples > baserel->tuples)
		est_tuples = (int64) baserel->tuples;
	est_tuples = clamp_row_est(est_tuples);

	/* Convert the tuple count to a page count via the tuple density */
	if (baserel->tuples > 0 && baserel->pages > 0)
		est_pages = est_tuples / (baserel->tuples / (double) baserel->pages);
	else
		est_pages = est_tuples;	/* no stats: assume one tuple per page */

	/* Clamp to sane value */
	est_pages = clamp_row_est(Min((double) baserel->pages, est_pages));

	*pages = est_pages;
	*tuples = est_tuples;
}
/*
 * adjust_rows: tweak estimated row numbers according to the hint.
 */
static double
adjust_rows(double rows, RowsHint *hint)
{
	double		result = 0.0;	/* keep compiler quiet */

	/* Apply the hint's arithmetic to the planner's estimate */
	switch (hint->value_type)
	{
		case RVT_ABSOLUTE:
			result = hint->rows;
			break;
		case RVT_ADD:
			result = rows + hint->rows;
			break;
		case RVT_SUB:
			result = rows - hint->rows;
			break;
		case RVT_MULTI:
			result = rows * hint->rows;
			break;
		default:
			Assert(false);		/* unrecognized rows value type */
			break;
	}

	hint->base.state = HINT_STATE_USED;

	/* Warn before clamping so the user learns their hint was overridden */
	if (result < 1.0)
		ereport(WARNING,
				(errmsg("Force estimate to be at least one row, to avoid possible divide-by-zero when interpolating costs : %s",
						hint->base.hint_str)));

	result = clamp_row_est(result);

	elog(DEBUG1, "adjusted rows %d to %d", (int) rows, (int) result);

	return result;
}
/*
 * Sample size estimation.
 */
static void
system_samplescangetsamplesize(PlannerInfo *root,
							   RelOptInfo *baserel,
							   List *paramexprs,
							   BlockNumber *pages,
							   double *tuples)
{
	Node	   *pct_expr;
	float4		fraction = 0.1f;	/* default when no usable percentage is given */

	/* Try to reduce the percentage argument to a constant */
	pct_expr = estimate_expression_value(root, (Node *) linitial(paramexprs));

	if (IsA(pct_expr, Const) &&
		!((Const *) pct_expr)->constisnull)
	{
		float4		pct = DatumGetFloat4(((Const *) pct_expr)->constvalue);

		/* Accept only sane percentages; a bogus value keeps the default */
		if (pct >= 0 && pct <= 100 && !isnan(pct))
			fraction = pct / 100.0f;
	}

	/* We'll visit a sample of the pages ... */
	*pages = clamp_row_est(baserel->pages * fraction);

	/* ... and hopefully get a representative number of tuples from them */
	*tuples = clamp_row_est(baserel->tuples * fraction);
}
/*
 * Sample size estimation.
 */
static void
system_time_samplescangetsamplesize(PlannerInfo *root,
									RelOptInfo *baserel,
									List *paramexprs,
									BlockNumber *pages,
									double *tuples)
{
	Node	   *time_expr;
	double		time_ms = 1000;	/* default when no usable limit is given */
	double		spc_random_page_cost;
	double		page_est;
	double		tuple_est;

	/* Try to reduce the time-limit argument to a constant */
	time_expr = estimate_expression_value(root, (Node *) linitial(paramexprs));

	if (IsA(time_expr, Const) &&
		!((Const *) time_expr)->constisnull)
	{
		double		val = DatumGetFloat8(((Const *) time_expr)->constvalue);

		/* Accept only sane values; a bogus one keeps the default */
		if (!(val < 0 || isnan(val)))
			time_ms = val;
	}

	/* Get the planner's idea of cost per page read */
	get_tablespace_page_costs(baserel->reltablespace,
							  &spc_random_page_cost,
							  NULL);

	/*
	 * Estimate the number of pages we can read by assuming that the cost
	 * figure is expressed in milliseconds.  This is completely, unmistakably
	 * bogus, but we have to do something to produce an estimate and there's
	 * no better answer.
	 */
	page_est = (spc_random_page_cost > 0) ?
		time_ms / spc_random_page_cost :
		time_ms;				/* even more bogus, but whatcha gonna do? */

	/* Clamp to sane value */
	page_est = clamp_row_est(Min((double) baserel->pages, page_est));

	/* Derive a tuple count from the page count via the tuple density */
	if (baserel->tuples > 0 && baserel->pages > 0)
		tuple_est = page_est * (baserel->tuples / (double) baserel->pages);
	else
		tuple_est = page_est;	/* no stats: assume one tuple per page */

	/* Clamp to the estimated relation size */
	tuple_est = clamp_row_est(Min(baserel->tuples, tuple_est));

	*pages = page_est;
	*tuples = tuple_est;
}