Code example #1
File: infogap.c Project: montyvesselinov/MADS
int infogap_obs( struct opt_data *op )
{
	int i, j, ig_index, status, success = 0, count, neval_total, njac_total, no_memory = 0;
	double *opt_params, phi_min = HUGE_VAL;
	int ( *optimize_func )( struct opt_data * op );
	if( ( opt_params = ( double * ) malloc( op->pd->nOptParam * sizeof( double ) ) ) == NULL ) no_memory = 1;
	if( no_memory ) { tprintf( "Not enough memory!\n" ); return( 0 ); }
	tprintf( "\n\nInfo-gap analysis: observation step %g observation domain %g\nInfo-gap search: ", op->cd->obsstep, op->cd->obsdomain );
	if( op->cd->obsstep > DBL_EPSILON ) tprintf( "maximum\n" ); else tprintf( "minimum\n" );
	tprintf( "Number of predictions %d\n", op->preds->nTObs );
	for( i = 0; i < op->preds->nTObs; i++ )
	{
		// op->preds->obs_best are updated in mads_func.c
		if( op->cd->obsstep > DBL_EPSILON ) op->preds->obs_best[i] = -HUGE_VAL; // max search
		else op->preds->obs_best[i] = HUGE_VAL; // min search
		j = op->preds->obs_index[i];
		op->od->obs_weight[j] *= -1; // flip the weight sign for the prediction observations (flipped back at the end of the analysis)
	}
	ig_index = op->preds->obs_index[0]; // only the first prediction is used
	tprintf( "Info-gap observation:\n" );
	tprintf( "%-20s: info-gap target %12g weight %12g range %12g - %12g\n", op->od->obs_id[ig_index], op->od->obs_target[ig_index], op->od->obs_weight[ig_index], op->od->obs_min[ig_index], op->od->obs_max[ig_index] );
	if( op->cd->obsstep > DBL_EPSILON ) { op->od->obs_target[ig_index] = op->od->obs_min[ig_index]; op->od->obs_min[ig_index] -= op->cd->obsstep / 2; } // obsstep is positive
	else { op->od->obs_target[ig_index] = op->od->obs_max[ig_index]; op->od->obs_max[ig_index] -= op->cd->obsstep / 2; } // obsstep is negative
	if( strncasecmp( op->cd->opt_method, "lm", 2 ) == 0 ) optimize_func = optimize_lm; // Define optimization method: LM
	else optimize_func = optimize_pso; // Define optimization method: PSO
	neval_total = njac_total = count = 0;
	while( 1 )
	{
		tprintf( "\nInfo-gap analysis #%d\n", ++count );
		tprintf( "%-20s: info-gap target %12g weight %12g range %12g - %12g\n", op->od->obs_id[ig_index], op->od->obs_target[ig_index], op->od->obs_weight[ig_index], op->od->obs_min[ig_index], op->od->obs_max[ig_index] );
		op->cd->neval = op->cd->njac = 0;
		if( op->cd->analysis_type == IGRND ) status = igrnd( op );
		else status = optimize_func( op );
		neval_total += op->cd->neval;
		njac_total += op->cd->njac;
		if( !status ) break;
		if( op->success )
		{
			for( i = 0; i < op->pd->nOptParam; i++ ) opt_params[i] = op->pd->var[op->pd->var_index[i]];
			for( i = 0; i < op->od->nTObs; i++ ) op->od->obs_best[i] = op->od->obs_current[i];
			phi_min = op->phi;
			success = 1;
		}
		tprintf( "Intermediate info-gap results for model predictions:\n" );
		tprintf( "%-20s: info-gap target %12g weight %12g range %12g - %12g\n", op->od->obs_id[ig_index], op->od->obs_target[ig_index], op->od->obs_weight[ig_index], op->od->obs_min[ig_index], op->od->obs_max[ig_index] );
		for( i = 0; i < op->preds->nTObs; i++ )
		{
			j = op->preds->obs_index[i];
			if( op->cd->obsstep > DBL_EPSILON ) tprintf( "%-20s: Current info-gap max %12g Observation step %g Observation domain %g Success %d\n", op->od->obs_id[j], op->preds->obs_best[i], op->cd->obsstep, op->cd->obsdomain, op->success );
			else                           tprintf( "%-20s: Current info-gap min %12g Observation step %g Observation domain %g Success %d\n", op->od->obs_id[j], op->preds->obs_best[i], op->cd->obsstep, op->cd->obsdomain, op->success );
		}
		if( !op->success ) break;
		if( op->cd->debug ) print_results( op, 1 );
		save_results( 1, "infogap", op, op->gd );
		op->od->obs_target[ig_index] += op->cd->obsstep;
		if( op->cd->obsstep > DBL_EPSILON ) // max search
		{
			if( op->od->obs_target[ig_index] > op->od->obs_max[ig_index] ) break;
			if( fabs( op->preds->obs_best[0] - op->od->obs_max[ig_index] ) < DBL_EPSILON ) break;
			op->od->obs_min[ig_index] += op->cd->obsstep;
			j = ( int )( ( double )( op->preds->obs_best[0] - op->od->obs_min[ig_index] + op->cd->obsstep / 2 ) / op->cd->obsstep + 1 ); // number of obsstep increments already covered by the best prediction; used to skip ahead
			op->od->obs_target[ig_index] += op->cd->obsstep * j;
			op->od->obs_min[ig_index] += op->cd->obsstep * j;
			if( op->od->obs_target[ig_index] > op->od->obs_max[ig_index] ) op->od->obs_target[ig_index] = op->od->obs_max[ig_index];
			if( op->od->obs_min[ig_index] > op->od->obs_max[ig_index] ) op->od->obs_min[ig_index] = op->od->obs_max[ig_index];
		}
		else // min search
		{
			if( op->od->obs_target[ig_index] < op->od->obs_min[ig_index] ) break;
			if( fabs( op->preds->obs_best[0] - op->od->obs_min[ig_index] ) < DBL_EPSILON ) break;
			op->od->obs_max[ig_index] += op->cd->obsstep; // obsstep is negative
			j = ( int )( ( double )( op->od->obs_max[ig_index] - op->preds->obs_best[0] - op->cd->obsstep / 2 ) / -op->cd->obsstep + 1 ); // obsstep is negative
			op->od->obs_target[ig_index] += op->cd->obsstep * j;
			op->od->obs_max[ig_index] += op->cd->obsstep * j;
			if( op->od->obs_target[ig_index] < op->od->obs_min[ig_index] ) op->od->obs_target[ig_index] = op->od->obs_min[ig_index];
			if( op->od->obs_max[ig_index] < op->od->obs_min[ig_index] ) op->od->obs_max[ig_index] = op->od->obs_min[ig_index];
		}
	}
	op->cd->neval = neval_total; // provide the correct number of total evaluations
	op->cd->njac = njac_total; // provide the correct number of total jacobian evaluations
	if( success )
	{
		for( i = 0; i < op->pd->nOptParam; i++ ) op->cd->var[i] = op->pd->var[op->pd->var_index[i]] = op->pd->var_current[i] = op->pd->var_best[i] = opt_params[i];
		for( i = 0; i < op->od->nTObs; i++ ) op->od->obs_current[i] = op->od->obs_best[i];
		op->phi = phi_min;
		op->success = success;
	}
	else tprintf( "\nWARNING: Info-gap analysis failed to find acceptable solutions!\n" );
	tprintf( "\nInfo-gap results for model predictions:\n" );
	for( i = 0; i < op->preds->nTObs; i++ )
	{
		j = op->preds->obs_index[i];
		if( op->cd->obsstep > DBL_EPSILON ) tprintf( "%-20s: Info-gap max %12g Observation step %g Observation domain %g\n", op->od->obs_id[j], op->preds->obs_best[i], op->cd->obsstep, op->cd->obsdomain ); // max search
		else                           tprintf( "%-20s: Info-gap min %12g Observation step %g Observation domain %g\n", op->od->obs_id[j], op->preds->obs_best[i], op->cd->obsstep, op->cd->obsdomain ); // min search
		op->od->obs_target[j] = op->preds->obs_target[i];
		op->od->obs_min[j] = op->preds->obs_min[i];
		op->od->obs_max[j] = op->preds->obs_max[i];
		op->od->obs_weight[j] *= -1; // restore the original weight sign
	}
	tprintf( "\n" );
	free( opt_params );
	print_results( op, 1 );
	save_results( 1, "", op, op->gd );
	return( 1 );
}
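
The stepping logic in the while loop above is the heart of the analysis: after each successful optimization, the info-gap target is not advanced by a single obsstep but jumped past every step value that the best prediction found so far already satisfies. Below is a minimal C++ sketch of that skip-ahead rule for the max-search branch; the struct IgWindow, the function advance_max and the numbers in main are hypothetical and only mirror the obs_target / obs_min / obs_max / obsstep fields used in infogap_obs.

#include <algorithm>
#include <cstdio>

// Hypothetical, simplified view of the max-search update in infogap_obs():
// the target and the lower bound of the admissible window move up by one
// step, then jump past every step value the optimizer has already reached.
struct IgWindow { double target, lo, hi; };

// Returns false when the target has moved past the upper bound (loop ends).
static bool advance_max( IgWindow &w, double best, double step )
{
	w.target += step;                   // next candidate target
	if( w.target > w.hi ) return false; // outside the observation domain
	w.lo += step;                       // the window advances with the target
	// count the step values already exceeded by the best prediction
	// (the + step / 2 makes the truncation behave like rounding)
	int j = ( int )( ( best - w.lo + step / 2 ) / step + 1 );
	w.target = std::min( w.target + step * j, w.hi );
	w.lo = std::min( w.lo + step * j, w.hi );
	return true;
}

int main( void )
{
	IgWindow w = { 2.0, 1.5, 10.0 };   // target, lower bound, upper bound
	double step = 1.0, best = 4.2;     // the optimizer already reached 4.2
	if( advance_max( w, best, step ) ) // the next target lands above 4.2
		std::printf( "next target %g, window %g - %g\n", w.target, w.lo, w.hi );
	return 0;
}

The min-search branch is symmetric, with obsstep negative and the roles of the lower and upper bounds exchanged.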
Code example #2
File: whole-program.cpp Project: Rican7/hhvm
void optimize(Index& index, php::Program& program) {
    assert(check(program));
    trace_time tracer("optimize");
    SCOPE_EXIT { state_after("optimize", program); };

    // Counters, just for debug printing.
    std::atomic<uint32_t> total_funcs{0};
    auto round = uint32_t{0};

    /*
     * Algorithm:
     *
     * Start by running an analyze pass on every function.  During
     * analysis, information about functions or classes will be
     * requested from the Index, which initially won't really know much,
     * but will record a dependency.  This part is done in parallel: no
     * passes are mutating anything, just reading from the Index.
     *
     * After a pass, we do a single-threaded "update" step to prepare
     * for the next pass: for each function that was analyzed, note the
     * facts we learned that may aid analyzing other functions in the
     * program, and register them in the index.  At this point, if any
     * of these facts are more useful than they used to be, add all the
     * Contexts that had a dependency on the new information to the work
     * list again, in case they can do better based on the new fact.
     *
     * Repeat until the work list is empty.
     */
    auto work = initial_work(program);
    while (!work.empty()) {
        auto const results = [&] {
            trace_time trace(
                "analyzing",
                folly::format("round {} -- {} work items", round, work.size()).str()
            );
            return parallel_map(
                work,
                [&] (const Context& ctx) -> folly::Optional<FuncAnalysis> {
                    total_funcs.fetch_add(1, std::memory_order_relaxed);
                    return analyze_func(index, ctx);
                }
            );
        }();

        ++round;
        trace_time update_time("updating");

        std::set<Context> revisit;
        for (auto i = size_t{0}; i < results.size(); ++i) {
            auto& result = *results[i];

            assert(result.ctx.func == work[i].func);
            assert(result.ctx.cls == work[i].cls);
            assert(result.ctx.unit == work[i].unit);

            auto deps = index.refine_return_type(
                            result.ctx.func, result.inferredReturn
                        );
            for (auto& d : deps) revisit.insert(d);
        }

        work.clear();
        std::copy(begin(revisit), end(revisit), std::back_inserter(work));
    }

    if (Trace::moduleEnabledRelease(Trace::hhbbc_time, 1)) {
        Trace::traceRelease("total function visits %u\n", total_funcs.load());
    }

    /*
     * Finally, use the results of all these iterations to perform
     * optimization.  This reanalyzes every function using our
     * now-very-updated Index, and then runs optimize_func with the
     * results.
     *
     * We do this in parallel: all the shared information is queried out
     * of the index, and each thread is allowed to modify the bytecode
     * for the function it is looking at.
     *
     * NOTE: currently they can't modify anything other than the
     * bytecode/Blocks, because other threads may be doing unlocked
     * queries to php::Func and php::Class structures.
     */
    trace_time final_pass("final pass");
    work = initial_work(program);
    parallel_for_each(
        work,
        [&] (Context ctx) {
            optimize_func(index, analyze_func(index, ctx));
        }
    );
}
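
The comment block at the top of optimize() describes a dependency-driven worklist fixed point: analyze every item, record which contexts depend on which facts, publish improved facts in a single-threaded update step, and re-queue only the dependents of whatever improved, until the work list drains. Below is a minimal, generic C++ sketch of that pattern; the types Item and Fact and the functions analyze, refine and fixed_point are hypothetical stand-ins for Context, FuncAnalysis, analyze_func and Index::refine_return_type.

#include <map>
#include <set>
#include <utility>
#include <vector>

// Hypothetical stand-ins for the roles played by Context / FuncAnalysis and
// the Index in optimize() above.
using Item = int;
using Fact = int;

std::map<Item, Fact>           facts;       // best fact known for each item
std::map<Item, std::set<Item>> dependents;  // items to revisit when a fact improves

// Placeholder for analyze_func(index, ctx): derive a fact for one item.
Fact analyze(Item /*it*/) { return 0; }

// Placeholder for Index::refine_return_type: publish the new fact and, only
// if it improved on what was known before, return the items that depend on it.
std::set<Item> refine(Item it, Fact f) {
    auto found = facts.find(it);
    if (found == facts.end() || f > found->second) {
        facts[it] = f;
        return dependents[it];
    }
    return {};
}

void fixed_point(std::vector<Item> work) {
    while (!work.empty()) {
        // Analyze pass: read-only with respect to the published facts, so the
        // real implementation can run it in parallel (parallel_map above).
        std::vector<std::pair<Item, Fact>> results;
        for (Item it : work) results.emplace_back(it, analyze(it));

        // Single-threaded update step: register the new facts and collect the
        // contexts that may now produce better results.
        std::set<Item> revisit;
        for (auto& r : results)
            for (Item d : refine(r.first, r.second)) revisit.insert(d);

        work.assign(revisit.begin(), revisit.end());
    }
}

int main() {
    dependents[1] = {2};   // item 2 depends on facts learned about item 1
    fixed_point({1, 2});   // round 1 analyzes both, round 2 revisits item 2
    return 0;
}

Termination follows because refine re-queues dependents only when a fact strictly improves, which is exactly the "repeat until the work list is empty" condition in the comment above.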