Example #1
0
void
OMR::Simplifier::prePerformOnBlocks()
   {
   // Reset per-pass state before any block is simplified.
   _invalidateUseDefInfo      = false;
   _alteredBlock = false;
   _blockRemoved = false;

   _useDefInfo = optimizer()->getUseDefInfo();
   _valueNumberInfo = optimizer()->getValueNumberInfo();
   _containingStructure = NULL;

   if (_reassociate)
      {
      _hashTable.reset();
      _hashTable.init(1000, true);

      TR_ASSERT(comp()->getFlowGraph()->getStructure(), "assertion failure");
      computeInvarianceOfAllStructures(comp(), comp()->getFlowGraph()->getStructure());
      }

   // Fix: these two calls are unconditional; the original indentation
   // misleadingly suggested they belonged to the if (_reassociate) block.
   _ccHashTab.reset();
   _ccHashTab.init(64, true);

   if (trace())
      {
      comp()->dumpMethodTrees("Trees before simplification");
      }
   }
// Fixes alpha to the given value and optimizes the remaining side
// parameters (nu, and the tamura92 base-model parameters when the base
// replacement model is tamura92); stores the results in _newAlpha/_newNu
// and returns the best log-likelihood found.
MDOUBLE ssrvDistanceSeqs2Tree::calcSideInfoGivenTreeAndAlpha(const sequenceContainer &sc, const tree &et, MDOUBLE alpha) 
{
	_newAlpha = alpha;
	(static_cast<gammaDistribution*>(_spPtr->distr()))->setAlpha(alpha);

	// The original if/else branches were byte-identical except for the third
	// bestParamSSRV flag (optimize tamura92 params only when the base model
	// is tamura92).  Compute that flag once and share the rest.
	const bool optimizeTamuraParams =
		dynamic_cast<tamura92*>(
			static_cast<replacementModelSSRV*>(_spPtr->getPijAccelerator()->getReplacementModel())
			->getBaseRM()) != NULL;

	bestParamSSRV optimizer(false, true, optimizeTamuraParams, false);
	optimizer(et, sc, *(static_cast<stochasticProcessSSRV*>(_spPtr)), _weights,
			  15, 15, _epsilonLikelihoodImprovement4alphaOptimiz, _epsilonLikelihoodImprovement,
			  _epsilonLikelihoodImprovement4BBL, _maxIterationsBBL, 5);
	_newNu = optimizer.getBestNu();
	return optimizer.getBestL();
}
Example #3
0
void IdwInterpolator::_buildModel()
{
  _index.reset();

  if (_p < 0.0)
  {
    NelderMead optimizer(1, new IdwOptimizeFunction(*this), _stopDelta);
    Vector result;
    result.prepare(1);

    _p = 1.0;
    result[0] = _p;
    optimizer.step(result, -estimateError());

    _p = 4.0;
    result[0] = _p;
    optimizer.step(result, -estimateError());

    int iterations = 0;
    while (optimizer.done() == false && iterations <= _maxAllowedPerLoopOptimizationIterations)
    {
      double e = -estimateError();
      //cout << "error: " << e << " count: " << iterations << endl;
      result = optimizer.step(result, e);
      iterations++;
      _p = result[0];
    }
    if (iterations > _iterations)
    {
      _iterations = iterations;
    }
  }
}
Example #4
0
// Contracts `box` using the polytope hull of the linearized constraint
// system.  May empty the box; rethrows EmptyBoxException to the caller.
void CtcPolytopeHull::contract(IntervalVector& box) {

	// Only contract boxes whose maximal diameter is within the allowed range.
	if (!(limit_diam_box.contains(box.max_diam()))) return;
	// is it necessary?  YES (BNE) Soplex can give false infeasible results with large numbers

	try {
		// Update the bounds of the variables in the LP solver.
		mylinearsolver->initBoundVar(box);

		// linearization() returns the number of constraints in the
		// linearized system; nothing to do when it produced none.
		int cont = lr.linearization(box,mylinearsolver);

		if(cont<1)  return;
		optimizer(box);

		mylinearsolver->cleanConst();
	}
	catch(EmptyBoxException&) {
		box.set_empty(); // empty the box before exiting in case of EmptyBoxException
		mylinearsolver->cleanConst();
		// Fix: rethrow the original exception instead of constructing a new
		// one, preserving any state carried by the caught object.
		throw;
	}
}
Example #5
0
// Builds the IDW model: when the exponent _p is negative (meaning "not yet
// chosen"), searches for a good value with a 1-D Nelder-Mead optimization
// that minimizes the estimated interpolation error.
void IdwInterpolator::_buildModel()
{
  _index.reset();

  if (_p < 0.0)
  {
    NelderMead optimizer(1, new IdwOptimizeFunction(*this), _stopDelta);
    Vector result;
    result.prepare(1);

    // Seed the simplex with two starting guesses for the exponent.
    _p = 1.0;
    result[0] = _p;
    optimizer.step(result, -estimateError());

    _p = 4.0;
    result[0] = _p;
    optimizer.step(result, -estimateError());

    // NOTE(review): this loop has no iteration cap and relies solely on
    // optimizer.done() — confirm NelderMead is guaranteed to converge.
    // The cout below looks like leftover debug output.
    int count = 0;
    while (optimizer.done() == false)
    {
      double e = -estimateError();
      cout << "error: " << e << " count: " << count++ << endl;
      result = optimizer.step(result, e);
      _p = result[0];
    }
  }
}
//*************************************************************************
TEST (EssentialMatrixFactor2, extraMinimization) {
    // Additional test with camera moving in positive X direction

    // We start with a factor graph and add constraints to it
    // Noise sigma is 1, assuming pixel measurements
    NonlinearFactorGraph graph;
    for (size_t i = 0; i < data.number_tracks(); i++)
        graph.add(EssentialMatrixFactor2(100, i, pA(i), pB(i), model2, K));

    // Check error at ground truth
    // (key 100 holds the essential matrix; keys 0..N-1 hold inverse depths)
    Values truth;
    truth.insert(100, trueE);
    for (size_t i = 0; i < data.number_tracks(); i++) {
        Point3 P1 = data.tracks[i].p;
        truth.insert(i, double(baseline / P1.z()));
    }
    EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

    // Optimize, starting from the ground-truth values
    LevenbergMarquardtParams parameters;
    // parameters.setVerbosity("ERROR");
    LevenbergMarquardtOptimizer optimizer(graph, truth, parameters);
    Values result = optimizer.optimize();

    // Check result: recovered E and inverse depths should stay near truth
    EssentialMatrix actual = result.at<EssentialMatrix>(100);
    EXPECT(assert_equal(trueE, actual, 1e-1));
    for (size_t i = 0; i < data.number_tracks(); i++)
        EXPECT_DOUBLES_EQUAL(truth.at<double>(i), result.at<double>(i), 1e-1);

    // Check error at result
    EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);
}
Example #7
0
// Factorizes the coordinate-list ratings matrix `data` (max of row 0 + 1
// gives the user count, max of row 1 + 1 the item count) into rank-`rank`
// factor matrices plus bias vectors, by running SGD on a BiasSVDFunction.
void BiasSVD<OptimizerType>::Apply(const arma::mat& data,
                                   const size_t rank,
                                   arma::mat& u,
                                   arma::mat& v,
                                   arma::vec& p,
                                   arma::vec& q)
{
  // batchSize is 1 in our implementation of Bias SVD.
  // batchSize other than 1 has not been supported yet.
  const int batchSize = 1;
  Log::Warn << "The batch size for optimizing BiasSVD is 1."
      << std::endl;

  // Make the optimizer object using a BiasSVDFunction object.
  BiasSVDFunction<arma::mat> biasSVDFunc(data, rank, lambda);
  ens::StandardSGD optimizer(alpha, batchSize,
      iterations * data.n_cols);

  // Get optimized parameters.
  arma::mat parameters = biasSVDFunc.GetInitialPoint();
  optimizer.Optimize(biasSVDFunc, parameters);

  // Constants for extracting user and item matrices.
  const size_t numUsers = max(data.row(0)) + 1;
  const size_t numItems = max(data.row(1)) + 1;

  // Extract user and item matrices, user and item bias from the optimized
  // parameters.  Columns [0, numUsers) are the user side, columns
  // [numUsers, numUsers+numItems) the item side; row `rank` holds biases.
  // NOTE(review): u is taken from the item-side columns and v from the
  // user-side columns — confirm this matches the intended u/v convention.
  u = parameters.submat(0, numUsers, rank - 1, numUsers + numItems - 1).t();
  v = parameters.submat(0, 0, rank - 1, numUsers - 1);
  p = parameters.row(rank).subvec(numUsers, numUsers + numItems - 1).t();
  q = parameters.row(rank).subvec(0, numUsers - 1).t();
}
// Constructs and immediately trains a sparse autoencoder on `data` by
// minimizing a SparseAutoencoderFunction (weight decay `lambda`, sparsity
// penalty weight `beta`, target activation `rho`) with OptimizerType.
SparseAutoencoder<OptimizerType>::SparseAutoencoder(const arma::mat& data,
                                                    const size_t visibleSize,
                                                    const size_t hiddenSize,
                                                    double lambda,
                                                    double beta,
                                                    double rho) :
    visibleSize(visibleSize),
    hiddenSize(hiddenSize),
    lambda(lambda),
    beta(beta),
    rho(rho)
{
  SparseAutoencoderFunction encoderFunction(data, visibleSize, hiddenSize,
                                            lambda, beta, rho);
  OptimizerType<SparseAutoencoderFunction> optimizer(encoderFunction);

  parameters = encoderFunction.GetInitialPoint();

  // Train the model.  The timer brackets only the optimization itself.
  Timer::Start("sparse_autoencoder_optimization");
  const double out = optimizer.Optimize(parameters);
  Timer::Stop("sparse_autoencoder_optimization");

  Log::Info << "SparseAutoencoder::SparseAutoencoder(): final objective of "
      << "trained model is " << out << "." << std::endl;
}
//*************************************************************************
TEST (EssentialMatrixFactor2, minimization) {
    // Here we want to optimize for E and inverse depths at the same time

    // We start with a factor graph and add constraints to it
    // Noise sigma is 1cm, assuming metric measurements
    NonlinearFactorGraph graph;
    for (size_t i = 0; i < 5; i++)
        graph.add(EssentialMatrixFactor2(100, i, pA(i), pB(i), model2));

    // Check error at ground truth
    // (key 100 holds the essential matrix; keys 0..4 hold inverse depths)
    Values truth;
    truth.insert(100, trueE);
    for (size_t i = 0; i < 5; i++) {
        Point3 P1 = data.tracks[i].p;
        truth.insert(i, double(baseline / P1.z()));
    }
    EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

    // Optimize, starting from the ground-truth values
    LevenbergMarquardtParams parameters;
    // parameters.setVerbosity("ERROR");
    LevenbergMarquardtOptimizer optimizer(graph, truth, parameters);
    Values result = optimizer.optimize();

    // Check result: recovered E and inverse depths should stay near truth
    EssentialMatrix actual = result.at<EssentialMatrix>(100);
    EXPECT(assert_equal(trueE, actual, 1e-1));
    for (size_t i = 0; i < 5; i++)
        EXPECT_DOUBLES_EQUAL(truth.at<double>(i), result.at<double>(i), 1e-1);

    // Check error at result
    EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);
}
Example #10
0
/**
 * Entry point for an optimization pass.
 */
size_t optimize(
    std::unique_ptr<SingleImplAnalysis> analysis,
    const ClassHierarchy& ch,
    Scope& scope, const SingleImplConfig& config) {
  OptimizationImpl optimizer(std::move(analysis), ch);
  return optimizer.optimize(scope, config);
}
Example #11
0
// Registers the program's global function declarations and var declarations
// on the global object before execution.  Returns an exception object on
// code-block creation failure, or 0 on success.
JSObject* ProgramExecutable::initializeGlobalProperties(VM& vm, CallFrame* callFrame, JSScope* scope)
{
    RELEASE_ASSERT(scope);
    JSGlobalObject* globalObject = scope->globalObject();
    RELEASE_ASSERT(globalObject);
    ASSERT(&globalObject->vm() == &vm);

    JSObject* exception = 0;
    UnlinkedProgramCodeBlock* unlinkedCodeBlock = globalObject->createProgramCodeBlock(callFrame, this, &exception);
    if (exception)
        return exception;

    m_unlinkedProgramCodeBlock.set(vm, this, unlinkedCodeBlock);

    // Batches the structure transitions caused by the addFunction/addVar
    // calls below for the lifetime of this scope object.
    BatchedTransitionOptimizer optimizer(vm, globalObject);

    for (size_t i = 0, numberOfFunctions = unlinkedCodeBlock->numberOfFunctionDecls(); i < numberOfFunctions; ++i) {
        UnlinkedFunctionExecutable* unlinkedFunctionExecutable = unlinkedCodeBlock->functionDecl(i);
        ASSERT(!unlinkedFunctionExecutable->name().isEmpty());
        globalObject->addFunction(callFrame, unlinkedFunctionExecutable->name());
        if (vm.typeProfiler() || vm.controlFlowProfiler()) {
            // Record the declaration's source range as not-yet-executed for
            // the type / control-flow profilers.
            vm.functionHasExecutedCache()->insertUnexecutedRange(sourceID(), 
                unlinkedFunctionExecutable->typeProfilingStartOffset(), 
                unlinkedFunctionExecutable->typeProfilingEndOffset());
        }
    }

    const VariableEnvironment& variableDeclarations = unlinkedCodeBlock->variableDeclarations();
    for (auto& entry : variableDeclarations) {
        ASSERT(entry.value.isVar());
        globalObject->addVar(callFrame, Identifier::fromUid(&vm, entry.key.get()));
    }
    return 0;
}
Example #12
0
// --------------------------------------------------------
// --------------------------------------------------------
// Re-optimizes the magnifying-glass position when the source and
// destination glasses overlap, orienting the optimizer along the
// source-to-destination direction.
void ClutchHandle::optimizePosition(float aspect_ratio)
{
	Q_ASSERT(_radius_mode == OriginalRadius);
	//QPointF c = _clutch_handle->posT();
	//QPointF a = _absolute_handle->posT();

	qDebug() << "pre src pos" << _mg->srcCenterT();
	qDebug() << "pre src radius" << _mg->srcRadiusT();
	qDebug() << "pre dst pos" << _mg->dstCenterT();
	qDebug() << "pre dst radius" << _mg->dstRadiusT();

	// Vector from the source glass centre to the destination glass centre.
	QPointF cma = _mg->dstCenterT() - _mg->srcCenterT();
	// Only re-optimize when the two circles overlap.
	if(QLineF(_mg->dstCenterT(), _mg->srcCenterT()).length() < _mg->dstRadiusT() + _mg->srcRadiusT())
	{
		qDebug() << "old src pos" << _mg->srcCenterT();
		qDebug() << "old src radius" << _mg->srcRadiusT();
		qDebug() << "old dst pos" << _mg->dstCenterT();
		qDebug() << "old dst radius" << _mg->dstRadiusT();
		MagnifyingGlassOptimizer optimizer(
			_mg,
			aspect_ratio,
			// Fix: use a full-precision pi; the old 3.1415f literal
			// introduced an angle error of up to ~0.015 degrees.
			atan2(-cma.y(), cma.x()) * 180 / 3.14159265358979f
			);
		optimizer.optimizeLastGlassPosition();
		qDebug() << "new src pos" << _mg->srcCenterT();
		qDebug() << "new src radius" << _mg->srcRadiusT();
		qDebug() << "new dst pos" << _mg->dstCenterT();
		qDebug() << "new dst radius" << _mg->dstRadiusT();
		//_original_dst_radius = _mg->dstRadiusT(); // is this line required?
	}
}
Example #13
0
// Exercises QUESO::GslOptimizer on a 3-D objective over [-10,10]^3, first
// with steepest descent, then with Nelder-Mead selected by name.
int main(int argc, char ** argv) {
  MPI_Init(&argc, &argv);

  QUESO::FullEnvironment env(MPI_COMM_WORLD, "", "", NULL);

  QUESO::VectorSpace<QUESO::GslVector, QUESO::GslMatrix> paramSpace(env,
      "space_", 3, NULL);

  QUESO::GslVector minBound(paramSpace.zeroVector());
  minBound[0] = -10.0;
  minBound[1] = -10.0;
  minBound[2] = -10.0;

  QUESO::GslVector maxBound(paramSpace.zeroVector());
  maxBound[0] = 10.0;
  maxBound[1] = 10.0;
  maxBound[2] = 10.0;

  QUESO::BoxSubset<QUESO::GslVector, QUESO::GslMatrix> domain("", paramSpace,
      minBound, maxBound);

  ObjectiveFunction<QUESO::GslVector, QUESO::GslMatrix> objectiveFunction(
      "", domain);

  QUESO::GslVector initialPoint(paramSpace.zeroVector());
  initialPoint[0] = 9.0;
  initialPoint[1] = -9.0;
  // Fix: this assignment previously targeted initialPoint[1] again,
  // overwriting the -9.0 above and leaving component 2 at zero.
  initialPoint[2] = -1.0;

  QUESO::GslOptimizer optimizer(objectiveFunction);

  double tol = 1.0e-10;
  optimizer.setTolerance(tol);
  optimizer.set_solver_type(QUESO::GslOptimizer::STEEPEST_DESCENT);

  QUESO::OptimizerMonitor monitor(env);
  monitor.set_display_output(true,true);

  // Fix: corrected "Decent" -> "Descent" in the status message.
  std::cout << "Solving with Steepest Descent" << std::endl;
  optimizer.minimize(&monitor);

  // The objective's known minimizer has first component 1.0.
  if (std::abs( optimizer.minimizer()[0] - 1.0) > tol) {
    std::cerr << "GslOptimize failed.  Found minimizer at: " << optimizer.minimizer()[0]
              << std::endl;
    std::cerr << "Actual minimizer is 1.0" << std::endl;
    queso_error();
  }

  // Re-run with Nelder-Mead, selected by its string name.
  std::string nm = "nelder_mead2";
  optimizer.set_solver_type(nm);
  monitor.reset();
  monitor.set_display_output(true,true);

  std::cout << std::endl << "Solving with Nelder Mead" << std::endl;
  optimizer.minimize(&monitor);

  monitor.print(std::cout,false);

  // NOTE(review): MPI_Finalize() is never called — confirm the environment
  // object's teardown finalizes MPI.
  return 0;
}
Example #14
0
// Registers the program's global function declarations (linking each into a
// JSFunction) and its var/const declarations on the global object.  Returns
// an exception object on code-block creation failure, or 0 on success.
JSObject* ProgramExecutable::initializeGlobalProperties(VM& vm, CallFrame* callFrame, JSScope* scope)
{
    RELEASE_ASSERT(scope);
    JSGlobalObject* globalObject = scope->globalObject();
    RELEASE_ASSERT(globalObject);
    ASSERT(&globalObject->vm() == &vm);

    JSObject* exception = 0;
    UnlinkedProgramCodeBlock* unlinkedCodeBlock = globalObject->createProgramCodeBlock(callFrame, this, &exception);
    if (exception)
        return exception;

    m_unlinkedProgramCodeBlock.set(vm, this, unlinkedCodeBlock);

    // Batches the structure transitions caused by the addFunction/addConst/
    // addVar calls below for the lifetime of this scope object.
    BatchedTransitionOptimizer optimizer(vm, globalObject);

    const UnlinkedProgramCodeBlock::VariableDeclations& variableDeclarations = unlinkedCodeBlock->variableDeclarations();
    const UnlinkedProgramCodeBlock::FunctionDeclations& functionDeclarations = unlinkedCodeBlock->functionDeclarations();

    for (size_t i = 0; i < functionDeclarations.size(); ++i) {
        UnlinkedFunctionExecutable* unlinkedFunctionExecutable = functionDeclarations[i].second.get();
        JSValue value = JSFunction::create(vm, unlinkedFunctionExecutable->link(vm, m_source, lineNo(), 0), scope);
        globalObject->addFunction(callFrame, functionDeclarations[i].first, value);
    }

    for (size_t i = 0; i < variableDeclarations.size(); ++i) {
        // The declaration's flag bits distinguish const from var bindings.
        if (variableDeclarations[i].second & DeclarationStacks::IsConstant)
            globalObject->addConst(callFrame, variableDeclarations[i].first);
        else
            globalObject->addVar(callFrame, variableDeclarations[i].first);
    }
    return 0;
}
Example #15
0
// Simplify all blocks
//
// Constructor: caches use-def and value-number info from the optimizer,
// reads the reassociation option, and sets up the stack-allocated hash
// tables used during simplification.
OMR::Simplifier::Simplifier(TR::OptimizationManager *manager)
   : TR::Optimization(manager),
     _hashTable(manager->trMemory(), stackAlloc),
     _ccHashTab(manager->trMemory(), stackAlloc)
   {
   _invalidateUseDefInfo      = false;
   _alteredBlock = false;
   _blockRemoved = false;

   _useDefInfo = optimizer()->getUseDefInfo();
   _valueNumberInfo = optimizer()->getValueNumberInfo();

   _reassociate = comp()->getOption(TR_EnableReassociation);

   _containingStructure = NULL;
   }
//*************************************************************************
TEST (EssentialMatrixFactor3, minimization) {

    // As before, we start with a factor graph and add constraints to it
    NonlinearFactorGraph graph;
    for (size_t i = 0; i < 5; i++)
        // but now we specify the rotation bRc
        graph.add(EssentialMatrixFactor3(100, i, pA(i), pB(i), cRb, model2));

    // Check error at ground truth
    // (key 100 holds the body-frame essential matrix; keys 0..4 hold
    // inverse depths)
    Values truth;
    truth.insert(100, bodyE);
    for (size_t i = 0; i < 5; i++) {
        Point3 P1 = data.tracks[i].p;
        truth.insert(i, double(baseline / P1.z()));
    }
    EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

    // Optimize, starting from the ground-truth values
    LevenbergMarquardtParams parameters;
    // parameters.setVerbosity("ERROR");
    LevenbergMarquardtOptimizer optimizer(graph, truth, parameters);
    Values result = optimizer.optimize();

    // Check result: recovered E and inverse depths should stay near truth
    EssentialMatrix actual = result.at<EssentialMatrix>(100);
    EXPECT(assert_equal(bodyE, actual, 1e-1));
    for (size_t i = 0; i < 5; i++)
        EXPECT_DOUBLES_EQUAL(truth.at<double>(i), result.at<double>(i), 1e-1);

    // Check error at result
    EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);
}
Example #17
0
 /// Trains softmax parameters over `samples` using conjugate gradient with
 /// a slope binary search line search; returns the learned parameter set
 /// (labels x features), starting from all zeros.
 param_type
 operator()(const Range& samples, std::size_t labels, std::size_t features) {
   // NOTE(review): the raw `new` objects below are handed to
   // conjugate_gradient / param_ll_objective — presumably those take
   // ownership; confirm to rule out a leak.
   conjugate_gradient<param_type> optimizer(
     new slope_binary_search<param_type>(1e-6, wolfe<T>::conjugate_gradient()),
     {1e-6, false}
   );
   param_ll_objective<softmax_ll<T>, Range> objective(
     samples,
     regul_ ? new l2_regularization<param_type>(regul_) : nullptr
   );
   optimizer.objective(&objective);
   // Start from the all-zero parameter matrix.
   optimizer.solution(param_type(labels, features, T(0)));
   // Iterate until convergence or the iteration budget is exhausted.
   for (std::size_t it = 0; !optimizer.converged() && it < max_iter_; ++it) {
     line_search_result<T> value = optimizer.iterate();
     if (verbose_) {
       std::cout << "Iteration " << it << ", " << value << std::endl;
     }
   }
   if (!optimizer.converged()) {
     std::cerr << "Warning: failed to converge" << std::endl;
   }
   if (verbose_) {
     std::cout << "Number of calls: " << objective.calls() << std::endl;
   }
   return optimizer.solution();
 }
TEST_F(EmptyArrayToStringOptimizerTests,
OptimizerHasNonEmptyID) {
	// Every optimizer is required to expose a non-empty identifier.
	ShPtr<EmptyArrayToStringOptimizer> opt(
		new EmptyArrayToStringOptimizer(module));

	EXPECT_FALSE(opt->getId().empty()) <<
		"the optimizer should have a non-empty ID";
}
TEST_F(VarDefLoopOptimizerTests,
OptimizerHasNonEmptyID) {
	// Every optimizer is required to expose a non-empty identifier.
	ShPtr<VarDefForLoopOptimizer> opt(
		new VarDefForLoopOptimizer(module));

	EXPECT_FALSE(opt->getId().empty()) <<
		"the optimizer should have a non-empty ID";
}
Example #20
0
// Post-pass cleanup: dump trees when tracing and drop any analysis info
// that the simplification pass invalidated.
void
OMR::Simplifier::postPerformOnBlocks()
   {
   if (trace())
      comp()->dumpMethodTrees("Trees after simplification");

#ifdef DEBUG
   resetBlockVisitFlags(comp());
#endif

   // Invalidate usedef and value number information if necessary
   //
   if (_useDefInfo && _invalidateUseDefInfo)
      optimizer()->setUseDefInfo(NULL);
   if (_valueNumberInfo && _invalidateValueNumberInfo)
      optimizer()->setValueNumberInfo(NULL);
   }
Example #21
0
/* C-interface entry point: Bayesian optimization of f over the discrete
   candidate set valid_x (n_points rows of nDim doubles, row-major).
   Writes the best point to x and its value to *minf; returns 0 on success
   or a BAYESOPT_* error code.  */
int bayes_optimization_disc(int nDim, eval_func f, void* f_data,
			    double *valid_x, size_t n_points,
			    double *x, double *minf, bopt_params parameters)
{
  vectord result(nDim);
  vectord input(nDim);
  vecOfvec xSet;

  /* Copy each candidate point out of the flat valid_x array. */
  for(size_t i = 0; i<n_points;++i)
    {
      for(int j = 0; j<nDim; ++j)
	{
	 input(j) = valid_x[i*nDim+j]; 
	}
      xSet.push_back(input);
    }

  /* Clamp the initial-sample count to the number of candidates and skip
     the iteration phase when every candidate is already sampled. */
  if(parameters.n_init_samples > n_points)
    {
      parameters.n_init_samples = n_points;
      parameters.n_iterations = 0;
    }

  try
    {
      CDiscreteModel optimizer(xSet,parameters);
      
      optimizer.set_eval_funct(f);
      optimizer.save_other_data(f_data);
      optimizer.optimize(result);

      /* Copy the best point and its objective value to the output args. */
      std::copy(result.begin(), result.end(), x);

      *minf = optimizer.getValueAtMinimum();
    }
  /* Map C++ exceptions onto the C error-code interface. */
  catch (std::bad_alloc& e)
    {
      FILE_LOG(logERROR) << e.what(); 
      return  BAYESOPT_OUT_OF_MEMORY; 
    }
  catch (std::invalid_argument& e)
    { 
      FILE_LOG(logERROR) << e.what(); 
      return BAYESOPT_INVALID_ARGS; 
    }
  catch (std::runtime_error& e)
    { 
      FILE_LOG(logERROR) << e.what(); 
      return BAYESOPT_RUNTIME_ERROR;
    }
  catch (...)
    { 
      FILE_LOG(logERROR) << "Unknown error";
      return BAYESOPT_FAILURE; 
    }

  return 0; /* everything ok*/
}
Example #22
0
// Command-line driver: optimizes a single Plasma page file given as the
// sole argument.  In release builds each phase (init, optimize, shutdown)
// is wrapped in its own catch-all so a crash in one phase is reported with
// a distinct message; returns 0 on success, 1 on usage error, 2 on crash.
int main(int argc, char* argv[])
{
    if (argc != 2)
    {
        puts("plPageOptimizer: wrong number of arguments");
        return 1;
    }

    plFileName filename = argv[1];
    plPrintf("Optimizing {}...", filename);

#ifndef _DEBUG
    try {
#endif
        // hsgResMgr takes over the manager; it is torn down in Shutdown().
        plResManager* resMgr = new plResManager;
        hsgResMgr::Init(resMgr);
#ifndef _DEBUG
    } catch (...) {
        puts(" ***crashed on init");
        return 2;
    }
#endif

#ifndef _DEBUG
    try
#endif
    {
        plPageOptimizer optimizer(argv[1]);
        optimizer.Optimize();
    }
#ifndef _DEBUG
    catch (...) {
        puts(" ***crashed on optimizing");
        return 2;
    }
#endif

#ifndef _DEBUG
    try {
#endif
        // Reading in objects may have generated dirty state which we're obviously
        // not sending out. Clear it so that we don't have leaked keys before the
        // ResMgr goes away.
        std::vector<plSynchedObject::StateDefn> carryOvers;
        plSynchedObject::ClearDirtyState(carryOvers);

        hsgResMgr::Shutdown();
#ifndef _DEBUG
    } catch (...) {
        puts(" ***crashed on shutdown");
        return 2;
    }
#endif

    return 0;
}
// Builds the softmax objective for the given data/labels and delegates to
// the optimizer-specific Train() overload; returns its final objective.
double SoftmaxRegression<OptimizerType>::Train(const arma::mat& data,
                                               const arma::Row<size_t>& labels,
                                               const size_t numClasses)
{
  SoftmaxRegressionFunction objective(data, labels, numClasses,
                                      lambda, fitIntercept);
  OptimizerType<SoftmaxRegressionFunction> sgd(objective);
  return Train(sgd);
}
TEST_F(AuxiliaryVariablesOptimizerTests,
OptimizerHasNonEmptyID) {
	// Every optimizer is required to expose a non-empty identifier.
	INSTANTIATE_ALIAS_ANALYSIS_AND_VALUE_ANALYSIS(module);

	ShPtr<AuxiliaryVariablesOptimizer> auxOptimizer(new AuxiliaryVariablesOptimizer(
		module, va, OptimCallInfoObtainer::create()));

	EXPECT_FALSE(auxOptimizer->getId().empty()) <<
		"the optimizer should have a non-empty ID";
}
Example #25
0
TEST(liblbfgs_minimizer, maxent2_cppstyle) {
  // Minimize the simple maxent objective from the origin and verify
  // convergence to the known optimum near (+0.34, -0.34).
  SimpleLibLBFGSMaxent maxent;
  optimize::LibLBFGSFunction objective = maxent.objective();
  std::vector<double> start = {0.0, 0.0};
  optimize::LibLBFGSMinimizer optimizer(2);
  const int status = optimizer.minimize(&objective, start);
  ASSERT_EQ(0, status);
  ASSERT_NEAR(0.34,  start[0], 1E-2);
  ASSERT_NEAR(-0.34,  start[1], 1E-2);
}
Example #26
0
// Load-extensions pass: at warm or above, (re)builds global use-def info,
// then walks the trees twice — first to gather per-node load extension
// preferences, then to flag the chosen extensions.
int32_t TR_LoadExtensions::perform()
   {
   if (comp()->getOptLevel() >= warm && !optimizer()->cantBuildGlobalsUseDefInfo())
      {
      if (!comp()->getFlowGraph()->getStructure())
         {
         optimizer()->doStructuralAnalysis();
         }

      TR::LexicalMemProfiler memoryProfiler("Load Extensions: Usedef calculation", comp()->phaseMemProfiler());

      // Drop any stale info before computing fresh use-def information.
      optimizer()->setUseDefInfo(NULL);

      TR_UseDefInfo* useDefInfo = new (comp()->allocator()) TR_UseDefInfo(comp(), comp()->getFlowGraph(), optimizer(), false, false, false, true, true);

      if (useDefInfo->infoIsValid())
         {
         optimizer()->setUseDefInfo(useDefInfo);
         }
      else
         {
         // Computation failed; discard and continue without use-def info.
         delete useDefInfo;
         }
      }

   TR::StackMemoryRegion stackMemoryRegion(*trMemory());

   excludedNodes = new (stackMemoryRegion) NodeToIntTable(NodeToIntTableComparator(), NodeToIntTableAllocator(stackMemoryRegion));
   loadExtensionPreference = new (stackMemoryRegion) NodeToIntTable(NodeToIntTableComparator(), NodeToIntTableAllocator(stackMemoryRegion));

   // Pass 1: collect extension preferences for each node.
   for (TR::PreorderNodeIterator iter(comp()->getStartTree(), comp()); iter.currentTree() != NULL; ++iter)
      {
      findPreferredLoadExtensions(iter.currentNode());
      }

   // Pass 2: flag nodes according to the collected preferences.
   for (TR::PreorderNodeIterator iter(comp()->getStartTree(), comp()); iter.currentTree() != NULL; ++iter)
      {
      flagPreferredLoadExtensions(iter.currentNode());
      }

   return 0;
   }
// Builds the kernel-estimation model: when sigma is negative (meaning "not
// yet chosen"), seeds a 1-D Nelder-Mead search from Silverman's
// rule-of-thumb bandwidth and optimizes sigma to minimize estimated error.
void KernelEstimationInterpolator::_buildModel()
{
  const DataFrame& df = *_df;

  _index.reset();

  if (_sigma < 0)
  {
    // calculate the standard deviation in x
    double mean = 0;
    size_t n = df.getNumDataVectors();
    for (size_t i = 0; i < n; ++i)
    {
      double v = df.getDataVector(i)[_indColumns[0]];
      mean += v;
    }
    mean /= df.getNumDataVectors();

    double sumDiff = 0;
    for (size_t i = 0; i < n; ++i)
    {
      double v = df.getDataVector(i)[_indColumns[0]];
      sumDiff += (v - mean) * (v - mean);
    }

    // Sample standard deviation (n - 1 denominator).
    double sdx = sqrt(1.0 / (n - 1) * sumDiff);

    // calculate a reasonable starting point w/ silverman's rule of thumb. Put a minimum at 1m to
    // prevent some edge conditions.
    double silvermans = max(1.0, 1.06 * sdx * pow(n, -.2));

    NelderMead optimizer(1, new OptimizeFunction(*this), _stopDelta);
    Vector result;
    result.prepare(1);

    // silverman's rule of thumb tends to over estimate and we're faster at evaluating smaller sigma
    // so start with two smallish values to seed nelder-mead.
    _sigma = silvermans * 0.6;
    result[0] = _sigma;
    optimizer.step(result, -estimateError());

    _sigma = silvermans * 0.2;
    result[0] = _sigma;
    optimizer.step(result, -estimateError());

    // NOTE(review): no iteration cap here — relies entirely on
    // optimizer.done(); confirm NelderMead is guaranteed to converge.
    while (optimizer.done() == false)
    {
      double e = -estimateError();
      result = optimizer.step(result, e);
      _sigma = result[0];
    }
  }
}
Example #28
0
/* Run the compilation passes in order, stopping after the first failure;
   the AST is always freed before returning.  Returns 0 on success and 1 on
   failure (same 0/1 normalization the old `||` chain produced).  */
int
run_compilation_passes (struct ast **ss)
{
  int failed = 0;

  if (!failed) failed = (semantic (*ss) != 0);
  if (!failed) failed = (transform (ss) != 0);
  if (!failed) failed = (dealias (ss) != 0);
  if (!failed) failed = (collect_vars (*ss) != 0);
  if (!failed) failed = (optimizer (ss) != 0);
  if (!failed) failed = (gen_code (*ss) != 0);

  AST_FREE (*ss);
  return failed;
}
Example #29
0
// Inserts two tuples with a single multi-value INSERT, then verifies both
// rows (and the reported row count) via per-key SELECTs.
TEST_F(InsertSQLTests, InsertMultipleValues) {
  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  catalog::Catalog::GetInstance()->CreateDatabase(txn, DEFAULT_DB_NAME);
  txn_manager.CommitTransaction(txn);

  CreateAndLoadTable();

  std::vector<ResultValue> result;
  std::vector<FieldInfo> tuple_descriptor;
  std::string error_message;
  int rows_changed;
  std::unique_ptr<optimizer::AbstractOptimizer> optimizer(
      new optimizer::Optimizer());

  // INSERT multiple tuples
  std::string query("INSERT INTO test VALUES (6, 11, 888), (7, 77, 999);");

  // The generated plan must be a plain INSERT node.
  txn = txn_manager.BeginTransaction();
  auto plan = TestingSQLUtil::GeneratePlanWithOptimizer(optimizer, query, txn);
  EXPECT_EQ(plan->GetPlanNodeType(), PlanNodeType::INSERT);
  txn_manager.CommitTransaction(txn);

  TestingSQLUtil::ExecuteSQLQueryWithOptimizer(
      optimizer, query, result, tuple_descriptor, rows_changed, error_message);

  // Both tuples should have been inserted.
  EXPECT_EQ(2, rows_changed);

  // SELECT to find out if the tuples are correctly inserted
  TestingSQLUtil::ExecuteSQLQueryWithOptimizer(
      optimizer, "SELECT * FROM test WHERE a=6", result, tuple_descriptor,
      rows_changed, error_message);
  EXPECT_EQ(3, result.size());
  EXPECT_EQ("6", TestingSQLUtil::GetResultValueAsString(result, 0));
  EXPECT_EQ("11", TestingSQLUtil::GetResultValueAsString(result, 1));
  EXPECT_EQ("888", TestingSQLUtil::GetResultValueAsString(result, 2));

  // SELECT to find out if the tuples are correctly inserted
  TestingSQLUtil::ExecuteSQLQueryWithOptimizer(
      optimizer, "SELECT * FROM test WHERE a=7", result, tuple_descriptor,
      rows_changed, error_message);
  EXPECT_EQ(3, result.size());
  EXPECT_EQ("7", TestingSQLUtil::GetResultValueAsString(result, 0));
  EXPECT_EQ("77", TestingSQLUtil::GetResultValueAsString(result, 1));
  EXPECT_EQ("999", TestingSQLUtil::GetResultValueAsString(result, 2));

  // free the database just created
  txn = txn_manager.BeginTransaction();
  catalog::Catalog::GetInstance()->DropDatabaseWithName(txn, DEFAULT_DB_NAME);
  txn_manager.CommitTransaction(txn);
}
Example #30
0
// Parses the translation unit, optionally runs tree-level optimization
// passes, dumps statement trees when a tree filename was supplied,
// generates code for every function body, optionally runs the low-level
// optimizer, and writes the result to Stream.
void CCodeGenerator::Output(ostream &Stream)
{

	CGlobalSymbolTable *SymTable = Parser.ParseTranslationUnit();

	for (CGlobalSymbolTable::VariablesIterator it = SymTable->VariablesBegin(); it != SymTable->VariablesEnd(); ++it) {
		Code.AddGlobalVariable(it->second);
	}

	// Optional statement-tree dump target (NULL when no filename given).
	ofstream *TreeStream = NULL;
	if (!Parameters.TreeFilename.empty()) {
		TreeStream = new ofstream(Parameters.TreeFilename.c_str());
	}

	CFunctionSymbol *FuncSym = NULL;

	for (CGlobalSymbolTable::FunctionsIterator it = SymTable->FunctionsBegin(); it != SymTable->FunctionsEnd(); ++it) {
		FuncSym = it->second;

		if (FuncSym->GetBody()) {
			if (Parameters.Optimize) {
				// Tree-level optimization passes, applied in this order.
				CConstantFolding cf;
				FuncSym->GetBody()->Accept(cf);

				CUnreachableCodeElimination uce;
				FuncSym->GetBody()->Accept(uce);

				CLoopInvariantHoisting lih;
				FuncSym->GetBody()->Accept(lih);
			}

			if (TreeStream) {
				CStatementTreePrintVisitor stpv(*TreeStream);
				*TreeStream << FuncSym->GetName() << ":" << endl;
				FuncSym->GetBody()->Accept(stpv);
			}

			Visitor.SetFunction(FuncSym);
			FuncSym->GetBody()->Accept(Visitor);
		}
	}

	// NOTE(review): raw new/delete — TreeStream leaks if an Accept() above
	// throws; consider a stack-allocated ofstream or a smart pointer.
	delete TreeStream;

	if (Parameters.Optimize) {
		CLowLevelOptimizer optimizer(Code);
		optimizer.Optimize();
	}

	Code.Output(Stream);
}