Example #1
void nasolver<nr_type_t>::runMNA (void)
{

    // just solve the equation system here
    eqns->setAlgo (eqnAlgo);
    eqns->passEquationSys (updateMatrix ? A : NULL, x, z);
    eqns->solve ();

    // if damped Newton-Raphson is requested
    if (xprev != NULL && top_exception () == NULL)
    {
        if (convHelper == CONV_Attenuation)
        {
            applyAttenuation ();
        }
        else if (convHelper == CONV_LineSearch)
        {
            lineSearch ();
        }
        else if (convHelper == CONV_SteepestDescent)
        {
            steepestDescent ();
        }
    }
}
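All three convergence helpers selected above damp the raw Newton-Raphson update in some way. Below is a minimal sketch of the simplest one, attenuation, under assumed names (a plain std::vector state and a fixed damping factor); it is only an illustration of the idea, not the solver's actual applyAttenuation().

#include <vector>

// Damped (attenuated) Newton-Raphson update sketch: instead of jumping to the
// freshly solved x, move only a fraction alpha of the way from the previous
// iterate. alpha = 1 recovers the plain Newton step.
void applyAttenuationSketch(std::vector<double>& x,
                            const std::vector<double>& xprev,
                            double alpha = 0.1)
{
    for (std::size_t i = 0; i < x.size(); ++i)
        x[i] = xprev[i] + alpha * (x[i] - xprev[i]);
}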
	void PostureIKSolver::GD(){
		// Plain gradient descent: step against the gradient until its norm
		// falls below gredientThreshold or the line-search step becomes too small.
		for(int i = 0; i < maxIteration; ++i){
			computeGredient();
			double gradientNorm = gredient.length();
			if(gradientNorm > this->gredientThreshold && lineSearchStepSize > step){
				snapshot(1);
				lineSearch(1, gredient.revert());
			}else{
				break;
			}
		}
	}
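For context, here is a self-contained gradient-descent loop with a backtracking line search. The objective f and gradient g are caller-supplied callables with illustrative names; this is a sketch of the general technique, not the PostureIKSolver interface.

#include <cmath>
#include <functional>
#include <vector>

// Minimal gradient descent with a backtracking line search: the trial step is
// halved until the objective decreases; if no decreasing step is found, stop.
std::vector<double> gradientDescentSketch(
    const std::function<double(const std::vector<double>&)>& f,
    const std::function<std::vector<double>(const std::vector<double>&)>& g,
    std::vector<double> x, int maxIteration, double gradThreshold)
{
    for (int it = 0; it < maxIteration; ++it) {
        const std::vector<double> grad = g(x);
        double norm = 0.0;
        for (double gi : grad) norm += gi * gi;
        if (std::sqrt(norm) <= gradThreshold) break;   // gradient small enough

        const double f0 = f(x);
        double step = 1.0;
        bool accepted = false;
        std::vector<double> trial(x.size());
        while (step > 1e-12) {                         // backtracking line search
            for (std::size_t i = 0; i < x.size(); ++i)
                trial[i] = x[i] - step * grad[i];      // move against the gradient
            if (f(trial) < f0) { accepted = true; break; }
            step *= 0.5;
        }
        if (!accepted) break;                          // line search failed
        x = trial;
    }
    return x;
}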
  bool NewtonMinimizerGradHessian::minimize(const VectorXd& start_point, VectorXd& min_point, double tol, unsigned int max_iter, double abs_tol)
  {
    try
    {
      if(!function){throw (char*)("minimize called but function has not been set");}
    }
    catch(char* str)
    {
      cout<<"Exception from NewtonMinimizerGradHessian: "<<str<<endl;
      throw;
      return false;
    }
    
    unsigned int npars = function->nPars();
    
    try
    {
      if(!(npars>0)){throw (char*)("function to be minimized has zero dimensions");}
      if(start_point.rows()!=(int)npars){throw (char*)("input to minimizer not of dimension required by function to be minimized");}
    }
    catch(char* str)
    {
      cout<<"Exception from NewtonMinimizerGradHessian: "<<str<<endl;
      throw;
      return false;
    }
    
    
    //parameters used for the Wolfe conditions
    double c1 = 1.0e-4;
    double c2 = 0.9;
    
    
    VectorXd working_points[2];
    working_points[0] = VectorXd::Zero(npars);
    working_points[1] = VectorXd::Zero(npars);
    
    VectorXd* current_point = &(working_points[0]);
    VectorXd* try_point = &(working_points[1]);
    
    (*current_point) = start_point;
    
    double value=0.;
    double prev_value=0.;
    
    double dir_der=0.;
    double scale = 1.;
    double scale_temp = 1.;
    double grad0_dir = 0.;
    
    bool good_value = true;
    bool bounds = true;
    
    unsigned int search_iter = 32;
    
    VectorXd grad[2];
    grad[0] = VectorXd::Zero(npars);
    grad[1] = VectorXd::Zero(npars);
    VectorXd* current_grad = &(grad[0]);
    VectorXd* try_grad = &(grad[1]);
    
    
    MatrixXd hessian = MatrixXd::Zero(npars, npars);
    
    VectorXd move = VectorXd::Zero(npars);
    VectorXd unit_move = VectorXd::Zero(npars);
    
    //try a Newton iteration
    function->calcValGradHessian((*current_point), value, (*current_grad), hessian);
    for(unsigned int i=0;i<fixparameter.size();++i)
    {
      if(fixparameter[i] != 0)
      {
        (*current_grad)[i] = 0.;
        for(unsigned int j=0;j<npars;++j)
        {
          hessian(j,i) = 0.;
          hessian(i,j) = 0.;
        }
        hessian(i,i) = 1.;
      }
    }
    
    move = -hessian.fullPivLu().solve(*current_grad);
    good_value=true;
    for(unsigned int i=0;i<npars;++i){if(!(move(i) == move(i))){good_value=false;break;}} // (move(i) != move(i)) is true only for NaN
    if(good_value == false){move = - (*current_grad);}
    dir_der = (*current_grad).dot(move);
    //if the inverse hessian times the negative gradient isn't even a descent direction, negate the direction
    if(dir_der>0.)
    {
      move = -move;
    }
    function->rescaleMove((*current_point), move);
    grad0_dir = (*current_grad).dot(move);
    scale_temp = 1.;
    //find scale, such that move*scale satisfies the strong Wolfe conditions
    bounds = lineSearch(scale_temp, c1, c2, (*try_grad), move, grad0_dir, value, (*current_point), (*try_point), tol, abs_tol, search_iter, scale);
    if(bounds == false){min_point = start_point; return false;}
    move *= scale;
    (*try_point) = ((*current_point) + move);
    function->calcValGradHessian((*try_point), prev_value, (*try_grad), hessian);
    for(unsigned int i=0;i<fixparameter.size();++i)
    {
      if(fixparameter[i] != 0)
      {
        (*try_grad)[i] = 0.;
        for(unsigned int j=0;j<npars;++j)
        {
          hessian(j,i) = 0.;
          hessian(i,j) = 0.;
        }
        hessian(i,i) = 1.;
      }
    }
    swap(current_point, try_point);
    swap(current_grad, try_grad);
    swap(value, prev_value);
    
    unsigned long int count = 1;
    bool converged=false;
    while(converged==false)
    {
      if((fabs((prev_value - value)/prev_value)<tol || fabs(prev_value - value)<abs_tol)){converged=true;break;}
      prev_value = value;
      //try a Newton iteration
      
      move = -hessian.fullPivLu().solve(*current_grad);
      good_value=true;
      for(unsigned int i=0;i<npars;++i){if(!(move(i) == move(i))){good_value=false;break;}} // (move(i) != move(i)) is true only for NaN
      if(good_value == false){move = - (*current_grad);}
      dir_der = (*current_grad).dot(move);
      //if the inverse hessian times the negative gradient isn't even a descent direction, negate the direction
      if(dir_der>0.)
      {
        move = -move;
      }
      function->rescaleMove((*current_point), move);
      grad0_dir = (*current_grad).dot(move);
      scale_temp = 1.;
      //find scale, such that move*scale satisfies the strong Wolfe conditions
//       scale_temp = fabs(value/grad0_dir);
      bounds = lineSearch(scale_temp, c1, c2, (*try_grad), move, grad0_dir, value, (*current_point), (*try_point), tol, abs_tol, search_iter, scale);
      if(bounds == false){min_point = (*current_point); return false;}
      move *= scale;
      (*try_point) = ((*current_point) + move);
      function->calcValGradHessian((*try_point), value, (*try_grad), hessian);
      for(unsigned int i=0;i<fixparameter.size();++i)
      {
        if(fixparameter[i] != 0)
        {
          (*try_grad)[i] = 0.;
          for(unsigned int j=0;j<npars;++j)
          {
            hessian(j,i) = 0.;
            hessian(i,j) = 0.;
          }
          hessian(i,i) = 1.;
        }
      }
      swap(current_point, try_point);
      swap(current_grad, try_grad);
      
      count++;
      if(count > max_iter){break;}
    }
    function->computeCovariance(value, hessian);
    
    min_point = (*current_point);
    return converged;
  }
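Both minimize() above and findSaddlePoint() below repeat the same direction-selection pattern before calling lineSearch(). A condensed sketch of that pattern as a free-standing Eigen helper (an assumption for illustration, not a member of NewtonMinimizerGradHessian):

#include <Eigen/Dense>

// Solve H*d = -g for the Newton direction, fall back to steepest descent if
// the solve produced NaN/Inf, and flip the sign if the result is not a
// descent direction.
Eigen::VectorXd newtonDirectionSketch(const Eigen::MatrixXd& hessian,
                                      const Eigen::VectorXd& grad)
{
    Eigen::VectorXd move = -hessian.fullPivLu().solve(grad);
    if (!move.allFinite())          // NaN/Inf => Hessian solve failed
        move = -grad;               // steepest-descent fallback
    if (grad.dot(move) > 0.0)       // not a descent direction => negate
        move = -move;
    return move;
}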
 bool NewtonMinimizerGradHessian::findSaddlePoint(const VectorXd& start_point, VectorXd& min_point, double tol, unsigned int max_iter, double abs_tol)
 {
   try
   {
     if(!function){throw (char*)("findSaddlePoint called but function has not been set");}
   }
   catch(char* str)
   {
     cout<<"Exception from NewtonMinimizerGradHessian: "<<str<<endl;
     throw;
     return false;
   }
   
   unsigned int npars = function->nPars();
   
   try
   {
     if(!(npars>0)){throw (char*)("function to be minimized has zero dimensions");}
     if(start_point.rows()!=(int)npars){throw (char*)("input to minimizer not of dimension required by function to be minimized");}
   }
   catch(char* str)
   {
     cout<<"Exception from NewtonMinimizerGradHessian: "<<str<<endl;
     throw;
     return false;
   }
   
   //parameters used for the Wolfe conditions
   double c1 = 1.0e-6;
   double c2 = 1.0e-1;
   
   VectorXd working_points[2];
   working_points[0] = VectorXd::Zero(npars);
   working_points[1] = VectorXd::Zero(npars);
   
   VectorXd* current_point = &(working_points[0]);
   VectorXd* try_point = &(working_points[1]);
   
   (*current_point) = start_point;
   
   double value=0.;
   double prev_value=0.;
   
   double dir_der=0.;
   double scale = 1.;
   double scale_temp = 1.;
   double grad0_dir = 0.;
   
   bool good_value = true;
   bool bounds = true;
   
   unsigned int search_iter = 64;
   
   VectorXd grad[2];
   grad[0] = VectorXd::Zero(npars);
   grad[1] = VectorXd::Zero(npars);
   VectorXd* current_grad = &(grad[0]);
   VectorXd* try_grad = &(grad[1]);
   
   VectorXd newgrad = VectorXd::Zero(npars);
   
   MatrixXd hessian = MatrixXd::Zero(npars, npars);
   
   VectorXd move = VectorXd::Zero(npars);
   VectorXd unit_move = VectorXd::Zero(npars);
   
   FunctionGradHessian* orig_func = function;
   SquareGradient gradsquared(orig_func);
   function = &gradsquared;
   
   //try a Newton iteration
   orig_func->calcValGradHessian((*current_point), value, (*current_grad), hessian);
   move = -hessian.fullPivLu().solve(*current_grad);
   gradsquared.calcValGrad((*current_point), value, newgrad);
   good_value=true;
   for(unsigned int i=0;i<npars;++i){if(!(move(i) == move(i))){good_value=false;break;}} // NaN check
   if(good_value == false){move = -newgrad;}
   dir_der = newgrad.dot(move);
   if(dir_der>0.){move = -move;}
   gradsquared.rescaleMove((*current_point), move);
   grad0_dir = newgrad.dot(move);
   scale_temp = 1.;
   //find scale, such that move*scale satisfies the strong Wolfe conditions
   bounds = lineSearch(scale_temp, c1, c2, (*try_grad), move, grad0_dir, value, (*current_point), (*try_point), tol, abs_tol, search_iter, scale);
   if(bounds == false){min_point = start_point; function=orig_func;return false;}
   move *= scale;
   (*try_point) = ((*current_point) + move);
   orig_func->calcValGradHessian((*try_point), prev_value, (*try_grad), hessian);
   gradsquared.calcValGrad((*try_point), prev_value, newgrad);
   swap(current_point, try_point);
   swap(current_grad, try_grad);
   swap(value, prev_value);
   
   unsigned long int count = 1;
   bool converged=false;
   while(converged==false)
   {
     if((fabs((prev_value - value)/prev_value)<tol || fabs(prev_value - value)<abs_tol)){converged=true;break;}
     prev_value = value;
     //try a Newton iteration
     move = -hessian.fullPivLu().solve(*current_grad);
     gradsquared.calcValGrad((*current_point), value, newgrad);
     
     good_value=true;
     for(unsigned int i=0;i<npars;++i){if(!(move(i) == move(i))){good_value=false;break;}} // NaN check
     scale_temp = 1.;
     scale_temp = fabs(value/newgrad.dot(move));
     if(good_value == false){move = -newgrad;scale_temp = fabs(value/move.dot(move));}
     dir_der = newgrad.dot(move);
     if(dir_der>0.){move = -move;}
     gradsquared.rescaleMove((*current_point), move);
     grad0_dir = newgrad.dot(move);
     //find scale, such that move*scale satisfies the strong Wolfe conditions
     bounds = lineSearch(scale_temp, c1, c2, (*try_grad), move, grad0_dir, value, (*current_point), (*try_point), tol, abs_tol, search_iter, scale);
     if(bounds == false){min_point = (*current_point); function=orig_func;return false;}
     move *= scale;
     (*try_point) = ((*current_point) + move);
     orig_func->calcValGradHessian((*try_point), value, (*try_grad), hessian);
     gradsquared.calcValGrad((*try_point), value, newgrad);
     swap(current_point, try_point);
     swap(current_grad, try_grad);
     count++;
     if(count > max_iter){break;}
   }
   orig_func->computeCovariance(value, hessian);
   min_point = (*current_point);
   function=orig_func;return converged;
 }
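Both routines call lineSearch() to find a scale such that move*scale satisfies the strong Wolfe conditions with the constants c1 and c2 defined above. The check itself looks like the following sketch, where the argument names (value/gradient at the current and trial points) are assumptions for illustration:

#include <cmath>
#include <Eigen/Dense>

// Strong Wolfe conditions for a step x + scale*move:
//   sufficient decrease: f1 <= f0 + c1*scale*g0.dot(move)
//   curvature:           |g1.dot(move)| <= c2*|g0.dot(move)|
bool strongWolfeSatisfiedSketch(double f0, const Eigen::VectorXd& g0,
                                double f1, const Eigen::VectorXd& g1,
                                const Eigen::VectorXd& move, double scale,
                                double c1 = 1.0e-4, double c2 = 0.9)
{
    const double dir0 = g0.dot(move);
    const bool sufficientDecrease = (f1 <= f0 + c1 * scale * dir0);
    const bool curvature = (std::fabs(g1.dot(move)) <= c2 * std::fabs(dir0));
    return sufficientDecrease && curvature;
}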
void MlMaximumEntropyModel::learnLMBFGS(MlTrainingContainer* params)
{
	params->initialize();

	const MlDataSet* trainingDataSet = params->getTrainingDataSet();
	const MlDataSet* testDataSet     = params->getTestDataSet();
	const vector<size_t>& trainingIdxs = params->getTrainingIdxs();
	const vector<size_t>& testIdxs	   = params->getTestIdxs();
	const size_t numSamples = trainingIdxs.size();
	const bool performTest = (testDataSet && testIdxs.size()>0);

	if (trainingDataSet->getNumClasess() < 2)
		error("learnLMBFGS accepts only datasets with 2 or more classes, your data has ",
			trainingDataSet->getNumClasess());

	const double  lambda  = params->getLambda();
	const size_t memorySize = params->getLmBfgsMemorySize(); 
	const size_t reportFrequency = params->getVerboseLevel();
	const double perplexityDelta  = params->getPerplexityDelta();
	const size_t numClasses  = trainingDataSet->getNumClasess();
	const size_t numFeatures = trainingDataSet->getNumBasicFeatures(); // F
	const size_t numTraining = trainingIdxs.size();    // N

	const size_t numRestarts=3;

	// data structures used for training
	vector< vector<double> >& w = weights_; // class X features
	vector< vector<double> > wOld(numClasses, vector<double>(numFeatures,0.0)); // class X features
	vector< vector<double> > wtx(numClasses, vector<double>(numTraining, 0.0));  // class X samples
	vector< vector<double> > qtx(numClasses, vector<double>(numTraining, 0.0));  // class X samples
	vector< vector<double> > q(numClasses, vector<double>(numFeatures,0.0));       // class X features
	vector< vector<double> > g(numClasses, vector<double>(numFeatures,0.0));       // class X features
	vector< vector<double> > gOld(numClasses, vector<double>(numFeatures,0.0));  // class X features

	vector< vector<float> > trainingProbs(numClasses, vector<float>(numTraining));
	vector< vector<float> > testProbs(numClasses, vector<float>(numTraining));
	vector< vector<double> > bestW(numClasses, vector<double>(numFeatures));

	// initialize weights
	if (params->getInputPath().length() > 1)
	{
		const string modelFile = params->getInputPath() + "_scr.txt";
		if (readModel(modelFile.c_str()))
			params->setIndClearWeights(false);
	}

	if (params->getIndClearWeights())
		weights_.clear();

	weights_.resize(numClasses, vector<double>(numFeatures,0.0));

	double previousPerplexity = MAX_FLOAT;
	float  bestTestError=1.0;
	size_t bestTestRound=0;
	float  bestTrainingError=1.0;
	size_t bestTrainingRound=0;

	bool terminateTraining = false;
	size_t totalRounds=0;
	size_t megaRound=0;
	for ( megaRound=0; megaRound<numRestarts; megaRound++)
	{
		// first round
		computeGradient(trainingDataSet, trainingIdxs, w, wtx, lambda, g);

		const double gtg = computeDotProduct(g,g);
		const double denominator = 1.0 / sqrt(gtg);
		for (size_t c=0; c<numClasses; c++)
			for (size_t i=0; i<numFeatures; i++)
				q[c][i]=g[c][i]*denominator;

		// qtx <- qTx
		for (size_t c=0; c<numClasses; c++)
			for (size_t i=0; i<numSamples; i++)
			{
				const MlSample& sample = trainingDataSet->getSample(trainingIdxs[i]);
				qtx[c][i]=computeDotProduct(q[c],sample.pairs);
			}

		// eta <- lineSearch(...)
		double eta = lineSearch(trainingDataSet, trainingIdxs, w, wtx, qtx, g, q, lambda);
		//cout << "eta = " << eta << endl;

		// update wtx <- wtx + eta*qtx
		for (size_t c=0; c<numClasses; c++)
			for (size_t i=0; i<wtx[c].size(); i++)
				wtx[c][i]+=eta*qtx[c][i];

		// update wOld<- w ; w <- w + eta *q ; gOld<-g
		for (size_t c=0; c<numClasses; c++)
		{
			memcpy(&wOld[c][0],&w[c][0],sizeof(double)*w[c].size());
			memcpy(&gOld[c][0],&g[c][0],sizeof(double)*g[c].size());
			for (size_t i=0; i<numFeatures; i++)
				w[c][i]+= eta*q[c][i];
		}


		// initialize memory
		vector< vector< vector<double> > > memoryU(memorySize, vector< vector<double> >(numClasses));
		vector< vector< vector<double> > > memoryD(memorySize, vector< vector<double> >(numClasses));
		vector< double > memoryAlpha(memorySize);
		size_t nextMemPosition=0;
		size_t numMemPushes=0;
		
		// iterate until convergence
		size_t round=1;
		while (round<10000)
		{
			// compute errors and report round results
			{
				double trainingLogLikelihood=0.0, testLogLikelihood=0.0;
				const double trainingError = calcErrorRateWithLogLikelihood(trainingDataSet, trainingIdxs,
																		false, &trainingLogLikelihood);
				double testError=1.0;
				if (performTest)
					testError = calcErrorRateWithLogLikelihood(testDataSet, testIdxs, false, &testLogLikelihood);

				if (reportFrequency>0 && round % reportFrequency == 0)
				{
					cout << round << "\t" << scientific << setprecision(5) << trainingLogLikelihood << "\t" << fixed << setprecision(5) << trainingError;
					if (performTest)
						cout <<"\t" << scientific << testLogLikelihood << "\t" << fixed << setprecision(5)<< testError;
					cout << endl;
				}
				
				if (performTest)
				{
					if (testError<=bestTestError)
					{
						bestTestRound=round;
						bestTestError=testError;
						for (size_t c=0; c<numClasses; c++)
							memcpy(&bestW[c][0],&w[c][0],numFeatures*sizeof(double)); // copy weights
					}
				}
				
				if (trainingError<=bestTrainingError)
				{
					bestTrainingRound=round;
					bestTrainingError=trainingError;
					if (! performTest)
					{
						for (size_t c=0; c<numClasses; c++)
							memcpy(&bestW[c][0],&w[c][0],numFeatures*sizeof(double)); // copy weights
					}
				}		
			}

			// Train new round

			computeGradient(trainingDataSet, trainingIdxs, w, wtx, lambda, g);

			double alpha=0.0;
			double sigma=0.0;
			double utu=0.0;

			// write u=g'-g and d=w'-w onto memory, use them to compute alpha and sigma
			vector< vector<double> >& u = memoryU[nextMemPosition];
			vector< vector<double> >& d = memoryD[nextMemPosition];
			for (size_t c=0; c<numClasses; c++)
			{
				const size_t numFeatures = g[c].size();
				u[c].resize(numFeatures);
				d[c].resize(numFeatures);
				for (size_t i=0; i<numFeatures; i++)
				{
					const double gDiff = g[c][i]-gOld[c][i];
					const double wDiff = w[c][i]-wOld[c][i];
					u[c][i]=gDiff;
					d[c][i]=wDiff;
					alpha += gDiff*wDiff;
					utu += gDiff*gDiff;
				}
			}
			sigma = alpha / utu;
			memoryAlpha[nextMemPosition]=alpha;

			// update memory position
			nextMemPosition++;
			if (nextMemPosition == memorySize)
				nextMemPosition = 0;
			numMemPushes++;

			// q<-g
			for (size_t c=0; c<numClasses; c++)
				memcpy(&q[c][0],&g[c][0],g[c].size()*sizeof(double));
			
			// determine memory evaluation order 1..M (M is the newest)
			vector<size_t> memOrder;
			if (numMemPushes<=memorySize)
			{
				for (size_t i=0; i<numMemPushes; i++)
					memOrder.push_back(i);
			}
			else
			{
				for (size_t i=0; i<memorySize; i++)
					memOrder.push_back((i+nextMemPosition) % memorySize);
			}

			vector<double> beta(memOrder.size(),0.0);
			for (int i=memOrder.size()-1; i>=0; i--)
			{
				const size_t m = memOrder[static_cast<size_t>(i)];
				const double alpha = memoryAlpha[m];
				
				const vector< vector<double> >& dM = memoryD[m];
				double& betaM = beta[m];

				// compute beta[m] = (memory_d[m] dot g)/alpha[m]
				for (size_t c=0; c<dM.size(); c++)
					for (size_t i=0; i<dM[c].size(); i++)
						betaM += dM[c][i]*g[c][i];
				betaM/=alpha;
				
				// q <- q - beta[m]*memory_u[m]
				const vector< vector<double> >& uM = memoryU[m]; 
				for (size_t c=0; c<q.size(); c++)
					for (size_t i=0; i<q[c].size(); i++)
						q[c][i] -= betaM * uM[c][i];

			}

			// q <- sigma*q
			for (size_t c=0; c<q.size(); c++)
				for (size_t i=0; i<q[c].size(); i++)
					q[c][i]*=sigma;


			for (size_t i=0; i<memOrder.size(); i++)
			{
				const size_t m = memOrder[static_cast<size_t>(i)];
				const vector< vector<double> >& uM = memoryU[m];
				const vector< vector<double> >& dM = memoryD[m]; 
				const double betaM = beta[m];
				const double oneOverAlpha = 1.0 / memoryAlpha[m];
				double umq = computeDotProduct(uM,q);
				for (size_t c=0; c<numClasses; c++)
					for (size_t j=0; j<q[c].size(); j++)
					{
						const double dq = dM[c][j] * (betaM - umq*oneOverAlpha);
						umq += uM[c][j]*dq;
						q[c][j] += dq;
					}
		
			}

			// q<- -q
			for (size_t c=0; c<numClasses; c++)
				for (size_t i=0; i<q[c].size(); i++)
					q[c][i]=-q[c][i];
			
			// qtx = q*X
			for (size_t i=0; i<trainingIdxs.size(); i++)
			{
				const MlSample& sample = trainingDataSet->getSample(trainingIdxs[i]);
				for (size_t c=0; c<numClasses; c++)
					qtx[c][i]=computeDotProduct(q[c],sample.pairs);
			}
			

			bool needToRestart=false;
			eta = lineSearch(trainingDataSet, trainingIdxs, w, wtx, qtx, g, q, lambda);
			if (eta<= 0.0)
			{
				// restart ?
				needToRestart = true;
			}

			// update wOld<- w ; w <- w + eta *q ; gOld<- g
			for (size_t c=0; c<numClasses; c++)
			{
				memcpy(&wOld[c][0],&w[c][0],sizeof(double)*w[c].size());
				memcpy(&gOld[c][0],&g[c][0],sizeof(double)*g[c].size());
				for (size_t i=0; i<numFeatures; i++)
					w[c][i]+= eta*q[c][i];
			}

			for (size_t c=0; c<numClasses; c++)
				for (size_t i=0; i<numSamples; i++)
					wtx[c][i]+=eta*qtx[c][i];

			round++;
			totalRounds++;
			if (terminateTraining || needToRestart)
				break;
		}
		
		if (terminateTraining)
			break;
	}

	if (! params->getIndHadInternalError())
	{
		params->setIndNormalTermination(true);
	}
	else
		cout << "Warning: encountered mathematical error while training!" << endl;

	weights_ = bestW;

	cout << "W=" << endl;
	printVector(weights_);
	cout << endl;

	cout << "Terminated after " << totalRounds << " rounds (" << megaRound << " restarts)" << endl;
	cout << "Best training error  " << fixed << setprecision(8) << bestTrainingError << " (round " << bestTrainingRound << ")" << endl;
	if (performTest)
		cout << "Best test error      "  << bestTestError     << " (round " << bestTestRound << ")" << endl;

	indWasInitialized_ = true;

	//this->calcErrorRateWithPerplexity(trainingDataSet, trainingIdxs, true, NULL);
}
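The memory loops above (the backward pass computing beta and the forward pass re-adding the corrections) are a variant of the standard L-BFGS two-loop recursion. For reference, here is a textbook sketch on flat vectors; it illustrates the idea only and is not the per-class update used by learnLMBFGS.

#include <deque>
#include <numeric>
#include <vector>

// L-BFGS two-loop recursion: given the current gradient g and the stored
// pairs s_k = x_{k+1}-x_k, y_k = g_{k+1}-g_k (oldest..newest), return the
// quasi-Newton search direction -H*g.
std::vector<double> lbfgsTwoLoopSketch(
    const std::vector<double>& g,
    const std::deque<std::vector<double>>& s,
    const std::deque<std::vector<double>>& y)
{
    auto dot = [](const std::vector<double>& a, const std::vector<double>& b) {
        return std::inner_product(a.begin(), a.end(), b.begin(), 0.0);
    };

    std::vector<double> q = g;
    const int m = static_cast<int>(s.size());
    std::vector<double> alpha(m), rho(m);
    for (int i = m - 1; i >= 0; --i) {          // first loop: newest to oldest
        rho[i]   = 1.0 / dot(y[i], s[i]);
        alpha[i] = rho[i] * dot(s[i], q);
        for (std::size_t j = 0; j < q.size(); ++j) q[j] -= alpha[i] * y[i][j];
    }
    const double gamma = m > 0 ? dot(s[m-1], y[m-1]) / dot(y[m-1], y[m-1]) : 1.0;
    for (double& qj : q) qj *= gamma;           // initial Hessian scaling
    for (int i = 0; i < m; ++i) {               // second loop: oldest to newest
        const double beta = rho[i] * dot(y[i], q);
        for (std::size_t j = 0; j < q.size(); ++j) q[j] += s[i][j] * (alpha[i] - beta);
    }
    for (double& qj : q) qj = -qj;              // descent direction -H*g
    return q;
}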
Example #6
/*
 * Main solver routine.
 */
idxint ECOS_solve(pwork* w)
{
	idxint i, initcode, KKT_FACTOR_RETURN_CODE;
	pfloat dtau_denom, dtauaff, dkapaff, sigma, dtau, dkap, bkap, pres_prev;
	idxint exitcode = ECOS_FATAL;
#if PROFILING > 0
	timer tsolve;
#endif
#if PROFILING > 1
    timer tfactor, tkktsolve;
#endif
    
#if PROFILING > 0
    /* start timer */
    tic(&tsolve);
#endif
	
	/* Initialize solver */
    initcode = init(w);
	if( initcode == ECOS_FATAL ){
#if PRINTLEVEL > 0
        if( w->stgs->verbose ) PRINTTEXT("\nFatal error during initialization, aborting.");
#endif
        return ECOS_FATAL;
    }
    
    
    
	/* MAIN INTERIOR POINT LOOP ---------------------------------------------------------------------- */
	for( w->info->iter = 0; w->info->iter <= w->stgs->maxit; w->info->iter++ ){
        
		/* Compute residuals */
		computeResiduals(w);
        
		/* Update statistics */
		updateStatistics(w);

#if PRINTLEVEL > 1
		/* Print info */
		if( w->stgs->verbose ) printProgress(w->info);
#endif		
        
        
        /* SAFEGUARD: Backtrack to old iterate if the update was bad such that the primal residual PRES has
         *            increased by a factor of SAFEGUARD.
         * If the safeguard is activated, the solver quits with the flag ECOS_NUMERICS.
         */
        if( w->info->iter > 0 && w->info->pres > SAFEGUARD*pres_prev ){
#if PRINTLEVEL > 1
            if( w->stgs->verbose ) PRINTTEXT("\nNUMERICAL PROBLEMS, recovering iterate %d and stopping.\n", (int)w->info->iter-1);
#endif
            /* Backtrack */
            for( i=0; i < w->n; i++ ){ w->x[i] -= w->info->step * w->KKT->dx2[i]; }
            for( i=0; i < w->p; i++ ){ w->y[i] -= w->info->step * w->KKT->dy2[i]; }
            for( i=0; i < w->m; i++ ){ w->z[i] -= w->info->step * w->KKT->dz2[i]; }
            for( i=0; i < w->m; i++ ){ w->s[i] -= w->info->step * w->dsaff[i]; }
            w->kap -= w->info->step * dkap;
            w->tau -= w->info->step * dtau;
            exitcode = ECOS_NUMERICS;
            computeResiduals(w);
            updateStatistics(w);
            break;
        }
        pres_prev = w->info->pres;
        

		/* Check termination criteria and exit if necessary */
		/* Optimal? */
		if( ( ( -w->cx > 0 ) || ( -w->by - w->hz > 0) ) &&
            w->info->pres < w->stgs->feastol && w->info->dres < w->stgs->feastol &&
			( w->info->gap < w->stgs->abstol || w->info->relgap < w->stgs->reltol ) ){
#if PRINTLEVEL > 0
			if( w->stgs->verbose ) PRINTTEXT("\nOPTIMAL (within feastol=%3.1e, reltol=%3.1e, abstol=%3.1e).", w->stgs->feastol, w->stgs->reltol, w->stgs->abstol);
#endif
	        exitcode = ECOS_OPTIMAL;
			break;
		}            
		/* Primal infeasible? (note: pinfres != NAN is always true; the intended NaN guard has no effect,
		 * but a NaN pinfres also fails the < feastol comparison, so the check is still safe) */
		else if( ((w->info->pinfres != NAN) && (w->info->pinfres < w->stgs->feastol)) ||
                 ((w->tau < w->stgs->feastol) && (w->kap < w->stgs->feastol && w->info->pinfres < w->stgs->feastol)) ){
#if PRINTLEVEL > 0
			if( w->stgs->verbose ) PRINTTEXT("\nPRIMAL INFEASIBLE (within feastol=%3.1e, reltol=%3.1e, abstol=%3.1e).", w->stgs->feastol, w->stgs->reltol, w->stgs->abstol);
#endif
			w->info->pinf = 1;
			w->info->dinf = 0;
			exitcode = ECOS_PINF;
			break;
		}        
		/* Dual infeasible? (same note: dinfres != NAN is always true) */
		else if( (w->info->dinfres != NAN) && (w->info->dinfres < w->stgs->feastol) ){
#if PRINTLEVEL > 0
			if( w->stgs->verbose ) PRINTTEXT("\nUNBOUNDED (within feastol=%3.1e, reltol=%3.1e, abstol=%3.1e).", w->stgs->feastol, w->stgs->reltol, w->stgs->abstol);
#endif
			w->info->pinf = 0;  
			w->info->dinf = 1;        
			exitcode = ECOS_DINF;
			break;
		}   
		/* Did the line search fail? (zero step length) */
		else if( w->info->iter > 0 && w->info->step == STEPMIN*GAMMA ){
#if PRINTLEVEL > 0
			if( w->stgs->verbose ) PRINTTEXT("\nNo further progress possible (- numerics?), exiting.");
#endif
			exitcode = ECOS_NUMERICS;
			break;
		}
		/* MAXIT reached? */
		else if( w->info->iter == w->stgs->maxit ){
#if PRINTLEVEL > 0
			if( w->stgs->verbose ) PRINTTEXT("\nMaximum number of iterations reached, exiting.");
#endif
			exitcode = ECOS_MAXIT;
			break;
		}


		/* Compute scalings */
		if( updateScalings(w->C, w->s, w->z, w->lambda) == OUTSIDE_CONE ){
#if PRINTLEVEL > 0
            if( w->stgs->verbose ) PRINTTEXT("\nSlacks or multipliers leaving the positive orthant (- numerics ?), exiting.\n");
#endif
            return ECOS_OUTCONE;
        }
        
		/* Update KKT matrix with scalings */
		kkt_update(w->KKT->PKPt, w->KKT->PK, w->C);
        
#if DEBUG > 0
        dumpSparseMatrix(w->KKT->PKPt,"PKPt_updated.txt");
#endif
        /* factor KKT matrix */
#if PROFILING > 1
		tic(&tfactor);
        KKT_FACTOR_RETURN_CODE = kkt_factor(w->KKT, w->stgs->eps, w->stgs->delta, &w->info->tfactor_t1, &w->info->tfactor_t2);
        w->info->tfactor += toc(&tfactor);
#else
        KKT_FACTOR_RETURN_CODE = kkt_factor(w->KKT, w->stgs->eps, w->stgs->delta);
#endif

		/* Solve for RHS1, which is used later also in combined direction */
#if PROFILING > 1
		tic(&tkktsolve);
#endif
		w->info->nitref1 = kkt_solve(w->KKT, w->A, w->G, w->KKT->RHS1, w->KKT->dx1, w->KKT->dy1, w->KKT->dz1, w->n, w->p, w->m, w->C, 0, w->stgs->nitref);
#if PROFILING > 1
		w->info->tkktsolve += toc(&tkktsolve);
#endif
  
		/* AFFINE SEARCH DIRECTION (predictor, need dsaff and dzaff only) */
		RHS_affine(w);
#if PROFILING > 1
		tic(&tkktsolve);
#endif
		w->info->nitref2 = kkt_solve(w->KKT, w->A, w->G, w->KKT->RHS2, w->KKT->dx2, w->KKT->dy2, w->KKT->dz2, w->n, w->p, w->m, w->C, 0, w->stgs->nitref);
#if PROFILING > 1
		w->info->tkktsolve += toc(&tkktsolve);
#endif
        
		/* dtau_denom = kap/tau - (c'*x1 + by1 + h'*z1); */
		dtau_denom = w->kap/w->tau - ddot(w->n, w->c, w->KKT->dx1) - ddot(w->p, w->b, w->KKT->dy1) - ddot(w->m, w->h, w->KKT->dz1);
		
        /* dtauaff = (dt + c'*x2 + by2 + h'*z2) / dtau_denom; */
		dtauaff = (w->rt - w->kap + ddot(w->n, w->c, w->KKT->dx2) + ddot(w->p, w->b, w->KKT->dy2) + ddot(w->m, w->h, w->KKT->dz2)) / dtau_denom;
        
		/* dzaff = dz2 + dtau_aff*dz1 */
		for( i=0; i<w->m; i++ ){ w->W_times_dzaff[i] = w->KKT->dz2[i] + dtauaff*w->KKT->dz1[i]; } 
		scale(w->W_times_dzaff, w->C, w->W_times_dzaff);

		/* W\dsaff = -W*dzaff -lambda; */		
		for( i=0; i<w->m; i++ ){ w->dsaff_by_W[i] = -w->W_times_dzaff[i] - w->lambda[i]; }
		
		/* dkapaff = -(bkap + kap*dtauaff)/tau; bkap = kap*tau*/
		dkapaff = -w->kap - w->kap/w->tau*dtauaff;
        
        /* Line search on W\dsaff and W*dzaff */
		w->info->step_aff = lineSearch(w->lambda, w->dsaff_by_W, w->W_times_dzaff, w->tau, dtauaff, w->kap, dkapaff, w->C, w->KKT);
        
		/* Centering parameter */
        sigma = 1.0 - w->info->step_aff;
        sigma = sigma*sigma*sigma;
        if( sigma > SIGMAMAX ) sigma = SIGMAMAX;
        if( sigma < SIGMAMIN ) sigma = SIGMAMIN;
        w->info->sigma = sigma;
        
		
		/* COMBINED SEARCH DIRECTION */
		RHS_combined(w);
#if PROFILING > 1
		tic(&tkktsolve);
#endif
		w->info->nitref3 = kkt_solve(w->KKT, w->A, w->G, w->KKT->RHS2, w->KKT->dx2, w->KKT->dy2, w->KKT->dz2, w->n, w->p, w->m, w->C, 0, w->stgs->nitref);
#if PROFILING > 1
		w->info->tkktsolve += toc(&tkktsolve);
#endif
        
  		/* bkap = kap*tau + dkapaff*dtauaff - sigma*info.mu; */
		bkap = w->kap*w->tau + dkapaff*dtauaff - sigma*w->info->mu;

		/* dtau = ((1-sigma)*rt - bkap/tau + c'*x2 + by2 + h'*z2) / dtau_denom; */		
		dtau = ((1-sigma)*w->rt - bkap/w->tau + ddot(w->n, w->c, w->KKT->dx2) + ddot(w->p, w->b, w->KKT->dy2) + ddot(w->m, w->h, w->KKT->dz2)) / dtau_denom;
      	
		/* dx = x2 + dtau*x1;     dy = y2 + dtau*y1;       dz = z2 + dtau*z1; */
		for( i=0; i < w->n; i++ ){ w->KKT->dx2[i] += dtau*w->KKT->dx1[i]; }
		for( i=0; i < w->p; i++ ){ w->KKT->dy2[i] += dtau*w->KKT->dy1[i]; }
		for( i=0; i < w->m; i++ ){ w->KKT->dz2[i] += dtau*w->KKT->dz1[i]; }

		/*  ds_by_W = -(lambda \ bs + conelp_timesW(scaling,dz,dims)); */
		/* note that at this point w->dsaff_by_W already holds (lambda \ ds) */
		scale(w->KKT->dz2, w->C, w->W_times_dzaff);
		for( i=0; i < w->m; i++ ){ w->dsaff_by_W[i] = -(w->dsaff_by_W[i] + w->W_times_dzaff[i]); }

		/* dkap = -(bkap + kap*dtau)/tau; */
		dkap = -(bkap + w->kap*dtau)/w->tau;

		/* Line search on combined direction */
		w->info->step = lineSearch(w->lambda, w->dsaff_by_W, w->W_times_dzaff, w->tau, dtau, w->kap, dkap, w->C, w->KKT) * w->stgs->gamma;
		
		/* ds = W*ds_by_W */
		scale(w->dsaff_by_W, w->C, w->dsaff);

		/* Update variables */
		for( i=0; i < w->n; i++ ){ w->x[i] += w->info->step * w->KKT->dx2[i]; }
		for( i=0; i < w->p; i++ ){ w->y[i] += w->info->step * w->KKT->dy2[i]; }
		for( i=0; i < w->m; i++ ){ w->z[i] += w->info->step * w->KKT->dz2[i]; }
		for( i=0; i < w->m; i++ ){ w->s[i] += w->info->step * w->dsaff[i]; }
		w->kap += w->info->step * dkap;
		w->tau += w->info->step * dtau;
	}

	/* scale variables back */    
	backscale(w);

	/* stop timer */
#if PROFILING > 0
	w->info->tsolve = toc(&tsolve);
#endif

#if PRINTLEVEL > 0
#if PROFILING > 0
	if( w->stgs->verbose ) PRINTTEXT("\nRuntime: %f seconds.", w->info->tsetup + w->info->tsolve);
#endif
	if( w->stgs->verbose ) PRINTTEXT("\n\n");
#endif

	return exitcode;
}
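The two lineSearch() calls above compute the largest step that keeps the scaled variables (and tau, kappa) inside the cone. The following sketch covers only the LP-cone (positive orthant) part with assumed arguments; the real ECOS routine additionally handles second-order cones and the tau/kappa pair.

/* Largest alpha in (0, 1] such that lambda + alpha*ds >= 0 and
 * lambda + alpha*dz >= 0, i.e. the iterate stays in the positive orthant. */
static double orthantLineSearchSketch(const double* lambda,
                                      const double* ds, const double* dz,
                                      int m)
{
    double alpha = 1.0;
    for (int i = 0; i < m; i++) {
        if (ds[i] < 0.0 && -lambda[i] / ds[i] < alpha) alpha = -lambda[i] / ds[i];
        if (dz[i] < 0.0 && -lambda[i] / dz[i] < alpha) alpha = -lambda[i] / dz[i];
    }
    return alpha;
}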
Example #7
/*
 * Main solver routine.
 */
idxint ECOS_solve(pwork* w)
{
	idxint i, initcode, KKT_FACTOR_RETURN_CODE;
	pfloat dtau_denom, dtauaff, dkapaff, sigma, dtau, dkap, bkap, pres_prev;
	idxint exitcode = ECOS_FATAL, interrupted;
    
#if DEBUG
    char fn[20];
#endif
    
#if (defined _WIN32 || defined _WIN64 )
	/* sets width of exponent for floating point numbers to 2 instead of 3 */
	unsigned int old_output_format = _set_output_format(_TWO_DIGIT_EXPONENT);
#endif

#if PROFILING > 0
	timer tsolve;
#endif
#if PROFILING > 1
    timer tfactor, tkktsolve;
#endif
    
#if PROFILING > 0
    /* start timer */
    tic(&tsolve);
#endif
	
    /* initialize ctrl-c support */
    init_ctrlc();

	/* Initialize solver */
    initcode = init(w);
	if( initcode == ECOS_FATAL ){
#if PRINTLEVEL > 0
        if( w->stgs->verbose ) PRINTTEXT("\nFatal error during initialization, aborting.");
#endif
        return ECOS_FATAL;
    }
    
    
    
	/* MAIN INTERIOR POINT LOOP ---------------------------------------------------------------------- */
	for( w->info->iter = 0; w->info->iter <= w->stgs->maxit ; w->info->iter++ ){
        
		/* Compute residuals */
		computeResiduals(w);
        
		/* Update statistics */
		updateStatistics(w);

#if PRINTLEVEL > 1
		/* Print info */
		if( w->stgs->verbose ) printProgress(w->info);
#endif
        
        /* SAFEGUARD: Backtrack to best previously seen iterate if
         *
         * - the update was bad such that the primal residual PRES has increased by a factor of SAFEGUARD, or
         * - the gap became negative
         *
         * If the safeguard is activated, the solver tests if reduced precision has been reached, and reports
         * accordingly. If not even reduced precision is reached, ECOS returns the flag ECOS_NUMERICS.
         */
        if( w->info->iter > 0 && (w->info->pres > SAFEGUARD*pres_prev || w->info->gap < 0) ){
#if PRINTLEVEL > 1
            if( w->stgs->verbose ) deleteLastProgressLine( w->info );
            if( w->stgs->verbose ) PRINTTEXT("Unreliable search direction detected, recovering best iterate (%d) and stopping.\n", (int)w->best_info->iter);
#endif
            restoreBestIterate( w );
            
            /* Determine whether we have reached at least reduced accuracy */
            exitcode = checkExitConditions( w, ECOS_INACC_OFFSET );
            
            /* if not, exit anyways */
            if( exitcode == ECOS_NOT_CONVERGED_YET ){
                exitcode = ECOS_NUMERICS;
#if PRINTLEVEL > 0
                if( w->stgs->verbose ) PRINTTEXT("\nNUMERICAL PROBLEMS (reached feastol=%3.1e, reltol=%3.1e, abstol=%3.1e).", MAX(w->info->dres, w->info->pres), w->info->relgap, w->info->gap);
#endif
                break;
            } else {
                break;
            }
        }
        pres_prev = w->info->pres;
        

		/* Check termination criteria to full precision and exit if necessary */
		exitcode = checkExitConditions( w, 0 );
        interrupted = check_ctrlc();
        if( exitcode == ECOS_NOT_CONVERGED_YET ){
            
            /*
             * Full precision has not been reached yet. Check for two more cases of exit:
             *  (i) min step size, in which case we assume we won't make progress any more, and
             * (ii) maximum number of iterations reached
             * If these two are not fulfilled, another iteration will be made.
             */
            
            /* Did the line search fail? (zero step length) */
            if( w->info->iter > 0 && w->info->step == STEPMIN*GAMMA ){
#if PRINTLEVEL > 0
                if( w->stgs->verbose ) deleteLastProgressLine( w->info );
                if( w->stgs->verbose ) PRINTTEXT("No further progress possible, recovering best iterate (%d) and stopping.", (int)w->best_info->iter );
#endif
                restoreBestIterate( w );
                
                /* Determine whether we have reached reduced precision */
                exitcode = checkExitConditions( w, ECOS_INACC_OFFSET );
                if( exitcode == ECOS_NOT_CONVERGED_YET ){
                    exitcode = ECOS_NUMERICS;
#if PRINTLEVEL > 0
                    if( w->stgs->verbose ) PRINTTEXT("\nNUMERICAL PROBLEMS (reached feastol=%3.1e, reltol=%3.1e, abstol=%3.1e).", MAX(w->info->dres, w->info->pres), w->info->relgap, w->info->gap);
#endif
                }
                break;
            }
            /* MAXIT reached? */
            else if( interrupted || w->info->iter == w->stgs->maxit ){

#if PRINTLEVEL > 0                
                const char *what = interrupted ? "SIGINT intercepted" : "Maximum number of iterations reached";
#endif                
                /* Determine whether current iterate is better than what we had so far */
                if( compareStatistics( w->info, w->best_info) ){
#if PRINTLEVEL > 0
                    if( w->stgs->verbose ) 
                        PRINTTEXT("%s, stopping.\n",what);
#endif
                } else
                {
#if PRINTLEVEL > 0
                    if( w->stgs->verbose ) 
                        PRINTTEXT("%s, recovering best iterate (%d) and stopping.\n", what, (int)w->best_info->iter);
#endif
                    restoreBestIterate( w );
                }
                
                /* Determine whether we have reached reduced precision */
                exitcode = checkExitConditions( w, ECOS_INACC_OFFSET );
                if( exitcode == ECOS_NOT_CONVERGED_YET ){
                    exitcode = interrupted ? ECOS_SIGINT : ECOS_MAXIT;
#if PRINTLEVEL > 0
                    if( w->stgs->verbose ) {
                        const char* what = interrupted ? "INTERRUPTED" : "RAN OUT OF ITERATIONS";
                        PRINTTEXT("\n%s (reached feastol=%3.1e, reltol=%3.1e, abstol=%3.1e).", what, MAX(w->info->dres, w->info->pres), w->info->relgap, w->info->gap);
                    }
#endif
                }
                break;

            }
        } else {
            
            /* Full precision has been reached, stop solver */
            break;
        }
        
		
        
        /* SAFEGUARD:
         * Check whether current iterate is worth keeping as the best solution so far,
         * before doing another iteration
         */
        if (w->info->iter == 0) {
            /* we're at the first iterate, so there's nothing to compare yet */
            saveIterateAsBest( w );
        } else if( compareStatistics( w->info, w->best_info) ){
            /* PRINTTEXT("Better solution found, saving as best so far \n"); */
            saveIterateAsBest( w );
        }
        

		/* Compute scalings */
		if( updateScalings(w->C, w->s, w->z, w->lambda) == OUTSIDE_CONE ){
            
            /* SAFEGUARD: we have to recover here */
#if PRINTLEVEL > 0
            if( w->stgs->verbose ) deleteLastProgressLine( w->info );
            if( w->stgs->verbose ) PRINTTEXT("Slacks/multipliers leaving the cone, recovering best iterate (%d) and stopping.\n", (int)w->best_info->iter);
#endif
            restoreBestIterate( w );
            
            /* Determine whether we have reached at least reduced accuracy */
            exitcode = checkExitConditions( w, ECOS_INACC_OFFSET );
            if( exitcode == ECOS_NOT_CONVERGED_YET ){
#if PRINTLEVEL > 0
                if( w->stgs->verbose ) PRINTTEXT("\nNUMERICAL PROBLEMS (reached feastol=%3.1e, reltol=%3.1e, abstol=%3.1e).", MAX(w->info->dres, w->info->pres), w->info->relgap, w->info->gap);
#endif
                return ECOS_OUTCONE;

            } else {
                break;
            }
        }
        
		/* Update KKT matrix with scalings */
		kkt_update(w->KKT->PKPt, w->KKT->PK, w->C);
        
#if DEBUG > 0
        /* DEBUG: Store matrix to be factored */
        sprintf(fn, "PKPt_updated_%02i.txt", (int)w->info->iter);
        dumpSparseMatrix(w->KKT->PKPt, fn);
#endif
        /* factor KKT matrix */
#if PROFILING > 1
		tic(&tfactor);
        KKT_FACTOR_RETURN_CODE = kkt_factor(w->KKT, w->stgs->eps, w->stgs->delta, &w->info->tfactor_t1, &w->info->tfactor_t2);
        w->info->tfactor += toc(&tfactor);
#else
        KKT_FACTOR_RETURN_CODE = kkt_factor(w->KKT, w->stgs->eps, w->stgs->delta);
#endif
        
#if DEBUG > 0
        /* DEBUG: store factor */
        sprintf(fn, "PKPt_factor_%02i.txt", (int)w->info->iter);
        dumpSparseMatrix(w->KKT->L, fn);
#endif

		/* Solve for RHS1, which is used later also in combined direction */
#if PROFILING > 1
		tic(&tkktsolve);
#endif
		w->info->nitref1 = kkt_solve(w->KKT, w->A, w->G, w->KKT->RHS1, w->KKT->dx1, w->KKT->dy1, w->KKT->dz1, w->n, w->p, w->m, w->C, 0, w->stgs->nitref);
#if PROFILING > 1
		w->info->tkktsolve += toc(&tkktsolve);
#endif
        
#if DEBUG > 0 && PRINTLEVEL > 2
        /* Print result of linear system solve */
        printDenseMatrix(w->KKT->dx1, 1, 5, "dx1(1:5)");
        printDenseMatrix(w->KKT->dy1, 1, 5, "dy1(1:5)");
        printDenseMatrix(w->KKT->dz1, 1, 5, "dz1(1:5)");
#endif
  
		/* AFFINE SEARCH DIRECTION (predictor, need dsaff and dzaff only) */
		RHS_affine(w);
#if PROFILING > 1
		tic(&tkktsolve);
#endif
		w->info->nitref2 = kkt_solve(w->KKT, w->A, w->G, w->KKT->RHS2, w->KKT->dx2, w->KKT->dy2, w->KKT->dz2, w->n, w->p, w->m, w->C, 0, w->stgs->nitref);
#if PROFILING > 1
		w->info->tkktsolve += toc(&tkktsolve);
#endif
        
		/* dtau_denom = kap/tau - (c'*x1 + by1 + h'*z1); */
		dtau_denom = w->kap/w->tau - eddot(w->n, w->c, w->KKT->dx1) - eddot(w->p, w->b, w->KKT->dy1) - eddot(w->m, w->h, w->KKT->dz1);
		
        /* dtauaff = (dt + c'*x2 + by2 + h'*z2) / dtau_denom; */
		dtauaff = (w->rt - w->kap + eddot(w->n, w->c, w->KKT->dx2) + eddot(w->p, w->b, w->KKT->dy2) + eddot(w->m, w->h, w->KKT->dz2)) / dtau_denom;
        
		/* dzaff = dz2 + dtau_aff*dz1 */
		for( i=0; i<w->m; i++ ){ w->W_times_dzaff[i] = w->KKT->dz2[i] + dtauaff*w->KKT->dz1[i]; } 
		scale(w->W_times_dzaff, w->C, w->W_times_dzaff);

		/* W\dsaff = -W*dzaff -lambda; */		
		for( i=0; i<w->m; i++ ){ w->dsaff_by_W[i] = -w->W_times_dzaff[i] - w->lambda[i]; }
		
		/* dkapaff = -(bkap + kap*dtauaff)/tau; bkap = kap*tau*/
		dkapaff = -w->kap - w->kap/w->tau*dtauaff;
        
        /* Line search on W\dsaff and W*dzaff */
		w->info->step_aff = lineSearch(w->lambda, w->dsaff_by_W, w->W_times_dzaff, w->tau, dtauaff, w->kap, dkapaff, w->C, w->KKT);
        
		/* Centering parameter */
        sigma = 1.0 - w->info->step_aff;
        sigma = sigma*sigma*sigma;
        if( sigma > SIGMAMAX ) sigma = SIGMAMAX;
        if( sigma < SIGMAMIN ) sigma = SIGMAMIN;
        w->info->sigma = sigma;
        
		
		/* COMBINED SEARCH DIRECTION */
		RHS_combined(w);
#if PROFILING > 1
		tic(&tkktsolve);
#endif
		w->info->nitref3 = kkt_solve(w->KKT, w->A, w->G, w->KKT->RHS2, w->KKT->dx2, w->KKT->dy2, w->KKT->dz2, w->n, w->p, w->m, w->C, 0, w->stgs->nitref);
#if PROFILING > 1
		w->info->tkktsolve += toc(&tkktsolve);
#endif
        
  		/* bkap = kap*tau + dkapaff*dtauaff - sigma*info.mu; */
		bkap = w->kap*w->tau + dkapaff*dtauaff - sigma*w->info->mu;

		/* dtau = ((1-sigma)*rt - bkap/tau + c'*x2 + by2 + h'*z2) / dtau_denom; */		
		dtau = ((1-sigma)*w->rt - bkap/w->tau + eddot(w->n, w->c, w->KKT->dx2) + eddot(w->p, w->b, w->KKT->dy2) + eddot(w->m, w->h, w->KKT->dz2)) / dtau_denom;
      	
		/* dx = x2 + dtau*x1;     dy = y2 + dtau*y1;       dz = z2 + dtau*z1; */
		for( i=0; i < w->n; i++ ){ w->KKT->dx2[i] += dtau*w->KKT->dx1[i]; }
		for( i=0; i < w->p; i++ ){ w->KKT->dy2[i] += dtau*w->KKT->dy1[i]; }
		for( i=0; i < w->m; i++ ){ w->KKT->dz2[i] += dtau*w->KKT->dz1[i]; }

		/*  ds_by_W = -(lambda \ bs + conelp_timesW(scaling,dz,dims)); */
		/* note that at this point w->dsaff_by_W already holds (lambda \ ds) */
		scale(w->KKT->dz2, w->C, w->W_times_dzaff);
		for( i=0; i < w->m; i++ ){ w->dsaff_by_W[i] = -(w->dsaff_by_W[i] + w->W_times_dzaff[i]); }

		/* dkap = -(bkap + kap*dtau)/tau; */
		dkap = -(bkap + w->kap*dtau)/w->tau;

		/* Line search on combined direction */
		w->info->step = lineSearch(w->lambda, w->dsaff_by_W, w->W_times_dzaff, w->tau, dtau, w->kap, dkap, w->C, w->KKT) * w->stgs->gamma;
		
		/* ds = W*ds_by_W */
		scale(w->dsaff_by_W, w->C, w->dsaff);

		/* Update variables */
		for( i=0; i < w->n; i++ ){ w->x[i] += w->info->step * w->KKT->dx2[i]; }
		for( i=0; i < w->p; i++ ){ w->y[i] += w->info->step * w->KKT->dy2[i]; }
		for( i=0; i < w->m; i++ ){ w->z[i] += w->info->step * w->KKT->dz2[i]; }
		for( i=0; i < w->m; i++ ){ w->s[i] += w->info->step * w->dsaff[i]; }
		w->kap += w->info->step * dkap;
		w->tau += w->info->step * dtau;
	}

	/* scale variables back */    
	backscale(w);

	/* stop timer */
#if PROFILING > 0
	w->info->tsolve = toc(&tsolve);
#endif

#if PRINTLEVEL > 0
#if PROFILING > 0
	if( w->stgs->verbose ) PRINTTEXT("\nRuntime: %f seconds.", w->info->tsetup + w->info->tsolve);
#endif
	if( w->stgs->verbose ) PRINTTEXT("\n\n");
#endif

    remove_ctrlc();
	return exitcode;
}
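Compared to Example #6, this version adds a safeguard that keeps the best iterate seen so far and restores it when the search direction becomes unreliable (compareStatistics / saveIterateAsBest / restoreBestIterate above). A minimal sketch of that bookkeeping pattern, with illustrative names rather than the ECOS data structures:

#include <vector>

// Keep a copy of the iterate with the best merit seen so far and restore it
// when the solver detects numerical trouble. Lower merit is better.
struct BestIterateSketch {
    std::vector<double> x;
    double merit = 1e300;

    void maybeSave(const std::vector<double>& xk, double meritk) {
        if (meritk < merit) { x = xk; merit = meritk; }
    }
    void restore(std::vector<double>& xk) const {
        if (!x.empty()) xk = x;
    }
};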
Example #8
void MainWindow::createBars() {
    createActions();

    qpbMain = new QProgressBar;
        qpbMain->setMaximumSize( 150, 15 );
        qpbMain->setTextVisible( 0 );

    setStatusBar(qsbMain = new QStatusBar);
        qsbMain->showMessage( tr( "Ready" ));
        qsbMain->addPermanentWidget( qpbMain );

    setMenuBar( qmbMain = new QMenuBar );
        qmbMain->addMenu( qmFile = new QMenu( tr( "&File" )));
            qmFile->addSeparator();
            qmFile->addAction( qaPrintDialog );
            qmFile->addSeparator();
            qmFile->addAction( qaExit );
        qmbMain->addMenu( qmEdit = new QMenu( tr( "&Edit" )));
            qmEdit->addActions( qagNavigation->actions ());
            qmEdit->addAction( qaSearch );
            qmEdit->addMenu (qmSubEdit = new QMenu( tr( "Languages" )));
                qmSubEdit->addActions (qagLanguages->actions ());
        qmbMain->addMenu( qmView = new QMenu( tr( "&View" )));
            qmView->addActions ( qagZoom->actions ());
        qmbMain->addMenu( qmHelp = new QMenu( tr( "&Help" )));
            qmHelp->addAction( qaAboutQt );
            qmHelp->addSeparator();
            qmHelp->addAction( qaAbout );

    qtbDeleteSearch = new QToolButton(this);
        qtbDeleteSearch->setDefaultAction( qaClearSearch );
        qtbDeleteSearch->setToolTip( "Clear" );
        qtbDeleteSearch->setFocusPolicy( Qt::NoFocus );

    qleSearch = new QLineEdit;
        connect( qleSearch, SIGNAL( returnPressed ()), this, SLOT( lineSearch ()));
        connect( qleSearch, SIGNAL( returnPressed ()), this, SLOT( setSearchWord ()));

#if ( QT_VERSION >= 0x040700 )
        qleSearch->setPlaceholderText( tr( "Search" ));
#endif

    // The completion model must outlive this function, so create it on the
    // heap with this window as parent (a stack-local model would leave the
    // completer holding a dangling pointer).
    QSqlTableModel* qstm = new QSqlTableModel(this);
        qstm->setTable("searchWords"); // table name
        qstm->removeColumn(0); // remove the id column
        qstm->removeColumn(2); // remove the numberofused column
        qstm->select();

    QCompleter *qcSearchWordHelp = new QCompleter(qstm, this);
        qcSearchWordHelp->setCompletionMode(QCompleter::InlineCompletion); // inline completion mode
    qleSearch->setCompleter(qcSearchWordHelp);

    qtbMain = new QToolBar( "Toolbar" );
        qtbMain->setFloatable( false );
        qtbMain->setMovable( false );
        qtbMain->addAction( qaHome );
        qtbMain->addSeparator();
        qtbMain->addActions( qagNavigation->actions ());
        qtbMain->addSeparator();
        qtbMain->addWidget( qtbDeleteSearch );
        qtbMain->addWidget( qleSearch );
    addToolBar( qtbMain );
}
Example #9
void Relax::structure(ISO& iso, const LocalPotential& potential, const Symmetry* symmetry) const
{
	
	// Steepest descent run
	if (_relaxMethod == RM_STEEPEST_DESCENT)
		_getLineDirection = &Relax::SD;
	
	// Conjugate gradient run
	else if (_relaxMethod == RM_CONJUGATE_GRADIENT)
		_getLineDirection = &Relax::CG;
	
	// Unknown method
	else
	{
		Output::newline(ERROR);
		Output::print("Unknown relaxation method");
		Output::quit();
	}
	
	// Output
	Output::newline();
	Output::print("Relaxing internal coordinates using ");
	Output::print(relaxMethod(_relaxMethod).tolower());
	Output::print(" algorithm");
	Output::increase();
	
	// Loop until max loops is reached or converged
	int i, j, k;
	int loopNum;
	double stepScale;
	Vector3D newPos;
	OList<Vector3D > direction;
	for (loopNum = 0; loopNum < _maxIterations; ++loopNum)
	{
		
		// Output
		Output::newline();
		if (loopNum == 0)
			Output::print("Initial structure");
		else
		{
			Output::print("Step ");
			Output::print(loopNum);
		}
		Output::increase();

		// Evaluate the forces
		_prevForces = _forces;
		evaluateForces(_forces, iso, potential, symmetry, true);

		// Output
		Output::decrease();

		// Break if converged
		if (areForcesConverged())
			break;
		
		// Break if at max iterations
		if (loopNum == _maxIterations - 1)
			break;
		
		// Get the line search direction
		(this->*_getLineDirection)(direction);
		
		// Get the step size to make
		stepScale = lineSearch(direction, iso, potential, symmetry);
		
		// Set the new positions
		for (i = 0; i < iso.atoms().length(); ++i)
		{
			for (j = 0; j < iso.atoms()[i].length(); ++j)
			{
				newPos = _origPositions[i][j];
				for (k = 0; k < 3; ++k)
					newPos[k] += stepScale * direction[iso.atoms()[i][j].atomNumber()][k];
				iso.atoms()[i][j].cartesian(newPos);
			}
		}
	}
	
	// Did not converge
	if (loopNum >= _maxIterations - 1)
	{
		Output::newline(WARNING);
		Output::print("Failed to reach convergence criterion");
	}
	
	// Output
	Output::decrease();
}
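The two line-direction choices selected at the top of Relax::structure can be sketched as the free functions below (illustrative only, not the Relax interface). Steepest descent simply follows the forces; conjugate gradient mixes the forces with the previous direction using a Polak-Ribiere coefficient.

#include <cstddef>
#include <vector>

void steepestDescentDirectionSketch(const std::vector<double>& forces,
                                    std::vector<double>& direction)
{
    direction = forces;
}

// On entry, `direction` holds the previous search direction; on the first
// call (sizes do not match) we fall back to steepest descent.
void conjugateGradientDirectionSketch(const std::vector<double>& forces,
                                      const std::vector<double>& prevForces,
                                      std::vector<double>& direction)
{
    if (direction.size() != forces.size() || prevForces.size() != forces.size()) {
        direction = forces;
        return;
    }
    double num = 0.0, den = 0.0;
    for (std::size_t i = 0; i < forces.size(); ++i) {
        num += forces[i] * (forces[i] - prevForces[i]);  // Polak-Ribiere numerator
        den += prevForces[i] * prevForces[i];
    }
    const double beta = (den > 0.0 && num > 0.0) ? num / den : 0.0;
    for (std::size_t i = 0; i < forces.size(); ++i)
        direction[i] = forces[i] + beta * direction[i];
}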
Example #10
bool
FiniteStrainPlasticBase::returnMap(const RankTwoTensor & stress_old, const RankTwoTensor & plastic_strain_old, const std::vector<Real> & intnl_old, const RankTwoTensor & delta_d, const RankFourTensor & E_ijkl, RankTwoTensor & stress, RankTwoTensor & plastic_strain, std::vector<Real> & intnl, std::vector<Real> & f, unsigned int & iter)
{

  // Assume this strain increment does not induce any plasticity
  // This is the elastic-predictor
  stress = stress_old + E_ijkl * delta_d; // the trial stress
  plastic_strain = plastic_strain_old;
  for (unsigned i = 0; i < intnl_old.size() ; ++i)
    intnl[i] = intnl_old[i];
  iter = 0;

  yieldFunction(stress, intnl, f);

  Real nr_res2 = 0;
  for (unsigned i = 0 ; i < f.size() ; ++i)
    nr_res2 += 0.5*std::pow( std::max(f[i], 0.0)/_f_tol[i], 2);

  if (nr_res2 < 0.5)
    // a purely elastic increment.
    // All output variables have been calculated
    return true;


  // So, from here on we know that the trial stress
  // is inadmissible, and we have to return from that
  // value to the yield surface.  There are three
  // types of constraints we have to satisfy, listed
  // below, and calculated in calculateConstraints(...)

  // Plastic strain constraint, L2 norm must be zero (up to a tolerance)
  RankTwoTensor epp;

  // Yield function constraint passed to this function as
  // std::vector<Real> & f
  // Each yield function must be <= 0 (up to tolerance)

  // Internal constraint(s), must be zero (up to a tolerance)
  std::vector<Real> ic;


  // During the Newton-Raphson procedure, we'll be
  // changing the following parameters in order to
  // (attempt to) satisfy the constraints.
  RankTwoTensor dstress; // change in stress
  std::vector<Real> dpm; // change in plasticity multipliers ("consistency parameters")
  std::vector<Real>  dintnl; // change in internal parameters



  // The following are used in the Newton-Raphson

  // Inverse of E_ijkl (assuming symmetric)
  RankFourTensor E_inv = E_ijkl.invSymm();

  // convenience variable that holds the change in plastic strain incurred during the return
  // delta_dp = plastic_strain - plastic_strain_old
  // delta_dp = E^{-1}*(trial_stress - stress), where trial_stress = E*(strain - plastic_strain_old)
  RankTwoTensor delta_dp;

  // The "consistency parameters" (plastic multipliers)
  // Change in plastic strain in this timestep = pm*flowPotential
  // Each pm must be non-negative
  std::vector<Real> pm;
  pm.assign(numberOfYieldFunctions(), 0.0);

  // whether line-searching was successful
  bool ls_success = true;

  // The Newton-Raphson loops
  while (nr_res2 > 0.5 && iter < _max_iter && ls_success)
  {
    iter++;

    // calculate dstress, dpm and dintnl for one full Newton-Raphson step
    nrStep(stress, intnl_old, intnl, pm, E_inv, delta_dp, dstress, dpm, dintnl);

    // perform a line search
    // The line-search will exit with updated values
    ls_success = lineSearch(nr_res2, stress, intnl_old, intnl, pm, E_inv, delta_dp, dstress, dpm, dintnl, f, epp, ic);
  }


  if (iter >= _max_iter || !ls_success)
  {
    stress = stress_old;
    for (unsigned i = 0; i < intnl_old.size() ; ++i)
      intnl[i] = intnl_old[i];
    return false;
  }
  else
  {
    plastic_strain += delta_dp;
    return true;
  }

}
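The lineSearch() call inside the Newton-Raphson loop damps the full step until the residual merit nr_res2 decreases. A minimal sketch of that idea on a generic state vector, with assumed names and signature (not the MOOSE FiniteStrainPlasticBase interface):

#include <functional>
#include <vector>

// Halve the Newton step until the residual merit (e.g. 0.5*||residual||^2)
// decreases; give up below a minimum step size. On success, x and nr_res2
// are updated in place.
bool residualLineSearchSketch(
    const std::function<double(const std::vector<double>&)>& merit,
    std::vector<double>& x, const std::vector<double>& dx, double& nr_res2)
{
    const double minStep = 1e-10;
    double step = 1.0;
    std::vector<double> trial(x.size());
    while (step > minStep) {
        for (std::size_t i = 0; i < x.size(); ++i)
            trial[i] = x[i] + step * dx[i];
        const double trialRes = merit(trial);
        if (trialRes < nr_res2) {          // accept the damped step
            x = trial;
            nr_res2 = trialRes;
            return true;
        }
        step *= 0.5;
    }
    return false;                          // line search failed
}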