Example #1
float TimeSeriesLinearReg::TrainEpoch(const matrix_eig &data) {
  matrix_eig X, y;
  ModelUtil::FeatureLabelSplit(*this, data, X, y);
  Fit(X, y);
  matrix_eig y_hat = Predict(X);
  return ModelUtil::MeanSqError(y, y_hat);
}
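ModelUtil::MeanSqError is not shown in this example; for reference, a minimal sketch of such a helper, assuming matrix_eig is a typedef for a float Eigen matrix (an assumption, not the project's actual definition):

#include <Eigen/Dense>

using matrix_eig = Eigen::MatrixXf;  // assumed typedef

// Mean of the squared elementwise residuals between labels and predictions.
float MeanSqError(const matrix_eig &y, const matrix_eig &y_hat) {
  return (y - y_hat).array().square().mean();
}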
Example #2
	int PredictProb( const std::vector<T>& x, std::map<int, double>& prob ) {
		int dimension = scale_info_.dimension();
		assert( static_cast<int>( x.size() ) == dimension );
		assert( prob.empty() );

		if( (libsvm_model_->param.svm_type == C_SVC || libsvm_model_->param.svm_type == NU_SVC)
			&& libsvm_model_->probA!=NULL && libsvm_model_->probB!=NULL ) {
			std::vector<double> scaled_x;
			scale_info_.Scale( x, scaled_x );
			struct svm_node* nodes;
			nodes = (struct svm_node *) malloc( ( dimension + 1 ) * sizeof(struct svm_node) );
			GetSVMNodes( scaled_x, nodes );
			int class_num = libsvm_model_->nr_class;
			// variable-length arrays are not standard C++; use a vector instead
			std::vector<double> probs( class_num );
			double predict_label = svm_predict_probability( libsvm_model_, nodes, probs.data() );
			std::vector<int> class_labels;
			GetClassLabels( class_labels );
			for( int i = 0; i < class_num; ++i ) {
				prob[ class_labels[i] ] = probs[i];
			}
			free(nodes);
			return static_cast<int>( predict_label );
		} else {
			return Predict( x );
		}
	}
Example #3
void CSMMSeqPair::TrainPair(CSeqSet &set)
{
	if(clog_detail.back()>NONE)
	{
		clog << endl << "CPredSMM::TrainPair(" << set.GetName()<<")";
		clog << endl << "Elements in Set: " << set.ElementNumber();
		clog << endl << "Lambda Grouping\t";
		if(m_individual_lambdas)
			clog << "INDIVIDUAL_LAMBDAS";
		else 
			clog << "ONE_GROUP";
		clog << endl << "Min Count\t" << m_min_count;
		clog << endl << "Max Disagreement %\t" << m_max_percent_disagree;
	}

	assert(m_matrix.size()==CLetter::AlphabetSize()*set.SeqLength()+1);
	m_matrix_offset=m_matrix[0];

	m_pair_coef.clear();
	IdentifyPairCoef(set,m_min_count);

	Predict(set);
	assert(m_max_percent_disagree<=0.5);
	DropPairCoef(set,m_max_percent_disagree);

	const size_t count = m_pair_coef.size();
	if(count > 0)
	{
		GenerateLambdaGroups(m_individual_lambdas);
		set.ConvertToSMMSet(m_smm.TrainingSet(),false,m_pair_coef);
		m_smm.TrainRepeater();
		ConvertSolution();
	}
}
Example #4
	void Physics::Simulate()
	{
		for (auto itr = physics_objects_.begin(); itr != physics_objects_.end(); )
		{
			std::shared_ptr<Lame::GameObject> go = (*itr)->gameObject();
			if (go && !go->IsDestroying())
			{
				if ((*itr)->enabled())
				{
					Vector3 position, velocity;
					Predict(*itr, fixed_timestep_, position, velocity);
					go->transform().position(position);
					(*itr)->velocity(velocity);
				}

				++itr;
			}
			else
			{
				itr = physics_objects_.erase(itr);
			}
		}
		if (LameWorld::Exists())
		{
			LameWorld::Get().PhysicsUpdate(fixed_timestep_);
		}
	}
Example #5
	void Physics::Predict(std::shared_ptr<Physics3DComponent> i_comp, const float i_delta_time, Vector3& o_position, Vector3& o_velocity) const
	{
		o_position = i_comp->gameObject()->transform().position();
		o_velocity = i_comp->velocity();
		const Vector3 acceleration = i_comp->constant_acceleration() + gravity() * i_comp->gravity_multiplier();
		Predict(i_delta_time, o_position, o_velocity, acceleration);
	}
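The four-argument Predict overload called on the last line is not included here; a plausible sketch, assuming it performs semi-implicit Euler integration (an assumption about its contents, not the engine's actual code; Vector3 is the engine's vector type):

	// Hypothetical integration step: advance velocity by the acceleration,
	// then advance position by the updated velocity (semi-implicit Euler).
	void Physics::Predict(const float i_delta_time, Vector3& o_position, Vector3& o_velocity, const Vector3& i_acceleration) const
	{
		o_velocity = o_velocity + i_acceleration * i_delta_time;
		o_position = o_position + o_velocity * i_delta_time;
	}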
Example #6
      // Compute the a posteriori estimate of the system state, as well as
      // the a posteriori estimate error variance. Version for
      // one-dimensional systems without control input on the system.
      //
      // @param phiValue          State transition gain.
      // @param processNoiseVariance    Process noise variance.
      // @param measurement       Measurement value.
      // @param measurementsGain  Measurements gain.
      // @param measurementsNoiseVariance   Measurements noise variance.
      //
      // @return
      //  0 if OK
      //  -1 if problems arose
      //
   int SimpleKalmanFilter::Compute( const double& phiValue,
                                    const double& processNoiseVariance,
                                    const double& measurement,
                                    const double& measurementsGain,
                                    const double& measurementsNoiseVariance )
      throw(InvalidSolver)
   {

      try
      {
         Predict( phiValue,
                  xhat(0),
                  processNoiseVariance );

         Correct( measurement,
                  measurementsGain,
                  measurementsNoiseVariance );
      }
      catch(InvalidSolver& e)
      {
         GPSTK_THROW(e);
         return -1;
      }

      return 0;

   }  // End of method 'SimpleKalmanFilter::Compute()'
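The Predict and Correct calls above follow the standard one-dimensional Kalman equations; a minimal self-contained sketch of those two steps (illustrative names, not GPSTk's internals):

   struct ScalarKF
   {
      double xhat;   // a posteriori state estimate
      double P;      // a posteriori estimate error variance

      // A priori step: propagate the state, inflate the variance by the
      // process noise.
      void Predict(double phi, double Q)
      {
         xhat = phi * xhat;
         P = phi * P * phi + Q;
      }

      // A posteriori step: blend the measurement in via the Kalman gain.
      void Correct(double z, double H, double R)
      {
         double K = P * H / (H * P * H + R);
         xhat = xhat + K * (z - H * xhat);
         P = (1.0 - K * H) * P;
      }
   };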
Example #7
	FloatT GradientBoostingForest::FitError()
	{
//		puts("FitError do");
		FloatT ret =0.0;
		assert(NULL != m_pInstancePool);
		FloatT sum_weight = 0.0;
		#pragma omp parallel for schedule(static) reduction(+:sum_weight)
		for(int i=0;i<m_pInstancePool->Size();i++)
		{
			sum_weight += m_pInstancePool->GetInstance(i).weight;
		}
		#pragma omp parallel for schedule(static) reduction(+:ret)
		for(int i=0;i<m_pInstancePool->Size();i++)
		{
			FloatT predict;
			if(0 != Predict(m_pInstancePool->GetInstance(i).X, predict))
			{
				Comm::LogErr("GradientBoostingForest::FitError fail! Predict fail");
			}
			ret = ret + ((m_pInstancePool->GetInstance(i).ys - predict) *
                    (m_pInstancePool->GetInstance(i).ys - predict)) * 
                m_pInstancePool->GetInstance(i).weight;
		}
//		puts("FitError done");
		return sqrt(ret / sum_weight);
	}
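In effect this returns the weighted root-mean-square error over the instance pool:

\mathrm{RMSE}_w = \sqrt{\frac{\sum_i w_i \, (y_i - \hat{y}_i)^2}{\sum_i w_i}}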
Example #8
      // Compute the a posteriori estimate of the system state, as well as
      // the a posteriori estimate error covariance matrix. This version
      // assumes that no control inputs act on the system.
      //
      // @param phiMatrix         State transition matrix.
      // @param processNoiseCovariance    Process noise covariance matrix.
      // @param measurements      Measurements vector.
      // @param measurementsMatrix    Measurements matrix. Called geometry
      //                              matrix in GNSS.
      // @param measurementsNoiseCovariance   Measurements noise covariance
      //                                      matrix.
      //
      // @return
      //  0 if OK
      //  -1 if problems arose
      //
   int SimpleKalmanFilter::Compute( const Matrix<double>& phiMatrix,
                                    const Matrix<double>& processNoiseCovariance,
                                    const Vector<double>& measurements,
                                    const Matrix<double>& measurementsMatrix,
                                    const Matrix<double>& measurementsNoiseCovariance )
      throw(InvalidSolver)
   {

      try
      {
         Predict( phiMatrix,
                  xhat,
                  processNoiseCovariance );

         Correct( measurements,
                  measurementsMatrix,
                  measurementsNoiseCovariance );
      }
      catch(InvalidSolver& e)
      {
         GPSTK_THROW(e);
         return -1;
      }

      return 0;

   }  // End of method 'SimpleKalmanFilter::Compute()'
Example #9
ValueType RegressionTree::Predict(const Node *root, const Tuple &t) {
  if (root->leaf) {
    return root->pred;
  }
  if (t.feature[root->index] == kUnknownValue) {
    if (root->child[Node::UNKNOWN]) {
      return Predict(root->child[Node::UNKNOWN], t);
    } else {
      return root->pred;
    }
  } else if (t.feature[root->index] < root->value) {
    return Predict(root->child[Node::LT], t);
  } else {
    return Predict(root->child[Node::GE], t);
  }
}
Example #10
			/**
			\brief Run an iteration of the tracker loop.

			Predict and correct, adjusting precision and stepsize as necessary.

			\return Success if the step was successful, and a non-success code if something went wrong, such as a linear algebra failure or AMP Criterion violation.
			*/
			SuccessCode TrackerIteration() const override
			{
				static_assert(std::is_same<typename Eigen::NumTraits<RT>::Real,
				                           typename Eigen::NumTraits<CT>::Real>::value,
				              "underlying complex type and the type for comparisons must match");

				this->NotifyObservers(NewStep<EmitterType >(*this));

				Vec<CT>& predicted_space = std::get<Vec<CT> >(this->temporary_space_); // this will be populated in the Predict step
				Vec<CT>& current_space = std::get<Vec<CT> >(this->current_space_); // the thing we ultimately wish to update
				CT current_time = CT(this->current_time_);
				CT delta_t = CT(this->delta_t_);

				SuccessCode predictor_code = Predict(predicted_space, current_space, current_time, delta_t);

				if (predictor_code!=SuccessCode::Success)
				{
					this->NotifyObservers(FirstStepPredictorMatrixSolveFailure<EmitterType >(*this));

					this->next_stepsize_ = this->stepping_config_.step_size_fail_factor*this->current_stepsize_;

					UpdateStepsize();

					return predictor_code;
				}

				this->NotifyObservers(SuccessfulPredict<EmitterType , CT>(*this, predicted_space));

				Vec<CT>& tentative_next_space = std::get<Vec<CT> >(this->tentative_space_); // this will be populated in the Correct step

				CT tentative_next_time = current_time + delta_t;

				SuccessCode corrector_code = Correct(tentative_next_space,
													 predicted_space,
													 tentative_next_time);

				if (corrector_code == SuccessCode::GoingToInfinity)
				{
					// there is no corrective action possible...
					return corrector_code;
				}
				else if (corrector_code!=SuccessCode::Success)
				{
					this->NotifyObservers(CorrectorMatrixSolveFailure<EmitterType >(*this));

					this->next_stepsize_ = this->stepping_config_.step_size_fail_factor*this->current_stepsize_;
					UpdateStepsize();

					return corrector_code;
				}

				
				this->NotifyObservers(SuccessfulCorrect<EmitterType , CT>(*this, tentative_next_space));

				// copy the tentative vector into the current space vector;
				current_space = tentative_next_space;
				return SuccessCode::Success;
			}
Example #11
	FloatT GradientBoostingForest::TestError()
	{
//		puts("TestError do");
		FloatT ret =0.0;
		if(NULL == m_pTestInstancePool)
		{
			Comm::LogErr("GradientBoostingForest::TestError fail m_pTestInstancePool is NULL");
			return -1;
		}
		FloatT sum_weight = 0.0;
		#pragma omp parallel for schedule(static) reduction(+:sum_weight)
		for(int i=0;i<m_pTestInstancePool->Size();i++)
		{
			sum_weight += m_pTestInstancePool->GetInstance(i).weight;
		}

		FloatT lim[8] = {0.5,0.6,0.7,0.8,0.9,0.95,0.97,0.99};
		int cnt[8] = {0,0,0,0,0,0,0,0};
		int tot[8] = {0,0,0,0,0,0,0,0};

		#pragma omp parallel for schedule(static) reduction(+:ret)
		for(int i=0;i<m_pTestInstancePool->Size();i++)
		{
			FloatT predict;
			if(0 != Predict(m_pTestInstancePool->GetInstance(i).X, predict))
			{
				Comm::LogErr("GradientBoostingForest::TestError fail! Predict fail!");
			}
			FloatT tmp = (m_pTestInstancePool->GetInstance(i).ys - predict);
			
	//		printf("%s predict:%f\n",m_pTestInstancePool->GetInstance(i).DebugStr().c_str(),predict);
			for(int j=0;j<8;j++)
			{
				if( predict >= lim[j])
				{
					if(m_pTestInstancePool->GetInstance(i).ys == 1)
					{
						#pragma omp atomic
						cnt[j]++;
					}
					#pragma omp atomic
					tot[j]++;
				}
			}

			tmp = tmp * tmp;	
			
			ret = ret + tmp * m_pTestInstancePool->GetInstance(i).weight;
		}
		ret = sqrt(ret / sum_weight);

		for(int i=0;i<8;i++)
		{
			// guard against division by zero when no prediction reached the threshold
			printf("predict >= %f cnt = %d tot = %d cnt/tot = %f\n", lim[i], cnt[i], tot[i], tot[i] > 0 ? cnt[i]*1.0/tot[i] : 0.0);
		}
		//printf("cnt = %d tot = %d\n",cnt,tot);
//		puts("TestError done");
		return ret;
	}
Example #12
void Model::Predict( const char* datasetPath, const char* errDatasetPath )
{
	SampleSet samples, errorSample;
	samples.Read(datasetPath);

	Predict(samples, errorSample);

	if (errDatasetPath != NULL)
		errorSample.Write(errDatasetPath);
}
Example #13
int SignClassifier::PredictSingle(const Mat &image)
{
    vector<Mat> image_vec(1, image);
    vector<int> labels;
    if (!Predict(image_vec, &labels))
    {
        return -1;
    }
    return labels[0];
}
Example #14
int main(int argc, char* argv[])
{
    int wait;

    // Train the neural network  with the samples
    trainMachine();

    // Now try predicting some values with the trained network


    Predict(1.0,1.3);
    Predict(0.7,0.5);
    Predict(0.1,0.2);
    Predict(0.2,0.2);
    Predict(0.3,0.3);

    //I'll wait for an integer. :)
    scanf("%d",&wait);
    return 0;
}
Example #15
void GBDT::LogLossProcess(DataVector *d, size_t samples, int i) {
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (size_t j = 0; j < samples; ++j) {
    ValueType p = Predict(*(*d)[j], i);
    (*d)[j]->target =
        static_cast<ValueType>(LogitLossGradient((*d)[j]->label, p));
  }

  if (g_conf.debug) {
    Auc auc;
    DataVector::iterator iter = d->begin();
    for ( ; iter != d->end(); ++iter) {
      ValueType p = Logit(Predict(**iter, i));
      auc.Add(p, (*iter)->label);
    }
    std::cout << "auc: " << auc.CalculateAuc() << std::endl;
  }
}
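LogitLossGradient and Logit are defined elsewhere in the project; one common form of these helpers, assuming labels y in {-1, +1} as in Friedman's binomial deviance formulation (an assumption, not this project's confirmed definition):

#include <cmath>

// Negative gradient of log(1 + exp(-2yf)) with respect to the score f.
double LogitLossGradient(double y, double f) {
  return 2.0 * y / (1.0 + std::exp(2.0 * y * f));
}

// Map a raw boosting score to a probability in (0, 1).
double Logit(double f) {
  return 1.0 / (1.0 + std::exp(-2.0 * f));
}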
Example #16
extern void regtestNew(void)
{
  static const regSample Samples[] = {
    { 2.0f, 3.0f },
    { 3.0f, 6.7f },
    { 4.0f, 7.0f },
    { 5.0f, 8.0f },
    { 6.0f, 9.0f },
  };

  reg * pReg = regNew();

  regSamples(pReg, Samples, sizeof(Samples)/sizeof(Samples[0]));

  Predict(pReg, 5.0f);
  Predict(pReg, 6.0f);
  Predict(pReg, 7.0f);
  Predict(pReg, 8.0f);
  Predict(pReg, 9.0f);
}
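The reg module presumably fits a line y = a + b x by ordinary least squares; for reference, the closed form such a fit would use:

b = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\sum_i (x_i - \bar{x})^2}, \qquad a = \bar{y} - b\,\bar{x}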
Example #17
/*!
    \fn CvFaceSVMClassifier::Training_error(CvMat * train_data, CvMat * labels) const
 */
CvScalar CvFaceSVMClassifier::Training_error(CvMat * train_data, CvMat * labels) const
{
  CvSize size = cvGetSize(labels);
  int nsamples = size.width * size.height;
  
  int numpositive = 0, numnegative = 0;
  for(int i = 0; i < nsamples; i++)
  {
    double value = cvGetReal1D( labels, i );
    if ( value == 1.0 )
      numpositive++;
    else if( value == 2.0 )
      numnegative++;
  }
  
  assert((numpositive+numnegative) == nsamples );
  
  size = cvGetSize( train_data );
  
  int nelements = size.height;
  
  CvMat * sample = cvCreateMat(1, nelements, CV_32FC1);
  
  int numerror = 0, numtruepositive = 0, numfalsepositive = 0;
  for(int i = 0; i < nsamples; i++)
  {
    cvGetRow( train_data, sample, i );
    double pre_label = Predict( sample );
    double label = cvGetReal1D( labels, i );
    if((pre_label == 1.0)&&(label == 1.0))
    {
      numtruepositive++;
    }
    if(pre_label != label)
    {
      if((pre_label == 1.0)&&(label == 2.0))
      {
        numfalsepositive++;
      }
      //printf("%d   ", i);
      numerror++;
    }
  }
  printf("\n\n");
  double error = (double)numerror/(double)nsamples;
  double tp_rate = (double)numtruepositive/(double)numpositive;
  double fp_rate = (double)numfalsepositive/(double)numnegative;
  
  cvReleaseMat(&sample);
  
  CvScalar scalar = cvScalar(error, tp_rate, fp_rate, 0);
  
  return scalar;
}
Example #18
/*!
    \fn CvFaceSVMClassifier::Training_error(CvGaborResponseData & gabordata, CvGaborFeaturePool & new_features) const
 */
CvScalar CvFaceSVMClassifier::Training_error(CvGaborResponseData & gabordata, CvGaborFeaturePool & new_features) const
{
  CvMat * train_data = GetDataFromFeatures( gabordata, new_features );
  CvMat * labels = GetLabelsFromFeatures( gabordata, new_features );
   
  CvSize size = cvGetSize(labels);
  int nsamples = size.width * size.height;
  
  int numpositive = 0, numnegative = 0;
  for(int i = 0; i < nsamples; i++)
  {
    double value = cvGetReal1D( labels, i );
    if ( value == 1.0 )
      numpositive++;
    else if( value == 2.0 )
      numnegative++;
  }
  
  assert((numpositive+numnegative) == nsamples );
  
  int nelements = new_features.getSize();
  
  CvMat * sample = cvCreateMat(1, nelements, CV_32FC1);
  
  int numerror = 0, numtruepositive = 0, numfalsepositive = 0;
  for(int i = 0; i < nsamples; i++)
  {
    cvGetRow( train_data, sample, i );
    double pre_label = Predict( sample );
    double label = cvGetReal1D( labels, i );
    if((pre_label == 1.0)&&(label == 1.0))
    {
      numtruepositive++;
    }
    if(pre_label != label)
    {
      if((pre_label == 1.0)&&(label == 2.0))
        numfalsepositive++;
      numerror++;
    }
  }
  
  double error = (double)numerror/(double)nsamples;
  double tp_rate = (double)numtruepositive/(double)numpositive;
  double fp_rate = (double)numfalsepositive/(double)numnegative;
  
  cvReleaseMat(&sample);
  cvReleaseMat(&train_data);
  cvReleaseMat(&labels);
  
  CvScalar scalar = cvScalar(error, tp_rate, fp_rate, 0);
  
  return scalar;
}
Example #19
float NeuralNets::_validate(int start, int end){
    // all shared data is read-only, so there is no need to lock
    float res = 0.0;
    for (int i = start; i < end; i++) {
        Eigen::ArrayXf y1 = _data_matrix.row(i).array();
        int predict = Predict(y1);
        if (predict == _labels[i]) {
            res += 1;
        }
    }
    // the loop covers end - start samples, so divide by that count
    return res / (end - start);
}
Example #20
/* Return the top N predictions. */
std::vector<Prediction> Deep_Classifier::Classify(const cv::Mat& img, int N) {
    std::vector<float> output = Predict(img);

    // Clamp N so we never index past the output vector.
    const int top = std::min<int>(N, static_cast<int>(output.size()));
    std::vector<int> maxN = Argmax(output, top);
    std::vector<Prediction> predictions;
    for (int i = 0; i < top; ++i) {
        int idx = maxN[i];
        predictions.push_back(std::make_pair(labels_[idx], output[idx]));
    }

    return predictions;
}
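Argmax is assumed by the snippet above but not shown; a minimal sketch of such a helper (hypothetical, modeled on the common Caffe classification example), returning the indices of the N largest values in descending order:

#include <algorithm>
#include <utility>
#include <vector>

// Caller must ensure N <= v.size().
static std::vector<int> Argmax(const std::vector<float>& v, int N) {
    // Pair each value with its index, then partially sort by value, descending.
    std::vector<std::pair<float, int> > pairs;
    for (size_t i = 0; i < v.size(); ++i)
        pairs.push_back(std::make_pair(v[i], static_cast<int>(i)));
    std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(),
                      [](const std::pair<float, int>& a,
                         const std::pair<float, int>& b) { return a.first > b.first; });

    // Keep only the indices of the top N entries.
    std::vector<int> result;
    for (int i = 0; i < N; ++i)
        result.push_back(pairs[i].second);
    return result;
}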
Example #21
void GBDT::SquareLossProcess(DataVector *d, size_t samples, int i) {
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (size_t j = 0; j < samples; ++j) {
    ValueType p = Predict(*(*d)[j], i);
    (*d)[j]->target = (*d)[j]->label - p;
  }

  if (g_conf.debug) {
    double s = 0;
    double c = 0;
    DataVector::iterator iter = d->begin();
    for ( ; iter != d->end(); ++iter) {
      ValueType p = Predict(**iter, i);
      s += Squared((*iter)->label - p) * (*iter)->weight;
      c += (*iter)->weight;
    }
    std::cout << "rmse: " << std::sqrt(s / c) << std::endl;
  }
}
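Setting target to label - p is the gradient-boosting step for squared loss: with L = \tfrac{1}{2}(y - F)^2, the negative gradient that the next tree is fit to is

-\frac{\partial L}{\partial F} = y - F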
Example #22
void GBDT::LADLossProcess(DataVector *d, size_t samples, int i) {
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (size_t j = 0; j < samples; ++j) {
    ValueType p = Predict(*(*d)[j], i);
    (*d)[j]->residual = (*d)[j]->label - p;
    (*d)[j]->target = Sign((*d)[j]->residual);
  }

  if (g_conf.debug) {
    double s = 0;
    double c = 0;
    for (auto iter = d->begin(); iter != d->end(); ++iter) {
      ValueType p = Predict(**iter, i);
      s += Abs((*iter)->label - p) * (*iter)->weight;
      c += (*iter)->weight;
    }
    std::cout << "mae: " << s/c << std::endl;
  }
}
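Likewise for least absolute deviation: with L = |y - F|, the negative gradient the next tree is fit to is the sign of the residual,

-\frac{\partial L}{\partial F} = \operatorname{sign}(y - F)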
Example #23
int main(int argc, char *argv[])
{
	MultiVariateSet *setVariate = NULL;
	int i;
	float* x;
	setVariate = CreateMultiVariateSet("data.dat");
	PrintMultiVariateSet(setVariate);
	printf("\n");

	printf("Covariance = %6.3f\n", ComputeCovariance(setVariate));
	printf("Correlation = %6.3f\n", ComputeCorrelation(setVariate));

	x = setVariate->elem[0];
	for (i = 0; i < setVariate->cols; i++,x++)
	{
		printf("Predict value : %6.3f", Predict(setVariate, *x));
		printf(" ei = %6.3f\n", *(setVariate->elem[1] + i) - Predict(setVariate, *x));
	}
	DestoryMultiVariateSet(setVariate);
	return 0;
}
Example #24
int testing(LeNet5 *lenet, image *test_data, uint8 *test_label,int total_size)
{
	int right = 0, percent = 0;
	for (int i = 0; i < total_size; ++i)
	{
		uint8 l = test_label[i];
		int p = Predict(lenet, test_data[i], 10);
		right += l == p;
		if (i * 100 / total_size > percent)
			printf("test:%2d%%\n", percent = i * 100 / total_size);
	}
	return right;
}
Example #25
/* Return the top N predictions. */
std::vector<Prediction> Classifier::Classify(const cv::Mat& img, int N) {
	std::vector<float> output = Predict(img);

	const int top = std::min<int>(N, static_cast<int>(output.size()));
	std::vector<int> maxN = Argmax(output, top);
	std::vector<Prediction> predictions;
	for (int i = 0; i < top; ++i) {
		int idx = maxN[i];
		//predictions.push_back(std::make_pair(labels_[idx], output[idx]));
		predictions.push_back(std::make_pair(idx, output[idx]));
	}

	return predictions;
}
Example #26
REAL CMetaModel::Test( const REAL* prInputs, const REAL* prOutputs, int count )
{
	vector<REAL> vcOutputs( GetOutputs() );

	//calculate the error function under the current weight
	REAL rError = 0;
	for( int i=0; i<count; i++ ){
		Predict( prInputs + i*GetInputs(), &vcOutputs[0] );
		rError += inner_product( vcOutputs.begin(), vcOutputs.end(), prOutputs, 0.0,
			plus<REAL>(), minus_pow<REAL>(2.0) ) / GetOutputs();
	}
	return rError/count;
}
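minus_pow is a project-specific functor not shown here; a plausible sketch consistent with its use above (hypothetical): a binary functor returning (a - b) raised to a fixed exponent, so the inner_product accumulates squared differences.

#include <cmath>

template <typename T>
struct minus_pow
{
	explicit minus_pow(T e) : exponent(e) {}
	// Combine two values as pow(a - b, exponent).
	T operator()(const T& a, const T& b) const { return std::pow(a - b, exponent); }
	T exponent;
};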
Example #27
void TRecLinReg::Learn(const TFltV& Sample, const double& SampleVal) {
    double PredVal = Predict(Sample);

    TVector x(Sample);

    TVector Px = P * x;
	double xPx = Px.DotProduct(Sample);

	/*
	 * linreg.P = (linreg.P - (Px * Px') / (linreg.lambda + xPx)) / linreg.lambda;
	 * linreg.w = linreg.w + Px*((y - y_hat)/(linreg.lambda + xPx));
	 */
	P = (P - (Px*Px.GetT()) / (ForgetFact + xPx)) / ForgetFact;
	Coeffs += Px*((SampleVal - PredVal) / (ForgetFact + xPx));
}
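In standard notation this is recursive least squares with forgetting factor \lambda (ForgetFact in the code):

P \leftarrow \frac{1}{\lambda} \left( P - \frac{P x x^{\top} P}{\lambda + x^{\top} P x} \right), \qquad w \leftarrow w + \frac{P x \, (y - \hat{y})}{\lambda + x^{\top} P x}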
Example #28
	int GradientBoostingForest::BatchPredict(InstancePool * pInstancepool, std::vector<FloatT> &vecPredict)
	{
		for(int i = 0; i < pInstancepool->Size(); i++)
		{
			FloatT predict;
			int ret = Predict(pInstancepool->GetInstance(i).X, predict);
			if(ret != 0)
			{
				Comm::LogErr("GradientBoostingForest::BatchPredict fail i = %d  Instance = %s", i, pInstancepool->GetInstance(i).DebugStr().c_str());
				return ret;
			}
			vecPredict.push_back(predict);
		}
		return 0;
	}
Example #29
int main(int argc, char const *argv[])
{
	printf("请输入年份\n");
	scanf("%d",&year);
	printf("输入月份\n");
	scanf("%d",&month);
	printf("输入日期\n");
	scanf("%d",&day);

  if(Check())
    Predict();
  else
  	printf("Date illegal!\n");
	return 0;
}
Example #30
File: step.hpp Project: aliddell/b2
		SuccessCode Step(config::Predictor predictor_choice,
		                    Vec<ComplexType> & next_space, ComplexType & next_time,
				               System & sys,
				               Vec<ComplexType> const& current_space, ComplexType current_time, 
				               ComplexType const& delta_t,
				               RealType & condition_number_estimate,
				               unsigned & num_steps_since_last_condition_number_computation, 
				               unsigned frequency_of_CN_estimation, PrecisionType prec_type, 
				               RealType const& tracking_tolerance,
				               RealType const& path_truncation_threshold,
				               unsigned min_num_newton_iterations,
				               unsigned max_num_newton_iterations,
				               config::AdaptiveMultiplePrecisionConfig const& AMP_config)
		{

			SuccessCode predictor_code = Predict(next_space,
							               		sys,
							               		current_space, current_time, 
							               		delta_t,
							               		condition_number_estimate,
							               		num_steps_since_last_condition_number_computation, 
							               		frequency_of_CN_estimation, prec_type, 
							               		tracking_tolerance,
							               		AMP_config);

			if (predictor_code!=SuccessCode::Success)
				return predictor_code;

			next_time = current_time + delta_t;

			SuccessCode corrector_code = Correct(next_space,
								               sys,
								               current_space, // pass by value to get a copy of it
								               next_time, 
								               prec_type, 
								               tracking_tolerance,
								               path_truncation_threshold,
								               min_num_newton_iterations,
								               max_num_newton_iterations,
								               AMP_config);


			if (corrector_code!=SuccessCode::Success)
				return corrector_code;

			return SuccessCode::Success;
		}