AlphaReal SingleStumpLearner::run( int colIdx )
	{
		const int numClasses = _pTrainingData->getNumClasses();
		const int numColumns = _pTrainingData->getNumAttributes();
		
		// set the smoothing value to avoid numerical problems
		// when theta=0.
		setSmoothingVal( 1.0 / (AlphaReal)_pTrainingData->getNumExamples() * 0.01 );
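		// (the smoothing constant is 1% of the uniform example weight 1/n; it is
		// typically added to the class-wise rates so ratios like r+/r- used for
		// alpha remain finite when a rate is zero)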
		
		vector<sRates> mu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.
		vector<AlphaReal> tmpV(numClasses); // The class-wise votes/abstentions
		
		AlphaReal tmpAlpha;
		
		AlphaReal bestEnergy = numeric_limits<AlphaReal>::max();
		
		StumpAlgorithm<FeatureReal> sAlgo(numClasses);
		sAlgo.initSearchLoop(_pTrainingData);
		
		AlphaReal halfTheta;
		if ( _abstention == ABST_REAL || _abstention == ABST_CLASSWISE )
			halfTheta = _theta/2.0;
		else
			halfTheta = 0;
		
		int numOfDimensions = _maxNumOfDimensions;
		
		
		const pair<vpIterator,vpIterator> dataBeginEnd = 
		static_cast<SortedData*>(_pTrainingData)->getFileteredBeginEnd( colIdx );
		
		
		const vpIterator dataBegin = dataBeginEnd.first;
		const vpIterator dataEnd = dataBeginEnd.second;
		
		// also sets mu, tmpV, and bestHalfEdge
		_threshold = sAlgo.findSingleThresholdWithInit(dataBegin, dataEnd, _pTrainingData, 
													   halfTheta, &mu, &tmpV);
		
		bestEnergy = getEnergy(mu, tmpAlpha, tmpV);
		
		_alpha = tmpAlpha;
		_v = tmpV;
		_selectedColumn = colIdx;
		
		if ( _selectedColumn != -1 )
		{
			stringstream thresholdString;
			thresholdString << _threshold;
			_id = _pTrainingData->getAttributeNameMap().getNameFromIdx(_selectedColumn) + thresholdString.str();	
		} else {
			bestEnergy = numeric_limits<AlphaReal>::signaling_NaN();
		}
		
		return bestEnergy;
		
	}
Example #2
void Bullet::reset(const Vector2f & pos, const Vector2f & vel) {
    
    setPosition(pos);
    Vector2f tmpV(V);
    if (vel[0] > 0) {
        tmpV[0] = tmpV[0] + vel[0];
    }
    setVelocity(tmpV); // apply the adjusted velocity (the original passed V, discarding tmpV)
}
Example #3
void SlidingReducedOrderObserver::process()
{
  DEBUG_BEGIN("void SlidingReducedOrderObserver::process()\n");
  if (!_pass)
  {
    DEBUG_PRINT("First pass \n ");
    _pass = true;
    //update the estimate using the first value of y, such that C\hat{x}_0 = y_0
    const SiconosVector& y = _sensor->y();
    _e->zero();
    prod(*_C, *_xHat, *_e);
    *_e -= y;

    SiconosVector tmpV(_DS->n());
    SimpleMatrix tmpC(*_C);
    for (unsigned int i = 0; i < _e->size(); ++i)
      tmpV(i) = (*_e)(i);

    tmpC.SolveByLeastSquares(tmpV);
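    // tmpV is overwritten in place with the least-squares solution d of C*d = e,
    // so the subtractions below enforce C*xHat = y for the corrected state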
    *(_xHat) -= tmpV;
    *(_DS->x()) -= tmpV;
    _DS->initMemory(1);
    _DS->swapInMemory();
    DEBUG_EXPR(_DS->display(););
  }
  // (the branch handling subsequent passes is truncated in the original listing)
}
Example #4
float Vector::angle(const Vector &V) 
{//return value should be in [0,PI], if -1, means error
  double t1 = length();
  Vector tmpV(V);
  double t2 = tmpV.length();
  
  if ((fabs(t1)<ZERO) ||(fabs(t2)<ZERO)) {
	//printf("\n Vector undefined in Vector::angle(Vector)");//---need to be restored
        //if (t1 == 0) outputs();
	return -1.0f; 
  }
  float alpha = dotProduct(tmpV)/t1/t2;
  // clamp to [-1, 1]: rounding can push the cosine slightly out of range,
  // which would make acos return NaN
  if (alpha > 1.0f) alpha = 1.0f;
  else if (alpha < -1.0f) alpha = -1.0f;
  //alpha = facos (alpha);
  alpha = acos (alpha);
  return alpha;
}
Example #5
AlphaReal ConstantLearner::run()
{

   const int numClasses = _pTrainingData->getNumClasses();
   //const int numColumns = _pTrainingData->getNumColumns();

   // set the smoothing value to avoid numerical problems
   // when theta=0.
   setSmoothingVal( 1.0 / (AlphaReal)_pTrainingData->getNumExamples() * 0.01 );

   vector<sRates> mu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.
   vector<AlphaReal> tmpV(numClasses); // The class-wise votes/abstentions

   ConstantAlgorithm cAlgo;
   cAlgo.findConstant(_pTrainingData,&mu,&tmpV);
   
   _v = tmpV;

   return getEnergy(mu, _alpha, _v);
}
Example #6
    void calcAndResetEmissions(SeqData const & seqData, Grammar & g, EmitWrappers & ew, EmitAnnoMap & eam, string const & annoName)
    {
      // mk emissions
      unsigned n = seqData.seqSize();
      xvector_t tmpV(n);
      reset(tmpV);
      vector<xvector_t> vv(ew.vecWrapNames().size(), tmpV);
      ew.vectorEmissions(vv, seqData);
      
      xmatrix_t tmpM(n, n);
      reset(tmpM);
      vector<xmatrix_t> vm(ew.matWrapNames().size(), tmpM);
      ew.matrixEmissions(vm, seqData);
      
      // mask according to anno
      if ( eam.size() and annoName.size() ) {
	string const & anno = seqData.getAnno(annoName);
	maskEmissions(vv, ew.vecWrapNames(), anno, eam);
	maskEmissions(vm, ew.matWrapNames(), anno, eam);
      }
      
      g.resetEmissions(vv, vm);
    }
Example #7
float UCBVHaarSingleStumpLearner::run()
{
    if ( UCBVHaarSingleStumpLearner::_numOfCalling == 0 ) {
        init();
    }

    UCBVHaarSingleStumpLearner::_numOfCalling++;
    //cout << "Num of iter:\t" << UCBVHaarSingleStumpLearner::_numOfCalling << " " << this->getTthSeriesElement( UCBVHaarSingleStumpLearner::_numOfCalling ) << flush << endl;
    const int numClasses = _pTrainingData->getNumClasses();

    // set the smoothing value to avoid numerical problems
    // when theta=0.
    setSmoothingVal( 1.0 / (float)_pTrainingData->getNumExamples() * 0.01 );

    vector<sRates> mu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.
    vector<float> tmpV(numClasses); // The class-wise votes/abstentions

    float tmpThreshold;
    float tmpAlpha;

    float bestEnergy = numeric_limits<float>::max();
    float tmpEnergy;

    HaarData* pHaarData = static_cast<HaarData*>(_pTrainingData);

    // get the whole data matrix
    //const vector<int*>& intImages = pHaarData->getIntImageVector();

    // The data matrix transformed into the feature's space
    vector< pair<int, float> > processedHaarData(_pTrainingData->getNumExamples());

    // I need to prepare both type of sampling

    StumpAlgorithm<float> sAlgo(numClasses);
    sAlgo.initSearchLoop(_pTrainingData);

    float halfTheta;
    if ( _abstention == ABST_REAL || _abstention == ABST_CLASSWISE )
        halfTheta = _theta/2.0;
    else
        halfTheta = 0;

    // The declared features types
    vector<HaarFeature*>& loadedFeatures = pHaarData->getLoadedFeatures();

    // for every feature type
    vector<HaarFeature*>::iterator ftIt;
    //vector<HaarFeature*>::iterator maxftIt;

    vector<float> maxV( loadedFeatures.size() );
    vector<int> maxKey( loadedFeatures.size() );
    vector<int> maxNum( loadedFeatures.size() );

    //calculate the Bk,s,t of the randomly chosen features
    int key = getKeyOfMaximalElement();
    int featureIdx = (int) (key / 10);
    int featureType = (key % 10);
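    // the arm key packs both indices: key = 10 * featureIdx + featureType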

    //for (i = 0, ftIt = loadedFeatures.begin(); ftIt != loadedFeatures.end(); i++ ++ftIt)
    //*ftIt = loadedFeatures[ featureType ];

    // just for readability
    //HaarFeature* pCurrFeature = *ftIt;
    HaarFeature* pCurrFeature = loadedFeatures[ featureType ];
    if (_samplingType != ST_NO_SAMPLING)
        pCurrFeature->setAccessType(AT_RANDOM_SAMPLING);

    // Reset the iterator on the configurations. For random sampling
    // this clears the visited list
    pCurrFeature->loadConfigByNum( featureIdx );


    if (_verbose > 1)
        cout << "Learning type " << pCurrFeature->getName() << ".." << flush;

    // transform the data from intImages to the feature's space
    pCurrFeature->fillHaarData( _pTrainingData->getExamples(), processedHaarData );
    //pCurrFeature->fillHaarData(intImages, processedHaarData);

    // sort the examples in the new space by their coordinate
    sort( processedHaarData.begin(), processedHaarData.end(),
          nor_utils::comparePair<2, int, float, less<float> >() );
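    // with the examples sorted by their coordinate in the feature space, a
    // single left-to-right sweep can score every candidate threshold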

    // find the optimal threshold
    tmpThreshold = sAlgo.findSingleThresholdWithInit(processedHaarData.begin(),
                   processedHaarData.end(),
                   _pTrainingData, halfTheta, &mu, &tmpV);

    tmpEnergy = getEnergy(mu, tmpAlpha, tmpV);


    // Store it in the current weak hypothesis.
    // note: I don't really like having so many temp variables
    // but the alternative would be a structure, which would need
    // to be inheritable to make things more consistent. But this would
    // make it less flexible. Therefore, I am still undecided. This
    // might change!
    _alpha = tmpAlpha;
    _v = tmpV;

    // I need to save the configuration because it changes within the object
    _selectedConfig = pCurrFeature->getCurrentConfig();
    // I save the object because it contains the information about the type,
    // the name, etc.
    _pSelectedFeature = pCurrFeature;
    _threshold = tmpThreshold;

    bestEnergy = tmpEnergy;
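    // the edge is the class-wise (rPls - rMin) summed over labels; its square
    // is reported to the bandit below as the observed reward for this arm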

    float edge = 0.0;
    for( vector<sRates>::iterator itR = mu.begin(); itR != mu.end(); itR++ ) edge += ( itR->rPls - itR->rMin );
    //need to set the X value
    updateKeys( key, edge * edge );

    if (!_pSelectedFeature)
    {
        cerr << "ERROR: No Haar Feature found. Something must be wrong!" << endl;
        exit(1);
    }
    else
    {
        if (_verbose > 1)
            cout << "Selected type: " << _pSelectedFeature->getName() << endl;
    }

    return bestEnergy;
}
Example #8
	AlphaReal SingleStumpLearner::run()
	{
		const int numClasses = _pTrainingData->getNumClasses();
		const int numColumns = _pTrainingData->getNumAttributes();
		
		// set the smoothing value to avoid numerical problems
		// when theta=0.
		setSmoothingVal( 1.0 / (AlphaReal)_pTrainingData->getNumExamples() * 0.01 );
		
		vector<sRates> mu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.
		vector<AlphaReal> tmpV(numClasses); // The class-wise votes/abstentions
		
		FeatureReal tmpThreshold;
		AlphaReal tmpAlpha;
		
		AlphaReal bestEnergy = numeric_limits<AlphaReal>::max();
		AlphaReal tmpEnergy;
		
		StumpAlgorithm<FeatureReal> sAlgo(numClasses);
		sAlgo.initSearchLoop(_pTrainingData);
		
		AlphaReal halfTheta;
		if ( _abstention == ABST_REAL || _abstention == ABST_CLASSWISE )
			halfTheta = _theta/2.0;
		else
			halfTheta = 0;
		
		int numOfDimensions = _maxNumOfDimensions;
		for (int j = 0; j < numColumns; ++j)
		{
			// Tricky way to select numOfDimensions columns randomly out of numColumns
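			// Keeping column j with probability numOfDimensions/rest yields a
			// uniformly distributed subset of numOfDimensions columns in a single
			// pass (selection sampling, Knuth's Algorithm S)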
			int rest = numColumns - j;
			float r = rand()/static_cast<float>(RAND_MAX);
			
			if ( static_cast<float>(numOfDimensions) / rest > r ) 
			{
				--numOfDimensions;
				//if ( static_cast<SortedData*>(_pTrainingData)->isAttributeEmpty( j ) ) continue;
				
				const pair<vpIterator,vpIterator> dataBeginEnd = 
				static_cast<SortedData*>(_pTrainingData)->getFileteredBeginEnd(j);
				
				
				const vpIterator dataBegin = dataBeginEnd.first;
				const vpIterator dataEnd = dataBeginEnd.second;
				
				
				
				// also sets mu, tmpV, and bestHalfEdge
				tmpThreshold = sAlgo.findSingleThresholdWithInit(dataBegin, dataEnd, _pTrainingData, 
																 halfTheta, &mu, &tmpV);
				
				if (tmpThreshold == tmpThreshold) // NaN is the only value that compares unequal to itself (IEEE 754), so this tests for a valid threshold
				{ 
					// small inconsistency compared to the standard algo (but a good
					// trade-off): in findThreshold we maximize the edge (suboptimal but
					// fast) but here (among dimensions) we minimize the energy.
					tmpEnergy = getEnergy(mu, tmpAlpha, tmpV);
					
					if (tmpEnergy < bestEnergy && tmpAlpha > 0)
					{
						// Store it in the current weak hypothesis.
						// note: I don't really like having so many temp variables
						// but the alternative would be a structure, which would need
						// to be inheritable to make things more consistent. But this would
						// make it less flexible. Therefore, I am still undecided. This
						// might change!
						
						_alpha = tmpAlpha;
						_v = tmpV;
						_selectedColumn = j;
						_threshold = tmpThreshold;
						
						bestEnergy = tmpEnergy;
					}
				} // tmpThreshold == tmpThreshold
			}
		}
		
		if ( _selectedColumn != -1 )
		{
			stringstream thresholdString;
			thresholdString << _threshold;
			_id = _pTrainingData->getAttributeNameMap().getNameFromIdx(_selectedColumn) + thresholdString.str();	
		} else {
			bestEnergy = numeric_limits<AlphaReal>::signaling_NaN();
		}
		
		return bestEnergy;
		
	}
Example #9
void SolveSLE(const Matrix &A, Matrix &Z, const Matrix &b, float initialZ)
{
	int aCols = A.GetColsCount();
	int aRows = A.GetRowsCount();
	int zCols = Z.GetColsCount();
	int zRows = Z.GetRowsCount();
	int bCols = b.GetColsCount();
	int bRows = b.GetRowsCount();

	if (aCols != aRows || zCols != 1 || bCols != 1 || zRows != bRows || zRows != aCols)
		throw("SLE solver: This is not a SLE!\n");
	
	printf(" Solving SLE ... \n");
	clock_t time = clock();

	int m = aCols;
	float normb = b.Norm();

	Matrix rk(m, 1);
	Matrix pk(m, 1);
	Matrix Ark(m, 1);
	Matrix Apk(m, 1);
	Matrix zp(m, 1);
	Matrix zpp(m, 1);
	Matrix tmpV(m, 1);

	zpp.Fill(initialZ);
	pk.Fill(0.0f);
	rk.Fill(0.0f);
	Ark.Fill(0.0f);
	Apk.Fill(0.0f);
	zp.Fill(0.0f);
	tmpV.Fill(0.0f);

	rk = A * zpp - b;
	float normr = rk.Norm();

	Ark = A * rk;
	float d = Ark.Dot(rk);

	zp = zpp - rk * (normr * normr / d);

	
	int flag = 1;
	int iterations = 1;
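	// Each pass forms the residual rk = A*zp - b and the previous step
	// pk = zp - zpp, then solves a 2x2 system (by Cramer's rule) for the step
	// sizes gamma and beta that combine rk and pk; iteration stops once the
	// relative residual ||A*z - b|| / ||b|| drops below 1e-4.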
	
	while (flag == 1)
	{
		rk = A * zp - b;
		normr = rk.Norm();

		pk = zp - zpp;

		Ark = A * rk;
		Apk = A * pk;

		float dot1 = Ark.Dot(pk);
		float dot2 = rk.Dot(pk);
		float dot3 = Ark.Dot(rk);
		float dot4 = Apk.Dot(pk);
		d = dot3 * dot4 - dot1 * dot1;

		float gamma = ((normr * normr) * dot4 - dot2 * dot1) / d;
		float beta = ((normr * normr) * dot1 - dot2 * dot3) / d;
			
		zpp = zp;
		zp -= rk * gamma - pk * beta; 

		tmpV = A * zp - b;
		double norm = tmpV.Norm();
			
		double error = norm / normb;
		
		printf("   Iteration:%d\terror:%f\n", iterations, error);
			
		if (error < 0.0001)
			flag = 0;

		iterations++;
	}
	
	printf(" SLE solved with %d iterations for %f\n", iterations, (double)(clock() - time) / (CLOCKS_PER_SEC * 60));

	Z = zp;
	
	return;
}
Example #10
void TScene::DrawRadarLabel(Vector pos)
{
	// Draw Radar Slots
	glMatrixMode(GL_MODELVIEW);
	Vector col(0.25f,0.25f,0);
//	DrawLine2D(0.3f,0,0.7f,0,Vector(1,1,0));
	DrawLine2D(0.7f,0,0.7f,0.3f,col);
	DrawLine2D(0.7f,0.3f,0.3f,0.3f,col);
	DrawLine2D(0.3f,0.3f,0.3f,0,col);

	DrawLine2D(0.3f,1,0.3f,0.7f,col);
	DrawLine2D(0.3f,0.7f,0.7f,0.7f,col);
	DrawLine2D(0.7f,0.7f,0.7f,1,col);

	Vector lskPos(0,0,0),lskPos2(0,0,0),tmpV(0,0,0);
	float tmp=0;
	OGLMath_VectorMatrixMultiply( lskPos, pos, Camera->CameraMatrix);
	lskPos2=lskPos;
/*
	float mmodel[16]={0}; 
    gluLookAt(0,0,0,dir.x,dir.y,dir.z,up.x,up.y,up.z);
//    glPushMatrix();
    glGetFloatv(GL_MODELVIEW_MATRIX, mmodel);
//    glPopMatrix();
    glLoadIdentity();
    glTranslatef(pos.x, pos.y, pos.z);
    glMultTransposeMatrixf(mmodel);*/
	//Camera->Apply();
	float l=2000.0f-GetVectorLength(Camera->GetPosition()-pos);
	lskPos.x=lskPos.x/(3000.0f-l)+0.5f+0.35f;
	lskPos.y=lskPos.y/(3000.0f-l);//+0.5f;
	lskPos.z=-lskPos.z/(3000.0f-l)+0.5f-0.35f;
	if (lskPos.y>0.3f) lskPos.y=0.3f;
	if (lskPos.y<-0.3f) lskPos.y=-0.3f;
	DrawLine2D(lskPos.x, lskPos.z, lskPos.x, lskPos.z+lskPos.y, Vector(1,1,1));
	DrawLine2D(lskPos.x, lskPos.z+lskPos.y, lskPos.x+0.01f, lskPos.z+lskPos.y, Vector(1,1,1));
	DrawLine2D(0.85-0.01f,0.15,0.85+0.01f,0.15,Vector(1,1,1));
	DrawLine2D(0.85,0.15-0.01f,0.85,0.15+0.01f,Vector(1,1,1));

// Draw Label of Sphere Radar
	float mmodel[16]={0}; 
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
    gluLookAt(0,0,10,0,2.3f,1,0,1,0);
    glGetFloatv(GL_MODELVIEW_MATRIX, mmodel);

	lskPos2=NormalizeVector(lskPos2)*1.2f;
	
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	glMultMatrixf(ProjectionMatrix);
	glMultMatrixf(mmodel);
	glMatrixMode(GL_MODELVIEW);
//	tmpV=Camera->GetPosition();
//	DrawLine(Vector(tmpV.x,tmpV.y,tmpV.z-1.0f),Vector(tmpV.x+50.0f,tmpV.y+50.0f,tmpV.z-100.0f), Vector(1,1,1));
//	DrawLine(Vector(0,0,0),Vector(100,100,-100),Vector(1,1,1));
	DrawLine(Vector(0,0,0),lskPos2,Vector(1,1,1));
//	glBlendFunc(GL_SRC_ALPHA,GL_NONE);					// Select The Type Of Blending
//	glEnable(GL_COLOR_MATERIAL);
//	glDisable(GL_LIGHTING);
//	glDisable(GL_LIGHT0);
	RenderPlanet(0.25f,Vector(0,0,0),6);
//	glEnable(GL_LIGHTING);
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	glMultMatrixf(ProjectionMatrix);
	glMultMatrixf(Camera->CameraMatrix);
	
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
}
Example #11
	AlphaReal HaarMultiStumpLearner::run()
	{
		const int numClasses = _pTrainingData->getNumClasses();
		
		// set the smoothing value to avoid numerical problems
		// when theta=0.
		setSmoothingVal( 1.0 / (AlphaReal)_pTrainingData->getNumExamples() * 0.01 );
		
		vector<sRates> mu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.
		vector<AlphaReal> tmpV(numClasses); // The class-wise votes/abstentions
		
		vector<FeatureReal> tmpThresholds(numClasses);
		AlphaReal tmpAlpha;
		
		AlphaReal bestEnergy = numeric_limits<AlphaReal>::max();
		AlphaReal tmpEnergy;
		
		HaarData* pHaarData = static_cast<HaarData*>(_pTrainingData);
		
		// get the whole data matrix
		//   const vector<int*>& intImages = pHaarData->getIntImageVector();
		
		// The data matrix transformed into the feature's space
		vector< pair<int, FeatureReal> > processedHaarData(_pTrainingData->getNumExamples());
		
		// I need to prepare both type of sampling
		int numConf; // for ST_NUM
		time_t startTime, currentTime; // for ST_TIME
		
		long numProcessed;
		bool quitConfiguration;
		
		StumpAlgorithm<FeatureReal> sAlgo(numClasses);
		sAlgo.initSearchLoop(_pTrainingData);
		
		// The declared features types
		vector<HaarFeature*>& loadedFeatures = pHaarData->getLoadedFeatures();
		
		// for every feature type
		vector<HaarFeature*>::iterator ftIt;
		for (ftIt = loadedFeatures.begin(); ftIt != loadedFeatures.end(); ++ftIt)
		{
			// just for readability
			HaarFeature* pCurrFeature = *ftIt;
			if (_samplingType != ST_NO_SAMPLING)
				pCurrFeature->setAccessType(AT_RANDOM_SAMPLING);
			
			// Reset the iterator on the configurations. For random sampling
			// this shuffles the configurations.
			pCurrFeature->resetConfigIterator();
			quitConfiguration = false;
			numProcessed = 0;
			
			numConf = 0;
			time( &startTime );
			
			if (_verbose > 1)
				cout << "Learning type " << pCurrFeature->getName() << ".." << flush;
			
			// While there is a configuration available
			while ( pCurrFeature->hasConfigs() ) 
			{
				// transform the data from intImages to the feature's space
				pCurrFeature->fillHaarData(_pTrainingData->getExamples(), processedHaarData);
				// sort the examples in the new space by their coordinate
				sort( processedHaarData.begin(), processedHaarData.end(), 
					 nor_utils::comparePair<2, int, float, less<float> >() );
				
				// find the optimal threshold
				sAlgo.findMultiThresholdsWithInit(processedHaarData.begin(), processedHaarData.end(), 
												  _pTrainingData, tmpThresholds, &mu, &tmpV);
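				// unlike the single-threshold stump, one threshold per class is
				// estimated here (tmpThresholds holds numClasses entries)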
				
				tmpEnergy = getEnergy(mu, tmpAlpha, tmpV);
				++numProcessed;
				
				if (tmpEnergy < bestEnergy)
				{
					// Store it in the current weak hypothesis.
					// note: I don't really like having so many temp variables
					// but the alternative would be a structure, which would need
					// to be inheritable to make things more consistent. But this would
					// make it less flexible. Therefore, I am still undecided. This
					// might change!
					_alpha = tmpAlpha;
					_v = tmpV;
					
					// I need to save the configuration because it changes within the object
					_selectedConfig = pCurrFeature->getCurrentConfig();
					// I save the object because it contains the information about the type,
					// the name, etc.
					_pSelectedFeature = pCurrFeature;
					_thresholds = tmpThresholds;
					
					bestEnergy = tmpEnergy;
				}
				
				// Move to the next configuration
				pCurrFeature->moveToNextConfig();
				
				// check stopping criterion for random configurations
				switch (_samplingType)
				{
					case ST_NUM:
						++numConf;
						if (numConf >= _samplingVal)
							quitConfiguration = true;
						break;
					case ST_TIME:            
					{
						time( &currentTime );
						float diff = difftime(currentTime, startTime); // difftime is in seconds
						if (diff >= _samplingVal)
							quitConfiguration = true;
					}
						break;
					case ST_NO_SAMPLING:
						perror("ERROR: st no sampling... not sure what this means");
						
						break;
						
				} // end switch
				
				if (quitConfiguration)
					break;
				
			} // end while
			
			if (_verbose > 1)
			{
				time( &currentTime );
				float diff = difftime(currentTime, startTime); // difftime is in seconds
				
				cout << "done! "
				<< "(processed: " << numProcessed
				<< " - elapsed: " << diff << " sec)" 
				<< endl;
			}
			
		}
		
		if (!_pSelectedFeature)
		{
			cerr << "ERROR: No Haar Feature found. Something must be wrong!" << endl;
			exit(1);
		}
		else
		{
			if (_verbose > 1)
				cout << "Selected type: " << _pSelectedFeature->getName() << endl;
		}
		
		return bestEnergy;
	}
Example #12
void AlgFilter::ComFog::apply()
{
	cv::Mat* mat = image.GetMat();
	unsigned int width = mat->cols;
	unsigned int height = mat->rows;
	unsigned int channels = mat->channels();
	cv::Mat tmpMat;
	mat->copyTo(tmpMat);

    int randNum, randDirection, m, n;

	std::vector<cv::Mat> v(channels);
	cv::split(*mat, v);
	std::vector<cv::Mat> tmpV(channels);
	cv::split(*mat, tmpV);
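	// Fog effect: each output pixel (i, j) is replaced by a randomly displaced
	// neighbour (m, n), at most randRange pixels away along one of four diagonal
	// directions, with coordinates clamped to the image border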

    for (unsigned int k = 0; k < channels; k++)
	{
        for (unsigned int j = 0; j < height; j++)
		{	
            for (unsigned int i = 0; i < width; i++)
			{
				randNum = rand() % randRange;
				randDirection = rand() % 4;
				
				switch (randDirection)
				{
				case 0:		
					m = i + randNum;
					n = j + randNum;
					break;

				case 1:
					m = i - randNum;
					n = j - randNum;
					break;

				case 2:
					m = i + randNum;
					n = j - randNum;
					break;

				case 3:
					m = i - randNum;
					n = j + randNum;
					break;

				default:
					break;
				}

                if ((unsigned int)m >= width)
				{
					m = width - 1;
				}
                if ((unsigned int)n >= height)
				{
					n = height - 1;
				}
				if (m < 0)
				{
					m = 0;
				}
				if (n < 0)
				{
					n = 0;
				}

				v[k].at<uchar>(j, i) = tmpV[k].at<uchar>(n, m);
			}
		}
	}

	cv::merge(v, *mat);
}
Example #13
	AlphaReal BanditSingleSparseStump::run()
	{

		if ( ! this->_banditAlgo->isInitialized() ) {
			init();
		}

		const int numClasses = _pTrainingData->getNumClasses();
		const int numColumns = _pTrainingData->getNumAttributes();

		// set the smoothing value to avoid numerical problems
		// when theta=0.
		setSmoothingVal( (AlphaReal) 1.0 / (AlphaReal)_pTrainingData->getNumExamples() * (AlphaReal)0.01 );

		vector<sRates> mu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.
		vector<AlphaReal> tmpV(numClasses); // The class-wise votes/abstentions

		FeatureReal tmpThreshold;
		AlphaReal tmpAlpha;

		AlphaReal bestEnergy = numeric_limits<AlphaReal>::max();
		AlphaReal tmpEnergy;

		StumpAlgorithmLSHTC<FeatureReal> sAlgo(numClasses);
		sAlgo.initSearchLoop(_pTrainingData);

		AlphaReal halfTheta;
		if ( _abstention == ABST_REAL || _abstention == ABST_CLASSWISE )
			halfTheta = _theta/(AlphaReal)2.0;
		else
			halfTheta = 0;
		
		AlphaReal bestReward = 0.0;


		_banditAlgo->getKBestAction( _K, _armsForPulling );
		_rewards.resize( _armsForPulling.size() );

		if ( this->_armsForPulling.size() == 0 )
		{
			cout << "error" << endl;
		}

		for( int i = 0; i < (int)_armsForPulling.size(); i++ ) {
			//columnIndices[i] = p.second;			


			const pair<vpReverseIterator,vpReverseIterator> dataBeginEnd = 
				static_cast<SortedData*>(_pTrainingData)->getFileteredReverseBeginEnd( _armsForPulling[i] );

			/*
			const pair<vpIterator,vpIterator> dataBeginEnd = 
			static_cast<SortedData*>(_pTrainingData)->getFileteredBeginEnd( _armsForPulling[i] );
			*/

			const vpReverseIterator dataBegin = dataBeginEnd.first;
			const vpReverseIterator dataEnd = dataBeginEnd.second;

			/*
			const vpIterator dataBegin = dataBeginEnd.first;
			const vpIterator dataEnd = dataBeginEnd.second;
			*/

			// also sets mu, tmpV, and bestHalfEdge
			tmpThreshold = sAlgo.findSingleThresholdWithInit(dataBegin, dataEnd, _pTrainingData, 
				halfTheta, &mu, &tmpV);

			tmpEnergy = getEnergy(mu, tmpAlpha, tmpV);
			//update the weights in the UCT tree

			AlphaReal edge = 0.0;
			for ( vector<sRates>::iterator itR = mu.begin(); itR != mu.end(); itR++ ) edge += ( itR->rPls - itR->rMin );
			AlphaReal reward = this->getRewardFromEdge( edge );
			_rewards[i] = reward;

			if ( _verbose > 3 ) {
				//cout << "\tK = " <<i << endl;
				cout << "\tTempAlpha: " << tmpAlpha << endl;
				cout << "\tTempEnergy: " << tmpEnergy << endl;
				cout << "\tUpdate weight: " << reward << endl;
			}


			if ( (i==0) || (tmpEnergy < bestEnergy && tmpAlpha > 0) )
			{
				// Store it in the current weak hypothesis.
				// note: I don't really like having so many temp variables
				// but the alternative would be a structure, which would need
				// to be inheritable to make things more consistent. But this would
				// make it less flexible. Therefore, I am still undecided. This
				// might change!

				_alpha = tmpAlpha;
				_v = tmpV;
				_selectedColumn = _armsForPulling[i];
				_threshold = tmpThreshold;

				bestEnergy = tmpEnergy;
				bestReward = reward;
			}
		}

		if ( _banditAlgoName == BA_EXP3G2 )
		{
			vector<AlphaReal> ePayoffs( numColumns );			
			fill( ePayoffs.begin(), ePayoffs.end(), 0.0 );

			for( int i=0; i<_armsForPulling.size(); i++ )
			{
				ePayoffs[_armsForPulling[i]] = _rewards[i];
			}		
			estimatePayoffs( ePayoffs );

			(dynamic_cast<Exp3G2*>(_banditAlgo))->receiveReward( ePayoffs );
		} else {
			for( int i=0; i<_armsForPulling.size(); i++ )
			{
				_banditAlgo->receiveReward( _armsForPulling[i], _rewards[i] );
			}		
		}

		if ( _verbose > 2 ) cout << "Column has been selected: " << _selectedColumn << endl;

		stringstream thresholdString;
		thresholdString << _threshold;
		_id = _pTrainingData->getAttributeNameMap().getNameFromIdx(_selectedColumn) + thresholdString.str();

		_reward = bestReward;

		return bestEnergy;
	}
Example #14
	float SelectorLearner::run()
	{
		const int numClasses = _pTrainingData->getNumClasses();
		const int numColumns = _pTrainingData->getNumAttributes();
		const int numExamples = _pTrainingData->getNumExamples();
		
		// set the smoothing value to avoid numerical problems
		// when theta=0.
		setSmoothingVal( 1.0 / (float)_pTrainingData->getNumExamples() * 0.01 );
		
		vector<sRates> vMu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.
		vector<float> tmpV(numClasses); // The class-wise votes/abstentions
		
		float tmpAlpha, tmpEnergy;
		float bestEnergy = numeric_limits<float>::max();
		
		int numOfDimensions = _maxNumOfDimensions;
		for (int j = 0; j < numColumns; ++j)
		{
			// Tricky way to select numOfDimensions columns randomly out of numColumns
			int rest = numColumns - j;
			float r = rand()/static_cast<float>(RAND_MAX);
			
			if ( static_cast<float>(numOfDimensions) / rest > r ) 
			{
				--numOfDimensions;
				/*
				 if (_verbose > 2)
				 cout << "    --> trying attribute = "
				 <<_pTrainingData->getAttributeNameMap().getNameFromIdx(j)
				 << endl << flush;
				 */
				const int numIdxs = _pTrainingData->getEnumMap(j).getNumNames();
				
				// Create and initialize the numIdxs x numClasses gamma matrix
				vector<vector<float> > tmpGammasPls(numIdxs);
				vector<vector<float> > tmpGammasMin(numIdxs);
				for (int io = 0; io < numIdxs; ++io) {
					vector<float> tmpGammaPls(numClasses);
					vector<float> tmpGammaMin(numClasses);
					fill(tmpGammaPls.begin(), tmpGammaPls.end(), 0.0);
					fill(tmpGammaMin.begin(), tmpGammaMin.end(), 0.0);
					tmpGammasPls[io] = tmpGammaPls;
					tmpGammasMin[io] = tmpGammaMin;
				}
				
				// Compute the elements of the gamma plus and minus matrices
				float entry;
				for (int i = 0; i < numExamples; ++i) {
					const vector<Label>& labels = _pTrainingData->getLabels(i);
					int io = static_cast<int>(_pTrainingData->getValue(i,j));	    
					for (int l = 0; l < numClasses; ++l) {
						entry = labels[l].weight * labels[l].y;
						if (entry > 0)
							tmpGammasPls[io][l] += entry;
						else if (entry < 0)
							tmpGammasMin[io][l] += -entry;
					}
				}
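				// tmpGammasPls[io][l] / tmpGammasMin[io][l] now hold the positive and
				// negative weight mass (weight * y) of the examples taking enum value
				// io, for each class l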
				
				// Initialize the idx-wise structures; u is enumerated below as
				// one-hot vectors (a single +1 entry, the rest -1)
				vector<sRates> uMu(numIdxs); // The idx-wise rates
				vector<float> tmpU(numIdxs);// The idx-wise votes/abstentions
				vector<float> previousTmpU(numIdxs);// The idx-wise votes/abstentions
				
				for (int io = 0; io < numIdxs; ++io) {
					uMu[io].classIdx = io;	
				}
				
				for (int io = 0; io < numIdxs; ++io) {					
					// initializing u as it has only one positive element
					fill( tmpU.begin(), tmpU.end(), -1 );
					tmpU[io] = +1;
					
					vector<sRates> vMu(numClasses); // The label-wise rates
					for (int l = 0; l < numClasses; ++l)
						vMu[l].classIdx = l;
					vector<float> tmpV(numClasses); // The label-wise votes/abstentions
					
					float tmpVal;
					tmpAlpha = 0.0;
					
					//filling out tmpV and vMu
					for (int l = 0; l < numClasses; ++l) {
						vMu[l].rPls = vMu[l].rMin = vMu[l].rZero = 0; 
						for (int io = 0; io < numIdxs; ++io) {
							if (tmpU[io] > 0) {
								vMu[l].rPls += tmpGammasPls[io][l];
								vMu[l].rMin += tmpGammasMin[io][l];
							}
							else if (tmpU[io] < 0) {
								vMu[l].rPls += tmpGammasMin[io][l];
								vMu[l].rMin += tmpGammasPls[io][l];
							}
						}
						if (vMu[l].rPls >= vMu[l].rMin) {
							tmpV[l] = +1;
						}
						else {
							tmpV[l] = -1;
							tmpVal = vMu[l].rPls;
							vMu[l].rPls = vMu[l].rMin;
							vMu[l].rMin = tmpVal;
						}
					}
					
					tmpEnergy = AbstainableLearner::getEnergy(vMu, tmpAlpha, tmpV);
					
					if ( tmpEnergy < bestEnergy && tmpAlpha > 0 ) {
						_alpha = tmpAlpha;
						_v = tmpV;
						//_u = tmpU;
						_positiveIdxOfArrayU = io;
						_selectedColumn = j;
						bestEnergy = tmpEnergy;
					}
				}
			}
		}
		
		
		if (_selectedColumn>-1)
		{
			_id = _pTrainingData->getAttributeNameMap().getNameFromIdx(_selectedColumn);
			return bestEnergy;
		} else {
			return numeric_limits<float>::signaling_NaN();
		}				
	}
Example #15
	float EnumLearner::run( int colIdx )
	{
		const int numClasses = _pTrainingData->getNumClasses();
		const int numColumns = _pTrainingData->getNumAttributes();
		const int numExamples = _pTrainingData->getNumExamples();

		// set the smoothing value to avoid numerical problems
		// when theta=0.
		setSmoothingVal( 1.0 / (float)_pTrainingData->getNumExamples() * 0.01 );

		vector<sRates> vMu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.
		vector<float> tmpV(numClasses); // The class-wise votes/abstentions
		vector<float> previousTmpV(numClasses); // The class-wise votes/abstentions

		float tmpAlpha,previousTmpAlpha, previousEnergy;
		float bestEnergy = numeric_limits<float>::max();

		int numOfDimensions = _maxNumOfDimensions;

		// The column to try is given by the caller; no random selection here
		int j = colIdx;


		if (_verbose > 2)
			cout << "    --> trying attribute = "
			<<_pTrainingData->getAttributeNameMap().getNameFromIdx(j)
			<< endl << flush;

		const int numIdxs = _pTrainingData->getEnumMap(j).getNumNames();

		// Create and initialize the numIdxs x numClasses gamma matrix
		vector<vector<float> > tmpGammasPls(numIdxs);
		vector<vector<float> > tmpGammasMin(numIdxs);
		for (int io = 0; io < numIdxs; ++io) {
			vector<float> tmpGammaPls(numClasses);
			vector<float> tmpGammaMin(numClasses);
			fill(tmpGammaPls.begin(), tmpGammaPls.end(), 0.0);
			fill(tmpGammaMin.begin(), tmpGammaMin.end(), 0.0);
			tmpGammasPls[io] = tmpGammaPls;
			tmpGammasMin[io] = tmpGammaMin;
		}

		// Compute the elements of the gamma plus and minus matrices
		float entry;
		for (int i = 0; i < numExamples; ++i) {
			const vector<Label>& labels = _pTrainingData->getLabels(i);
			int io = static_cast<int>(_pTrainingData->getValue(i,j));	    
			for (int l = 0; l < numClasses; ++l) {
				entry = labels[l].weight * labels[l].y;
				if (entry > 0)
					tmpGammasPls[io][l] += entry;
				else if (entry < 0)
					tmpGammasMin[io][l] += -entry;
			}
		}

		// Initialize the u vector to random +-1
		vector<sRates> uMu(numIdxs); // The idx-wise rates
		vector<float> tmpU(numIdxs);// The idx-wise votes/abstentions
		vector<float> previousTmpU(numIdxs);// The idx-wise votes/abstentions
		for (int io = 0; io < numIdxs; ++io) {
			uMu[io].classIdx = io;	    
			if ( rand()/static_cast<float>(RAND_MAX) > 0.5 )
				tmpU[io] = +1;
			else
				tmpU[io] = -1;
		}

		//vector<sRates> vMu(numClasses); // The label-wise rates
		for (int l = 0; l < numClasses; ++l)
			vMu[l].classIdx = l;
		//vector<float> tmpV(numClasses); // The label-wise votes/abstentions

		float tmpEnergy = numeric_limits<float>::max();
		float tmpVal;
		tmpAlpha = 0.0;
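		// Alternating optimization: fix u and recompute the best label votes v,
		// then fix v and recompute the best value votes u, stopping as soon as
		// the energy no longer decreases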

		while (1) {
			previousEnergy = tmpEnergy;
			previousTmpV = tmpV;
			previousTmpAlpha = tmpAlpha;

			//filling out tmpV and vMu
			for (int l = 0; l < numClasses; ++l) {
				vMu[l].rPls = vMu[l].rMin = vMu[l].rZero = 0; 
				for (int io = 0; io < numIdxs; ++io) {
					if (tmpU[io] > 0) {
						vMu[l].rPls += tmpGammasPls[io][l];
						vMu[l].rMin += tmpGammasMin[io][l];
					}
					else if (tmpU[io] < 0) {
						vMu[l].rPls += tmpGammasMin[io][l];
						vMu[l].rMin += tmpGammasPls[io][l];
					}
				}
				if (vMu[l].rPls >= vMu[l].rMin) {
					tmpV[l] = +1;
				}
				else {
					tmpV[l] = -1;
					tmpVal = vMu[l].rPls;
					vMu[l].rPls = vMu[l].rMin;
					vMu[l].rMin = tmpVal;
				}
			}

			tmpEnergy = AbstainableLearner::getEnergy(vMu, tmpAlpha, tmpV);

			if (_verbose > 2)
				cout << "        --> energy V = " << tmpEnergy << "\talpha = " << tmpAlpha << endl << flush;

			if (tmpEnergy >= previousEnergy) {
				tmpV = previousTmpV;
				break;
			}

			previousEnergy = tmpEnergy;
			previousTmpU = tmpU;
			previousTmpAlpha = tmpAlpha;

			//filling out tmpU and uMu
			for (int io = 0; io < numIdxs; ++io) {
				uMu[io].rPls = uMu[io].rMin = uMu[io].rZero = 0; 
				for (int l = 0; l < numClasses; ++l) {
					if (tmpV[l] > 0) {
						uMu[io].rPls += tmpGammasPls[io][l];
						uMu[io].rMin += tmpGammasMin[io][l];
					}
					else if (tmpV[l] < 0) {
						uMu[io].rPls += tmpGammasMin[io][l];
						uMu[io].rMin += tmpGammasPls[io][l];
					}
				}
				if (uMu[io].rPls >= uMu[io].rMin) {
					tmpU[io] = +1;
				}
				else {
					tmpU[io] = -1;
					tmpVal = uMu[io].rPls;
					uMu[io].rPls = uMu[io].rMin;
					uMu[io].rMin = tmpVal;
				}
			}

			tmpEnergy = AbstainableLearner::getEnergy(uMu, tmpAlpha, tmpU);

			if (_verbose > 2)
				cout << "        --> energy U = " << tmpEnergy << "\talpha = " << tmpAlpha << endl << flush;

			if (tmpEnergy >= previousEnergy) {
				tmpU = previousTmpU;
				break;
			}

			if ( previousEnergy < bestEnergy && previousTmpAlpha > 0 ) {
				_alpha = previousTmpAlpha;
				_v = tmpV;
				_u = tmpU;
				_selectedColumn = j;
				bestEnergy = previousEnergy;
			}
		}
		

		_id = _pTrainingData->getAttributeNameMap().getNameFromIdx(_selectedColumn);
		return bestEnergy;

	}
Example #16
void Primitive::_initVb(Type::E typ, bool bFlat, bool bFlip) {
	int indexI = bFlip ? 1 : 0;

	if(!(_hlVb = s_wVb[typ][indexI].lock())) {
		boom::Vec3V tmpPos;
		boom::IndexV tmpIndex;
		boom::Vec2V tmpUv;
		spn::Pose3D pose;
		pose.setRot(spn::Quat::RotationZ(spn::DegF(1.f)));
		if(typ != Type::Cube) {
			pose.setScale({1,1,2});
			pose.setOffset({0, 0, -1});
		}
		switch(typ) {
			case Type::Cone:
				boom::geo3d::Geometry::MakeCone(tmpPos, tmpIndex, 16);
				break;
			case Type::Cube:
				boom::geo3d::Geometry::MakeCube(tmpPos, tmpIndex);
				break;
			case Type::Sphere:
				boom::geo3d::Geometry::MakeSphere(tmpPos, tmpIndex, 32, 16);
				break;
			case Type::Torus:
				boom::geo3d::Geometry::MakeTorus(tmpPos, tmpIndex, 0.5f, 64, 32);
				break;
			case Type::Capsule:
				boom::geo3d::Geometry::MakeCapsule(tmpPos, tmpIndex, 1, 8);
				break;
			default:
				AssertF(Trap, "invalid type")
		};
		if(bFlip)
			boom::FlipFace(tmpIndex.begin(), tmpIndex.end(), tmpIndex.begin(), 0);

		if(typ == Type::Torus)
			boom::geo3d::Geometry::UVUnwrapCylinder(tmpUv, pose, tmpPos);
		else
			boom::geo3d::Geometry::UVUnwrapSphere(tmpUv, 2,2, pose, tmpPos);

		boom::Vec3V posv, normalv;
		boom::Vec2V uvv;
		boom::IndexV indexv;
		if(bFlat) {
			boom::geo3d::Geometry::MakeVertexNormalFlat(posv, indexv, normalv, uvv,
														tmpPos, tmpIndex, tmpUv);
		} else {
			boom::geo3d::Geometry::MakeVertexNormal(normalv, tmpPos, tmpIndex);
			posv = tmpPos;
			uvv = tmpUv;
			indexv = tmpIndex;
		}
		if(typ == Type::Cube)
			boom::geo3d::Geometry::UVUnwrapCube(uvv, pose, posv, indexv);

		boom::Vec4V tanv;
		boom::geo3d::Geometry::CalcTangent(tanv, posv, indexv, normalv, uvv);

		// Define a unit-size cube in advance and scale it later as needed
		const int nV = posv.size();
		std::vector<vertex::prim_tan> tmpV(nV);
		for(int i=0 ; i<nV ; i++) {
			auto& v = tmpV[i];
			v.pos = posv[i];
			v.tex = uvv[i];
			v.normal = normalv[i];
			v.tangent_c = tanv[i];
		}
		_hlVb = mgr_gl.makeVBuffer(GL_STATIC_DRAW);
		_hlVb.ref()->initData(std::move(tmpV));
		s_wVb[typ][indexI] = _hlVb.weak();
		_hlIb = mgr_gl.makeIBuffer(GL_STATIC_DRAW);
		_hlIb.ref()->initData(std::move(indexv));
		_hlVbLine = _MakeVbLine(posv, normalv, tanv);
		s_wIb[typ][indexI] = _hlIb.weak();
		s_wVbLine[typ][indexI] = _hlVbLine.weak();
	} else {
		// (the cache-hit branch, reusing the shared buffers, is truncated in the original listing)
	}
}
Example #17
AlphaReal MultiThresholdStumpLearner::run() {
	const int numClasses = _pTrainingData->getNumClasses();
	const int numColumns = _pTrainingData->getNumAttributes();

	// set the smoothing value to avoid numerical problems
	// when theta=0.
	setSmoothingVal(1.0 / (AlphaReal) _pTrainingData->getNumExamples() * 0.01);

	vector<sRates> mu(numClasses); // The class-wise rates. See BaseLearner::sRates for more info.

	vector<AlphaReal> tmpV(numClasses); // The class-wise votes/abstentions
	vector<FeatureReal> tmpThresholds(numClasses);
	AlphaReal tmpAlpha;

	AlphaReal bestEnergy = numeric_limits<AlphaReal>::max();
	AlphaReal tmpEnergy;

	StumpAlgorithm<FeatureReal> sAlgo(numClasses);
	sAlgo.initSearchLoop(_pTrainingData);

	int numOfDimensions = _maxNumOfDimensions;
	for (int j = 0; j < numColumns; ++j) {
		// Tricky way to select numOfDimensions columns randomly out of numColumns
		int rest = numColumns - j;
		float r = rand() / static_cast<float> (RAND_MAX);

		if (static_cast<float> (numOfDimensions) / rest > r) {
			--numOfDimensions;
			const pair<vpIterator, vpIterator>
					dataBeginEnd =
							static_cast<SortedData*> (_pTrainingData)->getFileteredBeginEnd(
									j);

			const vpIterator dataBegin = dataBeginEnd.first;
			const vpIterator dataEnd = dataBeginEnd.second;

			sAlgo.findMultiThresholdsWithInit(dataBegin, dataEnd,
					_pTrainingData, tmpThresholds, &mu, &tmpV);

			tmpEnergy = getEnergy(mu, tmpAlpha, tmpV);
			if (tmpEnergy < bestEnergy && tmpAlpha > 0) {
				// Store it in the current algorithm
				// note: I don't really like having so many temp variables
				// but the alternative would be a structure, which would need
				// to be inheritable to make things more consistent. But this would
				// make it less flexible. Therefore, I am still undecided. This
				// might change!

				_alpha = tmpAlpha;
				_v = tmpV;
				_selectedColumn = j;
				_thresholds = tmpThresholds;

				bestEnergy = tmpEnergy;
			}
		}
	}

	return bestEnergy;

}
Example #18
lab1_4::LamVec* lab1_4::system::getResult()
{
    LamVec *str = new LamVec;
    std::vector<double> *res = new std::vector<double>(this->size, 0);
    

    std::vector<std::vector<std::vector<double> >*> vecU;

    double exz = 1000;

    std::vector<std::vector<double> > aNew((*(this->a)));
    while (exz > this->eps)
    {
        std::vector<std::vector<double> > aCopy(aNew);

        std::vector<std::vector<double> > *u = new std::vector<std::vector<double> >(this->size, std::vector<double>(size, 0));
        for (int i = 0; i < this->size; ++i)
            (*(u))[i][i] = 1;
    
        double max = fabs(aCopy[0][1]); // absolute value, so a negative first entry cannot mask the true pivot
        int iMax = 0;
        int jMax = 1;
        for (int i = 0; i < this->size; ++i)
        {
            for (int j = 0; j < this->size; ++j)
            {
                if (i == j)
                    continue;
                if (max < fabs(aCopy[i][j]))
                {
                    max = fabs(aCopy[i][j]);
                    iMax = i;
                    jMax = j;
                }
            }
        }
        double phi = 0.5 * atan(2 * aCopy[iMax][jMax] / (aCopy[iMax][iMax] - aCopy[jMax][jMax]));
        (*(u))[iMax][jMax] = -sin(phi);
        (*(u))[jMax][iMax] = sin(phi);
        (*(u))[iMax][iMax] = cos(phi);
        (*(u))[jMax][jMax] = cos(phi);
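        // One Jacobi rotation; the angle formula assumes the two diagonal
        // entries differ (when aCopy[iMax][iMax] == aCopy[jMax][jMax] the
        // rotation angle should be pi/4)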

        /*for (int i = 0; i < this->size; ++i)
        {
            for (int j = 0; j < this->size; ++j)
            {
                std::cout << (*(u))[i][j] << " ";
            }
            std::cout << "\n";
        }
        std::cout << iMax << " " << jMax << "\n";*/

        std::vector<std::vector<double> > tmpA(aCopy);
        for (int i = 0; i < this->size; ++i)
        {
            for (int j = 0; j < this->size; ++j)
            {
                double tmp = 0;
                for (int z = 0; z < this->size; ++z)
                {
                    tmp += (*(u))[z][i] * aCopy[z][j];
                }
                tmpA[i][j] = tmp;
            }
        }
        for (int i = 0; i < this->size; ++i)
        {
            for (int j = 0; j < this->size; ++j)
            {
                double tmp = 0;
                for (int z = 0; z < this->size; ++z)
                {
                    tmp += tmpA[i][z] * (*(u))[z][j];
                }
                aNew[i][j] = tmp;
            }
        }
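        // Convergence measure: the Frobenius norm of the strict lower triangle,
        // i.e. the remaining off-diagonal mass of the iterate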
        exz = 0;
        for (int i = 0; i < this->size; ++i)
        {
            for (int j = 0; j < i; ++j)
            {
                exz += pow(aNew[i][j], 2);
            }
        }
        exz = pow(exz, 0.5);
        std::cout << exz << " -exz\n";

        /*for (int i = 0; i < this->size; ++i)
        {
            for (int j = 0; j < this->size; ++j)
            {
                std::cout << aNew[i][j] << " ";
            }
            std::cout << "\n";
        }

        std::cout << "\n";
        std::cout << "=============\n";
        std::cout << "\n";
        _sleep(1000);*/

        vecU.push_back(u);
    }
    
    str->vec = new std::vector<std::vector<double> >((*(vecU[0])));
    for (int k = 1; k < vecU.size(); ++k)
    {
        std::vector<std::vector<double> > tmpV((*(str->vec)));
        for (int i = 0; i < this->size; ++i)
        {
            for (int j = 0; j < this->size; ++j)
            {
                double tmp = 0;
                for (int z = 0; z < this->size; ++z)
                {
                    tmp += tmpV[i][z] * (*(vecU[k]))[z][j];
                }
                (*(str->vec))[i][j] = tmp;
            }
        }
    }
    for (int i = 0; i < vecU.size(); ++i)
    {
        delete vecU[i];
    }

    for (int i = 0; i < this->size; ++i)
    {
        (*(res))[i] = aNew[i][i];
    }
    str->lambda = res;
    return str;
}