Exemple #1
0
float Vector3d::inverseVectorDistanceSquared(Vector3d v1, Vector3d v2){
    // Squared Euclidean distance between v1 and v2.
    // NOTE(review): despite the "inverse" in the name, this returns the plain
    // squared distance — no reciprocal is taken. Confirm against callers
    // whether the name or the body is wrong before changing either.
    const float dx = v1.getX() - v2.getX();
    const float dy = v1.getY() - v2.getY();
    const float dz = v1.getZ() - v2.getZ();
    return dx * dx + dy * dy + dz * dz;
}
Exemple #2
0
void MrgSorter::merge
    (
        int start,
        int middle,
        int end
    )
{
    int newCapacity = ( end - start ) + 1;
    SorterClass<DateType> tmpVector( newCapacity );
    int lhIndex = 0;
    int rhIndex = middle - start + 1;
    int tmpMid = middle - start;
    int tmpEnd = end - start;
    int thisIndex = start;
    
    sorterNCopy( tmpVector, *this, start, end );
    
    // Sort elements into result list
    while( lhIndex <= tmpMid && rhIndex <= tmpEnd )
    {
        if( compareTo( tmpVector[ lhIndex ], tmpVector[ rhIndex ] ) <= 0 )
        {
            setValueAt( thisIndex, tmpVector[ lhIndex ] );
            lhIndex++;
            thisIndex++;
        }
        else
        {
            setValueAt( thisIndex, tmpVector[ rhIndex ] );
            rhIndex++;
            thisIndex++;
        }
    }
    
    // Take care of any leftover elements
    while( lhIndex <= tmpMid )
    {
        setValueAt( thisIndex, tmpVector[ lhIndex ] );
        lhIndex++;
        thisIndex++;
    }
    while( rhIndex <= tmpEnd )
    {
        setValueAt( thisIndex, tmpVector[ rhIndex ] );
        rhIndex++;
        thisIndex++;
    }
    
}
Exemple #3
0
void TreeLearnerUCT::initLearningOptions(const nor_utils::Args& args)
{
    // Reads the "baselearnertype" argument (base-learner name and count),
    // instantiates _numBaseLearners weak learners, and parses the UCT
    // update rule ("edge" | "alphas" | "edgesquare"; defaults to edge).
    BaseLearner::initLearningOptions(args);

    string baseLearnerName;
    args.getValue("baselearnertype", 0, baseLearnerName);
    args.getValue("baselearnertype", 1, _numBaseLearners);

    // get the registered weak learner (type from name)
    BaseLearner* pWeakHypothesisSource =
        BaseLearner::RegisteredLearners().getLearner(baseLearnerName);

    // BUG FIX: guard against an unregistered learner name — the original
    // dereferenced a null pointer below without checking.
    if ( pWeakHypothesisSource == nullptr ) {
        cerr << "ERROR: TreeLearnerUCT: unknown base learner type <"
             << baseLearnerName << ">!" << endl;
        return;
    }

    for( int ib = 0; ib < _numBaseLearners; ++ib ) {
        _baseLearners.push_back(pWeakHypothesisSource->create());
        _baseLearners[ib]->initLearningOptions(args);

        // Placeholder child-index pair (-1, -1) for each tree node.
        vector< int > tmpVector( 2, -1 );
        _idxPairs.push_back( tmpVector );
    }

    string updateRule = "";
    if ( args.hasArgument( "updaterule" ) )
        args.getValue("updaterule", 0, updateRule );

    if ( updateRule.compare( "edge" ) == 0 )
        _updateRule = EDGE_SQUARE;
    else if ( updateRule.compare( "alphas" ) == 0 )
        _updateRule = ALPHAS;
    else if ( updateRule.compare( "edgesquare" ) == 0 )
        _updateRule = ESQUARE;
    else {
        // BUG FIX: the message named ProductLearnerUCT (copy-paste from the
        // sibling class) and was missing the closing parenthesis.
        cerr << "Unknown update rule in TreeLearnerUCT (set to default [edge])" << endl;
        _updateRule = EDGE_SQUARE;
    }

}
Exemple #4
0
// Simulated annealing for TSP: starts from the greedy tour and repeatedly
// swaps two random vertices, accepting worse tours with the Metropolis
// probability. Returns the best tour found, its cost, a report string and
// the elapsed time. temperature == 0 selects a size-dependent default.
Data MatrixGraph::simulatedAnnealing(uint temperature)
{
	clock_t overallTime = clock();
	initRand();
	stringstream results;
	vector<uint> route, bestRoute;
	route.reserve(vertexNumber);
	bestRoute.reserve(vertexNumber);
	// Seed with the greedy tour; drop the duplicated closing vertex.
	uint prevCost = greedyAlg(route), bestCost;
	route.pop_back();
	bestRoute = route;
	bestCost = prevCost;

	if (!temperature)
		temperature = vertexNumber << 10;

	default_random_engine gen(uint(time(nullptr)));
	uniform_real_distribution<double> doubleRnd(0.0, 1.0);

	// FIX: the original maintained a counter i (i %= route.size()) that was
	// never read — dead code, removed.
	for (; temperature; --temperature)
	{
		// Pick two distinct positions to swap.
		uint firstIndex = rand() % (route.size());
		uint secondIndex = rand() % (route.size() - 1);
		if (secondIndex >= firstIndex)
			++secondIndex;

		// PERF: swap in place instead of copying the whole route into a
		// temporary vector each iteration (the original allocated O(n)
		// per step); undo the swap if the move is rejected.
		std::swap(route[firstIndex], route[secondIndex]);
		uint tmpCost = calculateCost(route);

		// Accept improvements unconditionally; accept worse tours with the
		// Metropolis probability (short-circuit keeps the RNG draw count
		// identical to the original: doubleRnd is only consumed when the
		// candidate is not an improvement).
		if (tmpCost < prevCost ||
			acceptanceProbability(prevCost, tmpCost, temperature) >= doubleRnd(gen))
		{
			prevCost = tmpCost;
		}
		else
		{
			std::swap(route[firstIndex], route[secondIndex]);
		}

		// Track the best solution found so far.
		if (prevCost < bestCost)
		{
			bestRoute = route;
			bestCost = prevCost;
		}
	}

	double duration = (clock() - overallTime) / (double)CLOCKS_PER_SEC;
	results << "Koszt drogi: " << bestCost << "\nCalkowity czas trwania: " << duration << " sekund\n";
	return Data(bestRoute, bestCost, results.str(), duration);

}
Exemple #5
0
	/// Greedily grows a decision-tree weak hypothesis: trains a learner for
	/// the root, then repeatedly splits the leaf with the largest edge
	/// improvement (drawn from a priority queue) until _numBaseLearners
	/// nodes exist or no remaining split improves the edge.
	/// Returns the energy of the resulting tree; sets this->_alpha.
	/// NOTE(review): each NodePoint appears to carry both a trained _learner
	/// and a _constantLearner; exactly one of the two is kept in
	/// _baseLearners and the other is deleted — confirm ownership contract
	/// in calculateEdgeImprovement before touching the delete sites.
	AlphaReal TreeLearner::run()
	{		
		set< int > tmpIdx, idxPos, idxNeg, origIdx;
		//ScalarLearner* pCurrentBaseLearner = 0;
		ScalarLearner* pTmpBaseLearner = 0;		
		int ib = 0;
		// Placeholder child-index pair (-1, -1) pushed for every tree node;
		// slots are patched later via _idxPairs[parent][leftOrRight] = ib.
		vector< int > tmpVector( 2, -1 );
		
		// Remember the caller's index set so it can be restored before the
		// alpha computation at the end.
		_pTrainingData->getIndexSet( origIdx );
		
		
		_pScalaWeakHypothesisSource->setTrainingData(_pTrainingData);
		
		//train the first learner		
		NodePoint parentNode, nodeLeft, nodeRight;								
		parentNode._idx = 0;
		parentNode._parentIdx = -1;
		parentNode._learnerIdxSet = origIdx;
		
		calculateEdgeImprovement( parentNode );		
		
		// insert the root
		if ( parentNode._edgeImprovement <= 0.0 ) // the constant is the best, in this case the treelearner is equivalent to the constant learner
		{
			// Degenerate tree: keep only the constant learner and discard
			// the trained one.
			_baseLearners.push_back( parentNode._constantLearner );			
			_idxPairs.push_back( tmpVector );
			this->_alpha = parentNode._constantLearner->getAlpha();
			ib++;			
			delete parentNode._learner;
			return parentNode._constantEnergy;
		}
		
		// Root split is useful: keep the trained learner as node 0.
		_baseLearners.push_back( parentNode._learner );
		_idxPairs.push_back( tmpVector );
		ib++;
		
		// put the first two children into the priority queue								
		extendNode( parentNode, nodeLeft, nodeRight );
		
		calculateEdgeImprovement( nodeLeft );
		calculateEdgeImprovement( nodeRight );
		
		// Max-first on edge improvement (greater_first_tree comparator).
		priority_queue< NodePoint, vector<NodePoint>, greater_first_tree<NodePoint> > pq;
		
		pq.push(nodeLeft);
		pq.push(nodeRight);
		
		
		// Main growth loop: repeatedly expand the best pending node.
		while ( ! pq.empty() )
		{
			NodePoint currentNode = pq.top();
			pq.pop();
			
			
			if ( _verbose > 3 ) {
				cout << "Current edge imporvement: " << currentNode._edgeImprovement << endl;
			}
			
			if (currentNode._edgeImprovement>0)
			{
				// Splitting helps: keep the trained learner, drop the constant.
				_baseLearners.push_back( currentNode._learner );
				_idxPairs.push_back( tmpVector );
				//_baseLearners[ib] = currentNode._learner;
				delete currentNode._constantLearner;				
			} else {
				// No improvement: this node becomes a constant leaf and is
				// not extended further (continue skips the extendNode below).
				_baseLearners.push_back(currentNode._constantLearner);
				_idxPairs.push_back( tmpVector );
				//_baseLearners[ib] = currentNode._constantLearner;
				delete currentNode._learner;		
				continue;
			}
			
			// Wire this node into its parent's child slot.
			_idxPairs[ currentNode._parentIdx ][ currentNode._leftOrRightChild ] = ib;
			currentNode._idx = ib;
			ib++;																		
			if (ib >= _numBaseLearners) break;
			
			extendNode( currentNode, nodeLeft, nodeRight );
			
			calculateEdgeImprovement( nodeLeft );
			calculateEdgeImprovement( nodeRight );						
			
			pq.push(nodeLeft);
			pq.push(nodeRight);						
		}
		
		// Free the learners of any nodes still queued when the size cap hit.
		while ( ! pq.empty() )
		{
			NodePoint currentNode = pq.top();
			pq.pop();
			
			if (_verbose>3) cout << "Discarded node's edge improvement: " << currentNode._edgeImprovement << endl;
			
			if (currentNode._learner) delete currentNode._learner;
			delete currentNode._constantLearner;
		}
		
		// Compose the tree's id from its node ids.
		// NOTE(review): int vs size_t comparison in this loop condition.
		_id = _baseLearners[0]->getId();
		for(int ib = 1; ib < _baseLearners.size(); ++ib)
			_id += "_x_" + _baseLearners[ib]->getId();
		
		//calculate alpha
		this->_alpha = 0.0;
		AlphaReal eps_min = 0.0, eps_pls = 0.0;
		
		//_pTrainingData->clearIndexSet();
		// Restore the original index set, then accumulate the weighted
		// error (eps_min) and correct-classification (eps_pls) masses.
		_pTrainingData->loadIndexSet( origIdx );
		for( int i = 0; i < _pTrainingData->getNumExamples(); i++ ) {
			vector< Label> l = _pTrainingData->getLabels( i );
			for( vector< Label >::iterator it = l.begin(); it != l.end(); it++ ) {
				AlphaReal result  = this->classify( _pTrainingData, i, it->idx );
				
				if ( ( result * it->y ) < 0 ) eps_min += it->weight;
				if ( ( result * it->y ) > 0 ) eps_pls += it->weight;
			}
			
		}
		
		// set the smoothing value to avoid numerical problem
	    // when theta=0.
   	    setSmoothingVal( (AlphaReal)(1.0 / _pTrainingData->getNumExamples() * 0.01 ) );
		
		
		this->_alpha = getAlpha( eps_min, eps_pls );
		
		// calculate the energy (sum of the energy of the leaves
		AlphaReal energy = this->getEnergy( eps_min, eps_pls );
		
		return energy;
	}