Example #1
    //! build and factorize the "real" matrix E1 = fac1*I - Jac.
    //! On the first call, reduce the Jacobian to upper Hessenberg form (LAPACK dgehrd).
    inline void decomr(double fac1, MatrixReal& Jac)
    {
      if(calhes)
        {
          int ilo=1, ihi=n, lda=n, info;
          int k=-1;
          // workspace query: lwork = -1 asks dgehrd for the optimal size,
          // which it returns in work[0]
          int nn=n;
          dgehrd_(&nn, &ilo, &ihi, &Jac, &lda, tau, work, &k, &info);

          k = static_cast<int>(work[0]);
          if(lwork < k)
            {
              delete[] work;   // deleting a null pointer is a no-op
              lwork = k;
              work = new double[lwork];
            }
          dgehrd_(&nn, &ilo, &ihi, &Jac, &lda, tau, work, &k, &info);
          if(info != 0)
            throw OdesException("odes::Matrices::Matrices, decomr: info=",
                                info);
          calhes = false;
        }
      E1.equal_minus(Jac);
      E1.addDiag(fac1);
      int info = dech<n>(E1, ipr);
      if(info != 0)
        throw OdesException("odes::Matrices::Matrices, decomr (dech): ",
                            info);
    }
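The two dgehrd_ calls above follow the standard LAPACK workspace-query idiom: call once with lwork = -1 to obtain the optimal workspace size in work[0], allocate, then call again for real. A minimal self-contained sketch of that idiom against the raw Fortran symbol (the helper name hessenbergReduce is illustrative, not part of the odes library):

#include <stdexcept>
#include <vector>

extern "C" void dgehrd_(const int* n, const int* ilo, const int* ihi,
                        double* a, const int* lda, double* tau,
                        double* work, const int* lwork, int* info);

// Reduce a dense column-major n x n matrix to upper Hessenberg form,
// sizing the workspace with a query first.
void hessenbergReduce(int n, double* a, double* tau /* length n-1 */)
{
    int ilo = 1, ihi = n, lda = n, info = 0;
    int lwork = -1;                    // -1 requests a workspace query
    double probe = 0.0;
    dgehrd_(&n, &ilo, &ihi, a, &lda, tau, &probe, &lwork, &info);

    lwork = static_cast<int>(probe);   // optimal size was written to work[0]
    std::vector<double> work(lwork);
    dgehrd_(&n, &ilo, &ihi, a, &lda, tau, work.data(), &lwork, &info);
    if (info != 0)
        throw std::runtime_error("dgehrd_ failed");
}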
Example #2
void DualityDiagram::check_(
                            const MatrixReal& matrix,
                            const std::vector<double>& rowWeights,
                            const std::vector<double>& colWeights,
                            unsigned int nbAxes) throw (RbException)
{
    size_t rowNb = matrix.getNumberOfRows();
    size_t colNb = matrix.getNumberOfColumns();
    
    if (rowWeights.size() != rowNb)
        throw RbException("DualityDiagram::check_. The number of row weights has to be equal to the number of rows!");
    if (colWeights.size() != colNb)
        throw RbException("DualityDiagram::check_. The number of column weights has to be equal to the number of columns!");
    
    // All row weights have to be non-negative (the check below allows zero)
    for (std::vector<double>::const_iterator it = rowWeights.begin(); it != rowWeights.end(); it++)
    {
        if (*it < 0.)
            throw RbException("DualityDiagram::check_. All row weights have to be non-negative");
    }
    }
    
    // All column weights have to be non-negative (the check below allows zero)
    for (std::vector<double>::const_iterator it = colWeights.begin(); it != colWeights.end(); it++)
    {
        if (*it < 0.)
            throw RbException("DualityDiagram::check_. All column weights have to be non-negative");
    }
    }
}
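The same size and non-negativity checks can be written more compactly with the standard library. A sketch, assuming plain standard exceptions are acceptable (checkWeights and std::invalid_argument stand in for the RevBayes API):

#include <algorithm>
#include <stdexcept>
#include <vector>

// Sketch: validate weight vectors against the matrix dimensions.
void checkWeights(const std::vector<double>& rowWeights, size_t rowNb,
                  const std::vector<double>& colWeights, size_t colNb)
{
    if (rowWeights.size() != rowNb || colWeights.size() != colNb)
        throw std::invalid_argument("weight vector size must match the matrix dimension");

    auto nonNegative = [](double w) { return w >= 0.0; };
    if (!std::all_of(rowWeights.begin(), rowWeights.end(), nonNegative) ||
        !std::all_of(colWeights.begin(), colWeights.end(), nonNegative))
        throw std::invalid_argument("all weights must be non-negative");
}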
Example #3
    //! build and factorize "real" matrix
    //! \param fac1 : we add fac1*I to the Jacobian.
    //! \param Jac the Jacobian.
    inline void decomr(double fac1, MatrixReal& Jac)
    {
      E1.equal_minus(Jac);
      E1.addDiag(fac1);
      int nn = n, info;
      // dense LU factorization of E1 = fac1*I - Jac (LAPACK dgetrf)
      dgetrf_(&nn, &nn, &E1, &nn, &(ipivr[0]), &info);
      if(info != 0)
        throw OdesException("odes::Matrices::decomr dgetrf,info=", info);
    }
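Once dgetrf_ has computed the LU factors in place, the same factorization can be reused to solve (fac1*I - Jac) x = b for many right-hand sides with dgetrs. A sketch against the raw Fortran symbol (the helper solveFactored is illustrative, not part of the odes library):

#include <stdexcept>

extern "C" void dgetrs_(const char* trans, const int* n, const int* nrhs,
                        const double* a, const int* lda, const int* ipiv,
                        double* b, const int* ldb, int* info);

// Solve A x = b using factors produced by dgetrf_; b is overwritten with x.
void solveFactored(int n, const double* lu, const int* ipiv, double* b)
{
    char trans = 'N';      // solve with A itself, not its transpose
    int nrhs = 1, info = 0;
    dgetrs_(&trans, &n, &nrhs, lu, &n, ipiv, b, &n, &info);
    if (info != 0)
        throw std::runtime_error("dgetrs_ failed");
}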
Example #4
    //! see full matrix case.
    inline void decomr(double fac1, const MatrixReal& Jac)
    {
      E1.equal_minus(Jac);
      E1.addDiag(fac1);
      int nn = n, knsub = nsub, knsup = nsup, lldab = ldab, info;
      // banded LU factorization (LAPACK dgbtrf); ldab must be at least
      // 2*nsub+nsup+1 so partial pivoting has room for fill-in.
      dgbtrf_(&nn, &nn, &knsub, &knsup, &E1, &lldab, &(ipivr[0]), &info);
      if(info != 0)
        throw OdesException("odes::Matrices::decomr dgbtrf,info=", info);
    }
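dgbtrf_ expects its input in LAPACK band storage: column j of the band lives in column j of a (2*kl+ku+1) x n array, with the first kl rows left free for pivoting fill-in. A sketch of packing a dense column-major matrix into that layout (packBand is illustrative, not part of the odes library):

#include <algorithm>
#include <cstddef>
#include <vector>

// Pack a dense column-major n x n matrix with kl subdiagonals and ku
// superdiagonals into band storage for dgbtrf: entry A(i,j) lands at
// ab[(kl+ku+i-j) + j*ldab] (0-based), i.e. Fortran AB(kl+ku+1+i-j, j).
std::vector<double> packBand(const std::vector<double>& a, int n, int kl, int ku)
{
    int ldab = 2*kl + ku + 1;
    std::vector<double> ab(static_cast<std::size_t>(ldab) * n, 0.0);
    for (int j = 0; j < n; ++j)
        for (int i = std::max(0, j - ku); i <= std::min(n - 1, j + kl); ++i)
            ab[(kl + ku + i - j) + static_cast<std::size_t>(j) * ldab]
                = a[i + static_cast<std::size_t>(j) * n];
    return ab;
}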
Example #5
double RbStatistics::DecomposedInverseWishart::lnPdf(double nu, const MatrixReal &r) {

    size_t k = r.getDim();
    
    if ( r.isPositive() == false )
        {
        return RbConstants::Double::neginf;
        }
    
    double lnP = (0.5 * (k - 1.0) * (nu - 1.0) - 1.0);
    lnP += r.getLogDet();
    
    // accumulate the log-determinants of the k principal submatrices
    // obtained by deleting row i and column i of r
    MatrixReal submatrix(k-1);
    for (size_t i=0; i<k; i++)
        {
        size_t ai = 0;
        for (size_t a=0; a<k; a++)
            {
            if (a != i)
                {
                size_t bi = 0;
                for (size_t b=0; b<k; b++)
                    {
                    if ( b != i )
                        {
                        submatrix[ai][bi] = r[a][b];
                        bi++;
                        }
                    }
                ai++;
                }
            }

        lnP += submatrix.getLogDet();
        std::cout << "logdet=" << submatrix.getLogDet() << std::endl;    // debug output
        }

    return lnP;
}
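The nested a/b loops above simply copy r with row i and column i removed. The same minor extraction as a standalone sketch (plain vectors instead of MatrixReal):

#include <cstddef>
#include <vector>

// Return the (k-1) x (k-1) principal minor of m with row i and column i
// deleted; equivalent to the nested loops in lnPdf above.
std::vector<std::vector<double>>
principalMinor(const std::vector<std::vector<double>>& m, std::size_t i)
{
    std::size_t k = m.size();
    std::vector<std::vector<double>> sub(k - 1, std::vector<double>(k - 1));
    for (std::size_t a = 0, ai = 0; a < k; ++a)
    {
        if (a == i) continue;
        for (std::size_t b = 0, bi = 0; b < k; ++b)
        {
            if (b == i) continue;
            sub[ai][bi++] = m[a][b];
        }
        ++ai;
    }
    return sub;
}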
Example #6
PrecisionMatrix::PrecisionMatrix(const MatrixReal& from) : MatrixReal(from), eigensystem(this), eigenflag(false), inverse(from.getNumberOfColumns(), from.getNumberOfColumns(), 0) {
    if (getNumberOfRows() != getNumberOfColumns())    {
        // raise a proper exception instead of `throw(NULL)`, which throws a meaningless null value
        throw RbException("PrecisionMatrix: copy constructor from a non-square matrix");
    }
}
Example #7
void Tree::trainTree(const MatrixReal& featMat, const VectorInteger& labels)
{

//	We work with a queue of nodes, initially containing only the root node.
//	We process the queue until it becomes empty. 
	std::queue<int> toTrain;

	int size,numClasses,numVars,dims;

	size = labels.size();
	dims = featMat.cols();
	numClasses = labels.maxCoeff();

	
	classWts = VectorReal::Zero(numClasses+1);

	for(int i = 0; i < labels.size(); ++i)
		classWts(labels(i)) += 1.0;

	classWts /= size;
	for(int i = 0; i < size; ++i)
		nodeix.push_back(i);

	std::cout<<"Training tree, dimensions set\n";

	numVars = (int)sqrt((double)dims) + 1;
	int cur;

//	The relevant indices for the root node are the entire set of training data
	nodes[0].start = 0;
	nodes[0].end = size-1;

//	Initialise the queue with just the root node
	toTrain.push(0);

//	Stores the relevant features.
	VectorReal relFeat;

//	Resize our boolean array, more on this later. 
	indices.resize(size);
	
	std::cout<<"Starting the queue\n";

	int lpoints,rpoints;
//	While the queue isn't empty, continue processing.
	while(!toTrain.empty())
	{
		int featNum;
		double threshold;
		lpoints = rpoints = 0;

		cur = toTrain.front();
		
//		std::cout<<"In queue, node being processed is d :"<<cur.depth<<"\n";
	
//		There are two ways for a node to get out of the queue trivially,
//		a) it doesn't have enough data to be a non-trivial split, or
//		b) it has hit the maximum permissible depth
		
		if((nodes[cur].end - nodes[cur].start < DATA_MIN) || (nodes[cur].depth == depth))
		{
//			Tell ourselves that this is a leaf node, and remove the node
//			from the queue.
//			std::cout<<"Popping a leaf node\n";

			nodes[cur].setType(true);

//			Initialize the histogram and set it to zero
			
			nodes[cur].hist = VectorReal::Zero(numClasses+1);

//			The below code should give the histogram of all the elements
			for(int i = nodes[cur].start; i <= nodes[cur].end; ++i)
			{
				nodes[cur].hist[labels(nodeix[i])] += 1.0;
			}

			for(int i = 0 ; i < classWts.size(); ++i)
				nodes[cur].hist[i] = nodes[cur].hist[i] / classWts[i];

			toTrain.pop();
			continue;

		}	

		double infoGain(-100.0);	// sentinel: any real information gain will beat this
		relFeat.resize(size);

//		In case this isn't a trivial node, we need to process it. 
		for(int i = 0; i < numVars; ++i)
		{
//			std::cout<<"Choosing a random variable\n";
//			Randomly select a feature
			featNum = rand()%dims;
//			std::cout<<"Feat: "<<featNum<<std::endl;
//			Extract the relevant feature set from the training data
			relFeat = featMat.col(featNum);

			double tmax,tmin,curInfo;

			tmax = relFeat.maxCoeff();
			tmin = relFeat.minCoeff();
//			infoGain = -100;
			//std::cout<<"Min "<<tmin<<"Max: "<<tmax<<std::endl;

//			NUM_CHECKS is a macro defined at the start
			for(int j = 0; j < NUM_CHECKS; ++j)
			{
//				std::cout<<"Choosing a random threshold\n";
//				Generate a random threshold
				threshold = ((rand()%100)/100.0)*(tmax - tmin) + tmin;
				//std::cout<<"Thresh: "<<threshold<<std::endl;
				
				for(int k = nodes[cur].start; k <= nodes[cur].end ; ++k)
					indices[k] = (relFeat(k) < threshold);

//				Check if we have enough information gain
				curInfo = informationGain(nodes[cur].start,nodes[cur].end, labels);
//				std::cout<<"Info gain : "<<curInfo<<"\n";
//				curInfo = (double) ((rand()%10)/10.0);

				if(curInfo > infoGain)
				{
					infoGain = curInfo;
					nodes[cur].x = featNum;
					nodes[cur].threshold = threshold;
				}

			}

		}
//		We have selected a feature and a threshold for it that maximises the information gain.

		relFeat = featMat.col(nodes[cur].x);

//		We just set the indices depending on whether the features are greater or lesser.
//		Conventions followed : greater goes to the right.
		for(int k = nodes[cur].start; k <= nodes[cur].end; ++k)
		{
//			If relfeat is lesser, indices[k] will be true, which will put it in the 
//			left side of the partition.
			indices[k] = relFeat(k) < nodes[cur].threshold;
//			indices[k] = (bool)(rand()%2);
			
			if(indices[k])
				lpoints++;
			else
				rpoints++;
		}


		if( (lpoints < DATA_MIN) || (rpoints < DATA_MIN) )
		{
//			Tell ourselves that this is a leaf node, and remove the node
//			from the queue.
//			std::cout<<"Popping a leaf node\n";

			nodes[cur].setType(true);

//			Initialize the histogram and set it to zero
			
			nodes[cur].hist.resize(numClasses+1);
			nodes[cur].hist = VectorReal::Zero(numClasses+1);

//			The below code should give the histogram of all the elements
			for(int i = nodes[cur].start; i <= nodes[cur].end; ++i)
			{
				nodes[cur].hist[labels(nodeix[i])] += 1.0;
			}
			
			toTrain.pop();
			continue;

		}

		int part;

//		Use the prebuilt function to linearly partition our data
		part = partition(nodes[cur].start,nodes[cur].end);

		Node right, left;
//		Increase the depth of the children
		right.depth = left.depth = nodes[cur].depth + 1;

//		Correctly assign the partitions
		left.start = nodes[cur].start;
		left.end = part -1;
		
//		Push back into the relevant places and also link the parent and the child
		nodes.push_back(left);
		nodes[cur].leftChild = nodes.size()-1;
		toTrain.push(nodes[cur].leftChild);

//		Ditto with the right node. 
		right.start = part;
		right.end = nodes[cur].end;

		nodes.push_back(right);
		nodes[cur].rightChild = nodes.size()-1;
		toTrain.push(nodes[cur].rightChild);

//		Finally remove our node from the queue. 
		toTrain.pop();

	}
}
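The informationGain helper called above is not shown in this excerpt. Below is a plausible entropy-based sketch of what such a helper computes; it is hypothetical, not the author's implementation, and assumes indices[k] == true marks the left side of the split and labels lie in 0..numClasses:

#include <cmath>
#include <vector>

// Shannon entropy of a class-count histogram.
static double entropy(const std::vector<double>& counts, double total)
{
    double h = 0.0;
    for (double c : counts)
        if (c > 0.0)
        {
            double p = c / total;
            h -= p * std::log2(p);
        }
    return h;
}

// Hypothetical information gain: parent entropy minus the size-weighted
// entropy of the left/right children induced by indices[].
double informationGainSketch(int start, int end, int numClasses,
                             const std::vector<int>& labels,
                             const std::vector<int>& nodeix,
                             const std::vector<bool>& indices)
{
    std::vector<double> all(numClasses + 1, 0.0);
    std::vector<double> left(numClasses + 1, 0.0);
    std::vector<double> right(numClasses + 1, 0.0);
    double nl = 0.0, nr = 0.0;
    for (int k = start; k <= end; ++k)
    {
        int lab = labels[nodeix[k]];
        all[lab] += 1.0;
        if (indices[k]) { left[lab] += 1.0; nl += 1.0; }
        else            { right[lab] += 1.0; nr += 1.0; }
    }
    if (nl == 0.0 || nr == 0.0)
        return 0.0;                    // degenerate split, no gain
    double n = nl + nr;
    return entropy(all, n)
         - (nl / n) * entropy(left, nl)
         - (nr / n) * entropy(right, nr);
}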
Example #8
void DualityDiagram::compute_(const MatrixReal& matrix, double tol)
{
    size_t rowNb = matrix.getNumberOfRows();
    size_t colNb = matrix.getNumberOfColumns();
    
    // If there are fewer rows than columns, the variance-covariance or correlation matrix is obtained differently (see below)
    bool transpose = (rowNb < colNb);
    
    // The initial matrix is multiplied by the square root of the row weights.
    std::vector<double> rW(rowWeights_);
    for (size_t i = 0; i < rowWeights_.size(); i++)
    {
        rW[i] = sqrt(rowWeights_[i]);
    }
    
    MatrixReal M1(rowNb, colNb);
    RbMath::hadamardMult(matrix, rW, M1, true);
    
    // The resulting matrix is then multiplied by the square root of the column weights.
    std::vector<double> cW(colWeights_);
    for (unsigned int i = 0; i < colWeights_.size(); i++)
    {
        cW[i] = sqrt(colWeights_[i]);
    }
    
    MatrixReal M2(rowNb,colNb);
    RbMath::hadamardMult(M1, cW, M2, false);
    
    // The variance-covariance (if the data is centered) or the correlation (if the data is centered and normalized) matrix is calculated
    MatrixReal tM2(M2.getNumberOfColumns(),M2.getNumberOfRows());
    RbMath::transposeMatrix(M2, tM2);
    MatrixReal *M3 = new MatrixReal(0,0);   // note: handed to EigenSystem below; its deletion is not shown in this excerpt
    if (!transpose)
        (*M3) = tM2 * M2;
    else
        (*M3) = M2 * tM2;
    
    EigenSystem eigen(M3);
    eigen.update();
    // @todo: This may be implemented some time ... (Sebastian)
//    if (!eigen.isSymmetric())
//        throw RbException("DualityDiagram (constructor). The variance-covariance or correlation matrix should be symmetric...");
    
    eigenValues_ = eigen.getRealEigenvalues();
    eigenVectors_ = eigen.getEigenvectors();
    
    // How many significant axes should be retained?
    size_t rank = 0;
    for (size_t i = eigenValues_.size(); i > 0; i--)
    {
        if ((eigenValues_[i - 1] / eigenValues_[eigenValues_.size() - 1]) > tol)
            rank++;
    }
    
    if (nbAxes_ <=0)
    {
        throw RbException("DualityDiagram (constructor). The number of axes to keep must be positive.");
    }
    if (nbAxes_ > rank)
    {
        nbAxes_ = rank;
    }
    
    /* The eigenvalues are initially sorted into ascending order by the 'eigen' function.
       Here the significant values are re-sorted in descending order. */
    std::vector<double> tmpEigenValues(nbAxes_);
    size_t cpt = 0;
    for (size_t i = eigenValues_.size(); i > (eigenValues_.size() - nbAxes_); i--)
    {
        tmpEigenValues[cpt] = eigenValues_[i-1];
        cpt++;
    }
    eigenValues_ = tmpEigenValues;
    
    for (std::vector<double>::iterator it = rowWeights_.begin(); it != rowWeights_.end(); it++)
    {
        if (*it == 0.)
            *it = 1.;
    }
    
    for (std::vector<double>::iterator it = colWeights_.begin(); it != colWeights_.end(); it++)
    {
        if (*it == 0.)
            *it = 1.;
    }
    
    std::vector<double> dval(nbAxes_);
    for (size_t i = 0; i < dval.size(); i++)
    {
        dval[i] = sqrt(eigenValues_[i]);
    }
    
    std::vector<double> invDval(nbAxes_);
    for (size_t i = 0; i < invDval.size(); i++)
    {
        invDval[i] = 1. / sqrt(eigenValues_[i]);
    }
    
    // Calculation of the row and column coordinates as well as the principal axes and components:
    if (!transpose)
    {
        std::vector<double> tmpColWeights(colNb);
        for (unsigned int i = 0; i < colWeights_.size(); i++)
        {
            tmpColWeights[i] = 1. / sqrt(colWeights_[i]);
        }
        
        // The eigenvectors are placed in the same order as their corresponding eigenvalues in eigenValues_.
        MatrixReal tmpEigenVectors(0,0);
        tmpEigenVectors.resize(eigenVectors_.getNumberOfRows(), nbAxes_);
        size_t cpt2 = 0;
        for (size_t i = eigenVectors_.getNumberOfColumns(); i > (eigenVectors_.getNumberOfColumns() - nbAxes_); i--)
        {
            for (unsigned int j = 0; j < eigenVectors_.getNumberOfRows(); j++)
            {
                tmpEigenVectors[j][cpt2] = eigenVectors_[j][i-1];
            }
            cpt2++;
        }
        
        // matrix of principal axes
        RbMath::hadamardMult(tmpEigenVectors, tmpColWeights, ppalAxes_, true);
        // matrix of row coordinates
        MatrixReal tmpRowCoord_(0,0);
        tmpRowCoord_.resize(rowNb, nbAxes_);
        RbMath::hadamardMult(matrix, colWeights_, tmpRowCoord_, false);
        rowCoord_ = tmpRowCoord_ * ppalAxes_;
        
        // matrix of column coordinates
        RbMath::hadamardMult(ppalAxes_, dval, colCoord_, false);
        // matrix of principal components
        RbMath::hadamardMult(rowCoord_, invDval, ppalComponents_, false);
    }
    else
    {
        std::vector<double> tmpRowWeights(rowNb);
        for (unsigned int i = 0; i < rowWeights_.size(); i++)
        {
            tmpRowWeights[i] = 1. / sqrt(rowWeights_[i]);
        }
        
        // The eigenvectors are placed in the same order as their corresponding eigenvalues in eigenValues_.
        MatrixReal tmpEigenVectors(0,0);
        tmpEigenVectors.resize(eigenVectors_.getNumberOfRows(), nbAxes_);
        size_t cpt2 = 0;
        for (size_t i = eigenVectors_.getNumberOfColumns(); i > (eigenVectors_.getNumberOfColumns() - nbAxes_); i--)
        {
            for (size_t j = 0; j < eigenVectors_.getNumberOfRows(); j++)
            {
                tmpEigenVectors[j][cpt2] = eigenVectors_[j][i-1];
            }
            cpt2++;
        }
        
        // matrix of principal components
        RbMath::hadamardMult(tmpEigenVectors, tmpRowWeights, ppalComponents_, true);
        // matrix of column coordinates
        MatrixReal tmpColCoord_(colNb, nbAxes_);
        RbMath::hadamardMult(matrix, rowWeights_, tmpColCoord_, true);
        MatrixReal tTmpColCoord_(tmpColCoord_.getNumberOfColumns(),tmpColCoord_.getNumberOfRows());
        RbMath::transposeMatrix(tmpColCoord_, tTmpColCoord_);
        colCoord_ = tTmpColCoord_ * ppalComponents_;
        
        // matrix of row coordinates
        RbMath::hadamardMult(ppalComponents_, dval, rowCoord_, false);
        // matrix of principal axes
        RbMath::hadamardMult(colCoord_, invDval, ppalAxes_, false);
    }
}
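The two hadamardMult calls at the top of compute_ scale the input entrywise by the square roots of the row and column weights before the eigendecomposition. A standalone sketch of that scaling (plain vectors, not the RbMath API):

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch of the weighted scaling used by the duality diagram:
// out[i][j] = sqrt(rowW[i]) * m[i][j] * sqrt(colW[j]); compute_ then
// diagonalizes out^T * out (or out * out^T when there are fewer rows).
std::vector<std::vector<double>>
weightScale(const std::vector<std::vector<double>>& m,
            const std::vector<double>& rowW, const std::vector<double>& colW)
{
    std::size_t rn = m.size(), cn = m.empty() ? 0 : m[0].size();
    std::vector<std::vector<double>> out(rn, std::vector<double>(cn));
    for (std::size_t i = 0; i < rn; ++i)
        for (std::size_t j = 0; j < cn; ++j)
            out[i][j] = std::sqrt(rowW[i]) * m[i][j] * std::sqrt(colW[j]);
    return out;
}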