Example #1
void ColorRenderMatrixFunctions(void)
{

    MultiplyMatrix(modelViewMatrix, viewMatrix, modelWorldMatrix);
    //------------------------------------------------------------    
    LoadIdentity(tempMatrix_A);    
    MultiplyMatrix(tempMatrix_A, viewMatrix, viewRotateMatrix);    
    //---------------------------------------------------------------------    
    MultiplyMatrix(moveSet_x_modelWorldMatrix, moveSetMatrix, modelWorldMatrix);    
    //---------------------------------------------------------------------    
    LoadIdentity(tempMatrix_B);     
    MultiplyMatrix(tempMatrix_B,  shadowTextureMatrix, moveSet_x_modelWorldMatrix);     
    //---------------------------------------------------------------------    
    LoadIdentity(tempMatrix_C);     
    MultiplyMatrix(tempMatrix_C,  projectionMatrix, tempMatrix_A);       
    //---------------------------------------------------------------------    
    LoadIdentity(tempMatrix_D);     
    MultiplyMatrix(tempMatrix_D,  tempMatrix_C, moveSet_x_modelWorldMatrix);     
    
    //----------------------------------------------------------------------------------------------------------    
    LoadIdentity(shadowTextureMatrix);
    MultiplyMatrix(shadowTextureMatrix, shadowBiasMatrix, shadowProjectionMatrix);    
    MultiplyMatrix(shadowTextureMatrix, shadowTextureMatrix, shadowViewMatrix);    // multiply these into a temp matrix to avoid round-off errors (uses more code, with increased precision)
    //----------------------------------------------------------------------------------------------------------
   
    LoadIdentity(tempMatrix_E);
    MultiplyMatrix(tempMatrix_E, viewRotateMatrix, modelRotationMatrix);
    //----------------------------------------------------------------------------------------------------------
    LoadIdentity(modelRotationINVmatrix);
    InvertMatrix(modelRotationINVmatrix, tempMatrix_E);
     //----------------------------------------------------------------------------------------------------------     
}
Example #2
void setupTransforms_MainColor(void)
{
        
        LoadIdentity(modelView);
        LookAt(modelView,    eyeposition[0],
                             eyeposition[1],
                             eyeposition[2],
                             lookAt[0],
                             lookAt[1],
                             lookAt[2],
                             upVector[0],
                             upVector[1],
                             upVector[2]);
        
        Rotate(modelView, 0.0, 1.0, 0.0, turnTable);
        Rotate(modelView, 1.0, 0.0, 0.0, audioRotation[3]);        
        //--------------------------------------------------------------------------------------
        LoadIdentity(viewMatrix);
        AssignMatrix(viewMatrix, modelView);
        InvertMatrix(viewMatrix, viewMatrix);
        //--------------------------------------------------------------------------------------
        Translate(modelView, moveSet[0] , moveSet[1] , moveSet[2] );
        Rotate(modelView, 1.0, 0.0, 0.0, rotateModelWithLeftMouse[0]);
        Rotate(modelView, 0.0, 1.0, 0.0, rotateModelWithLeftMouse[1]);
}
void MahalanobisSimilarity::Init() {
  local_info_ = new LocalInfo<L2ItemInfo, L2ClusterInfo, L2CoClusterInfo>();
  local_info_->Init(NULL, // row_item_info
                    NULL, // col_item_info
                    new vector<L2ClusterInfo>(global_info_->GetNumRowClusts(),
                        L2ClusterInfo(data_->GetNumCols())), // row_clust_info
                    (do_bicluster_?
                     new vector<L2ClusterInfo>(global_info_->GetNumColClusts(),
                         L2ClusterInfo(global_info_->GetNumRowItems())):
                     NULL), // col_clust_info
                    NULL); // co_cluster_info

  // First column is an indicator for training/test data
  uint32_t num_cols = data_->GetNumCols();

  double cov_matrix[num_cols*num_cols];
  for (uint32_t i = 0; i < num_cols*num_cols; i++) {
    cov_matrix[i] = 0;
  }
  cov_matrix_ = new double [num_cols*num_cols];
  vector<double> averages(num_cols, 0.0);
  uint32_t num_train = 0;

  // Computing averages
  for (uint32_t node_id = 0; node_id < data_->GetNumRows(); ++node_id) {
    Data<double>::const_iterator_base *it = data_->begin(node_id);
    Data<double>::const_iterator_base *end = data_->end(node_id);

    if (!global_info_->IsTrain(node_id)) continue;
    num_train++;
    for (; *it != *end; ++(*it)) {
      averages[it->GetID()] += **it;
    }
  }
  for (uint32_t i = 0; i < averages.size(); ++i) {
    averages[i] /= num_train;
    log_dbg("Avg[%d]=%f", i, averages[i]);
  }

  // Computing the covariance matrix
  for (uint32_t node_id = 0; node_id < data_->GetNumRows(); ++node_id) {
    Data<double>::const_iterator_base *it = data_->begin(node_id);
    Data<double>::const_iterator_base *it2;
    Data<double>::const_iterator_base *end = data_->end(node_id);

    uint32_t col_id;
    for (; *it != *end; ++(*it)) {
      col_id = it->GetID();
      it2 = data_->begin(node_id);
      for (; *it2 != *end; ++(*it2)) {
        uint32_t col2_id = it2->GetID();
        cov_matrix[col_id*num_cols + col2_id] +=
          (**it - averages[col_id])*(**it2 - averages[col2_id])/num_train;
      }
    }
  }

  // Inverting the covariance matrix
  InvertMatrix(cov_matrix, cov_matrix_, num_cols);
}
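A note on what Init() prepares: the inverted covariance matrix cov_matrix_ is exactly what a Mahalanobis distance needs. A minimal sketch of how it would typically be consumed follows; the helper name and signature are hypothetical, not part of this class, and <cstdint> is assumed for uint32_t.

// Hypothetical sketch: squared Mahalanobis distance d^2 = (x - y)^T * Cov^{-1} * (x - y),
// with inv_cov stored row-major exactly like cov_matrix_ above.
static double SquaredMahalanobisDistance(const double* x, const double* y,
                                         const double* inv_cov, uint32_t num_cols) {
  double dist = 0.0;
  for (uint32_t i = 0; i < num_cols; ++i) {
    for (uint32_t j = 0; j < num_cols; ++j) {
      dist += (x[i] - y[i]) * inv_cov[i*num_cols + j] * (x[j] - y[j]);
    }
  }
  return dist;
}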
Example #4
/*----------------------------------------------------------------------------------------------------------------------
|	Creates and returns a matrix that represents the inverse of this SquareMatrix.
*/
SquareMatrix * SquareMatrix::Inverse() const
    {
    double * col = new double[dim];
    int * permutation = new int[dim];
    SquareMatrix * tmp = new SquareMatrix(*this);
    double ** a = tmp->GetMatrixAsRawPointer();
    SquareMatrix * inv = new SquareMatrix(*this);
    double ** a_inv = inv->GetMatrixAsRawPointer();

    // double **  a           matrix represented as vector of row pointers
    // int        n           order of matrix
    // double *   col         work vector of size n
    // int *      permutation work vector of size n
    // double **  a_inv       inverse of input matrix a (matrix a is destroyed)
    int result = InvertMatrix(a, dim, col, permutation, a_inv);
    delete tmp;
    delete [] col;
    delete [] permutation;
    if (result != 0)
        {
        delete inv;
        return 0;
        }
    return inv;
    }
  //
  // Return the inverse of the matrix. Throws if it does not exist.
  const FLOAT *InverseMatrixOf(void)
  {
    if (!m_bInverseValid)
      InvertMatrix();

    assert(m_bInverseValid);
    
    return m_fInverse;
  }
Example #6
bool ParsingFileAndIvertMatrix(char *nameInputFile) 
{
	std::ifstream inputFile;
	OpenFile(nameInputFile, inputFile);
	if (IsOpenedFileCorrect(inputFile))
	{
		std::vector<std::vector<float> > matrix(MATRIX_SIZE, std::vector<float>(MATRIX_SIZE));
		FillingAnArrayOfData(inputFile, matrix);
		return InvertMatrix(matrix);
	}
	return false;
}
void GeneralizedEigenSystemSolverRealSymmetricMatrices(const Array2 < doublevar > & Ain, const Array2 < doublevar> & Bin, Array1 < doublevar> & evals, Array2 < doublevar> & evecs){
  //solves generalized eigensystem problem A.x=labda*B.x
  //returns eigenvalues from largest to lowest and
  //eigenvectors, where for i-th eigenvalue, the eigenvector components are evecs(*,i)
  //eigenvectors are normalized such that: evecs**T*B*evecs = I;
#ifdef USE_LAPACK //if LAPACK
  int N=Ain.dim[0];
  
  /* allocate and initialise the matrix */
  Array2 <doublevar> A_temp(N,N), B_temp(N,N);
  Array1 <doublevar>  W,WORK;
  
  
  /* allocate space for the output parameters and workspace arrays */
  W.Resize(N);
  A_temp=Ain;
  B_temp=Bin;
  
  int info;
  int NB=64;
  int NMAX=N;
  int lda=NMAX;
  int ldb=NMAX;
  int LWORK=(NB+2)*NMAX;
  WORK.Resize(LWORK);

  /* get the eigenvalues and eigenvectors */
  info=dsygv(1, 'V', 'U' , N,  A_temp.v,  lda,  B_temp.v, ldb, W.v, WORK.v, LWORK);

  if(info>0)
    error("Internal error in the LAPACK routine dsyevr");
  if(info<0)
    error("Problem with the input parameter of LAPACK routine dsyevr in position "-info);

  for (int i=0; i<N; i++)
    evals(i)=W[N-1-i];

  for (int i=0; i<N; i++) {
    for (int j=0; j<N; j++) {
      evecs(j,i)=A_temp(N-1-i,j);
    }
  }
 //END OF LAPACK 
#else //IF NO LAPACK
  //for now we will solve it only approximatively
   int N=Ain.dim[0];
   Array2 <doublevar> B_inverse(N,N),A_renorm(N,N);
   InvertMatrix(Bin,B_inverse,N);
   MultiplyMatrices(B_inverse,Ain,A_renorm,N);
   //note A_renorm is not explicitly symmetric
   EigenSystemSolverRealSymmetricMatrix(A_renorm,evals,evecs);
#endif //END OF NO LAPACK
}
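The non-LAPACK fallback above relies on the standard reduction of the generalized problem to an ordinary one. Written out, this is just the algebra behind the comment, with the caveat that B^{-1}A is generally not symmetric, which is why the symmetric solver gives only an approximate answer here:

$$ A\,x = \lambda\,B\,x \quad\Longleftrightarrow\quad (B^{-1}A)\,x = \lambda\,x $$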
Example #8
void ProjectionState::Refresh()
{
	glGetIntegerv( GL_VIEWPORT, viewport );

	float modelviewFloat[16];
	float projectionFloat[16];
	glGetFloatv( GL_MODELVIEW_MATRIX, modelviewFloat );
	glGetFloatv( GL_PROJECTION_MATRIX, projectionFloat );
	MultiplyMatrices( projectionFloat, modelviewFloat, perspective );
	InvertMatrix( perspective, perspective );

	glGetDoublev( GL_MODELVIEW_MATRIX, modelview );
	glGetDoublev( GL_PROJECTION_MATRIX, projection );
}
Example #9
void DrawCube() {
	//Listing 3.2. OpenGL code for selecting the slice direction.
	//Real Time Volume Graphics page 52

	GLfloat pModelViewMatrix[16];
	GLfloat pModelViewMatrixInv[16];
	//GLfloat pMatrixIdentity[16];
	// get the current modelview matrix
	glGetFloatv(GL_MODELVIEW_MATRIX, pModelViewMatrix);
	// invert the modelview matrix
	InvertMatrix(pModelViewMatrix, pModelViewMatrixInv);
	// rotate the initial viewing direction
	GLfloat pViewVector[4] = { 0.0f, 0.0f, -1.0f, 0.0f };
	//GLfloat pViewVector1[4];

	MatVecMultiply(pModelViewMatrixInv, pViewVector);

	// find the maximal vector component
	int nMax = FindAbsMaximum(pViewVector);

	//printf("%d %f %f %f %f \n", nMax, pViewVector[0],pViewVector[1],pViewVector[2],pViewVector[3]);
	switch (nMax) {
	case X:
		if (pViewVector[X] > 0.0f) {
			DrawSliceStack(1);
		} else {
			DrawSliceStack(0);
		}
		break;
	case Y:
		if (pViewVector[Y] > 0.0f) {
			DrawSliceStack(3);
		} else {
			DrawSliceStack(2);
		}
		break;
	case Z:
		if (pViewVector[Z] > 0.0f) {
			DrawSliceStack(5);
		} else {
			DrawSliceStack(4);
		}
		break;
	}

}
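FindAbsMaximum is only used above to pick the dominant axis of the transformed view vector. A minimal sketch of what such a helper could look like, assuming X, Y and Z are the indices 0, 1 and 2 and <cmath> is available; this body is an assumption, not the original implementation:

// Hypothetical sketch: return the axis (X, Y or Z) whose component of v
// has the largest absolute value.
static int FindAbsMaximum(const GLfloat v[4]) {
	int nMax = X;
	if (std::fabs(v[Y]) > std::fabs(v[nMax])) nMax = Y;
	if (std::fabs(v[Z]) > std::fabs(v[nMax])) nMax = Z;
	return nMax;
}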
//===============================================================================================================
//===============================================================================================================
void setupTransforms_MainColor(void)
{
        
        LoadIdentity(view);
        Translate(view,             eyePosition[0], 
                                    eyePosition[1], 
                                    eyePosition[2]);


        //===========================================
        LoadIdentity(invertView);
        InvertMatrix(invertView, view);
        //=============================
        
        LoadIdentity(view_rotate);
        //--------------------   
        Rotate(view_rotate, 1.0, 0.0, 0.0, rotateModelWithMiddleMouse[0]);
        Rotate(view_rotate, 0.0, 1.0, 0.0, rotateModelWithMiddleMouse[1]);        
}
// Viewpoints set the inverse as the rotation matrix.
void Viewpoint::SetOrientation( Matrix3x3 m ) {

	int i, j;

	Matrix3x3 inverse;

	CopyMatrix( orientation, m );
	InvertMatrix( inverse, m );

	for ( i = 0; i < 3; i++ ) {
		for ( j = 0; j < 3; j++ ) {
			gl_orientation[ i * 4 + j ] = inverse[i][j];
		}
		gl_orientation[ i * 4 + j ] = 0.0;
	}
	for ( int j = 0; j < 3; j++ ) {
		gl_orientation[ i * 4 + j ] = 0.0;
	}
	gl_orientation[ i * 4 + j ] = 1.0;
}
// Viewpoints set the inverse as the rotation matrix.
void Viewpoint::SetAttitude( Matrix3x3 m ) {

	int i, j;

	Matrix3x3 inverse;

	CopyMatrix( attitude, m );
	InvertMatrix( inverse, m );

	for ( i = 0; i < 3; i++ ) {
		for ( j = 0; j < 3; j++ ) {
			gl_attitude[ i * 4 + j ] = inverse[i][j];
		}
		gl_attitude[ i * 4 + j ] = 0.0;
	}
	for ( int j = 0; j < 3; j++ ) {
		gl_attitude[ i * 4 + j ] = 0.0;
	}
	gl_attitude[ i * 4 + j ] = 1.0;
}
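Both setters rely on i and j keeping their final values after the loops, which is what writes the trailing 0,0,0,1 row into the 4x4 array. An equivalent, more explicit sketch of the same packing is shown below; it assumes gl_* is a flat 16-element double array and Matrix3x3 is indexable as m[i][j], as in the code above.

// Sketch only: the inverse goes into the upper-left 3x3 block, the remaining
// entries of the 4x4 array are 0 except the bottom-right corner, which is 1.
static void PackInverseAs4x4( const Matrix3x3 inverse, double gl[16] ) {
	for ( int i = 0; i < 4; i++ ) {
		for ( int j = 0; j < 4; j++ ) {
			if ( i < 3 && j < 3 ) gl[ i * 4 + j ] = inverse[i][j];
			else                  gl[ i * 4 + j ] = ( i == 3 && j == 3 ) ? 1.0 : 0.0;
		}
	}
}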
Example #13
File: CalcJbar.c  Project: McPHAC/McPHAC
void CalcJbar(double **W, double *Q, column_type  *c, int ndepths) {
	// Calculates the quantity Jbar, used in the Rybicki solution of the radiative transfer

	extern double  *dnu;

	int  i, k;
	double **tmp1, *tmp2, nutot,a;

	tmp1 = ddvector(ndepths);
	tmp2 = dvector(ndepths);
	for (i=0; i<ndepths; i++) {
		tmp1[i]=dvector(ndepths);
	}

	InvertMatrix(tmp1, W, ndepths);
	MatrixVectorMultiply(tmp2, tmp1, Q, ndepths);

	nutot=0;
	for (k=0; k<NFREQ; k++) {
		nutot+=dnu[k];
	}

	for (i=0; i<ndepths; i++) {
		c[i].Jbar = tmp2[i];

		c[i].Jbarb=0;
		for (k=0; k<NFREQ; k++) {
			c[i].Jbarb += c[i].J[k]*dnu[k];
		}
		c[i].Jbarb /= nutot;
		a=c[i].Jbarb/c[i].Jbar;
	}

	for (i=0; i<ndepths; i++) {
		free(tmp1[i]);
	}
	free(tmp1);
	free(tmp2);

}
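In matrix form, the two quantities filled in above are simply (using W, Q and the frequency grid dnu from the code):

$$ \bar J = W^{-1} Q, \qquad \bar J_b(i) = \frac{\sum_k J_{i,k}\,\Delta\nu_k}{\sum_k \Delta\nu_k} $$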
void Force_fitter::setup(doublevar  maxcut, int nexpansion ) {
  m=2;
  cut=maxcut;
  nexp=nexpansion;
  coeff.Resize(nexp);
  Array2 <doublevar> S(nexp, nexp);
  Array1 <doublevar> h(nexp);
  for(int i=0; i< nexp; i++) {
    for(int j=0; j< nexp; j++) {
      S(i,j)=pow(cut,m+i+j+1)/(m+i+j+1);
    }
    h(i)=pow(cut,i+1)/(i+1);
  }
  
  Array2 <doublevar> Sinv(nexp, nexp);
  InvertMatrix(S, Sinv, nexp);
  
  coeff=0;
  for(int i=0; i< nexp; i++) 
    for(int j=0;j< nexp;j++) 
      coeff(i)+=Sinv(i,j)*h(j);
}
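For reference, the coefficients computed above are the solution of the small least-squares system S c = h, with the closed-form entries used in the loops:

$$ S_{ij} = \frac{\mathrm{cut}^{\,m+i+j+1}}{m+i+j+1}, \qquad h_i = \frac{\mathrm{cut}^{\,i+1}}{i+1}, \qquad c = S^{-1} h $$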
void LS::Regression::regress(const std::vector<double>& Z, 
						     const std::vector<McLmm_LS::LMMSimulationResult>& lmmSimulationResults,
							 std::vector<std::vector<double>>& basis_value_on_allPath_buffer)
{
	
	constructBasisMatrix(lmmSimulationResults,basis_value_on_allPath_buffer);

	std::vector<double> coef_weight;

	normalizationMatrix(basis_value_on_allPath_buffer, coef_weight);
	
	//---------------------change vector of vector to Rmatrix-----------------------------------------------//
	size_t nbCol = basis_value_on_allPath_buffer[0].size();
	size_t nbRow= basis_value_on_allPath_buffer.size();
	Rmatrix basis_value_on_allPath_boostMatrix(nbRow,nbCol);
	for(size_t i=0; i<nbRow;i++)
	{
		for(size_t j=0; j<nbCol;j++)
			basis_value_on_allPath_boostMatrix(i,j)=basis_value_on_allPath_buffer[i][j];
	}
	Rvector Z_vector(Z.size());
	std::copy(Z.begin(), Z.end(),Z_vector.begin());
	//---------------------End of change vector of vector to Rmatrix---------------------------------------//

	// //TODO: junbin : copy the matrix multiplication code here ... 
	Rmatrix inverse_TransposeX_multiple_X(nbCol,nbCol); 
	const Rmatrix& transposeX = ublas::trans(basis_value_on_allPath_boostMatrix);
	Rmatrix transposeX_multiple_X = ublas::prod(transposeX, basis_value_on_allPath_boostMatrix);
	bool inverted = InvertMatrix(transposeX_multiple_X, inverse_TransposeX_multiple_X);
	assert(inverted); // keep the inversion outside assert() so it still executes when NDEBUG is defined
	const ublas::vector<double>& matrix_transposeX_multiple_Y = ublas::prod(transposeX,Z_vector);
	const ublas::vector<double>& rg_coef_vect = ublas::prod(inverse_TransposeX_multiple_X, matrix_transposeX_multiple_Y);

	//result save to regressionRepresentation_'s coeff ! 
	size_t nbRgCoef = rg_coef_vect.size();
	if(regressionRepresentation_.getRegressionCoeffs().size()!=nbRgCoef)
		regressionRepresentation_.getRegressionCoeffs().resize(nbRgCoef);
	assert(coef_weight.size()==nbRgCoef);
	for(size_t i = 0; i<nbRgCoef; i++)
		regressionRepresentation_.getRegressionCoeffs()[i]=rg_coef_vect[i]/coef_weight[i];
}
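The ublas block above is the ordinary least-squares normal-equation solve: with X the basis matrix (basis_value_on_allPath_boostMatrix) and Z the target vector, the raw regression coefficients before the per-column division by coef_weight are

$$ \hat\beta = \left(X^{\mathsf T} X\right)^{-1} X^{\mathsf T} Z $$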
//===============================================================================================================
//===============================================================================================================
void setupTransforms_MainColor(void)
{
        
        LoadIdentity(view);
        
        LookAt(view, eyePosition[0]  -  moveModelWithMiddleMouse[0], 
                     eyePosition[1]  -  moveModelWithMiddleMouse[1], 
                     eyePosition[2]  -  zoomModelWithMiddleMouse   ,  
                     0.0             -  moveModelWithMiddleMouse[0]                              , 
                     0.0             -  moveModelWithMiddleMouse[1]                              , 
                     0.0                                           ,            
                     0.0, 1.0, 0.0);
        /*
        Translate(view,             eyePosition[0]  + moveModelWithMiddleMouse[0]
                                    eyePosition[1] + moveModelWithMiddleMouse[1], 
                                    eyePosition[2] + zoomModelWithMiddleMouse);

        */

        
        LoadIdentity(view_rotate);
        //--------------------   

        
        Rotate(view, 1.0, 0.0, 0.0, rotateModelWithMiddleMouse[0]);
        Rotate(view, 0.0, 1.0, 0.0, rotateModelWithMiddleMouse[1]);        
        
          Translate(view_rotate,      moveScenePivot[0],
                                    moveScenePivot[1], 
                                    moveScenePivot[2]);  
                                    
        //===========================================
        LoadIdentity(invertView);
        InvertMatrix(invertView, view);
        //=============================                                    
                                       
}
Example #17
void runBeagle(int resource, 
               int stateCount, 
               int ntaxa, 
               int nsites, 
               bool manualScaling, 
               bool autoScaling,
               bool dynamicScaling,
               int rateCategoryCount,
               int nreps,
               bool fullTiming,
               bool requireDoublePrecision,
               bool requireSSE,
               int compactTipCount,
               int randomSeed,
               int rescaleFrequency,
               bool unrooted,
               bool calcderivs,
               bool logscalers,
               int eigenCount,
               bool eigencomplex,
               bool ievectrans,
               bool setmatrix)
{
    
    int edgeCount = ntaxa*2-2;
    int internalCount = ntaxa-1;
    int partialCount = ((ntaxa+internalCount)-compactTipCount)*eigenCount;
    int scaleCount = ((manualScaling || dynamicScaling) ? ntaxa : 0);
    
    BeagleInstanceDetails instDetails;
    
    // create an instance of the BEAGLE library
	int instance = beagleCreateInstance(
			    ntaxa,			  /**< Number of tip data elements (input) */
				partialCount, /**< Number of partials buffers to create (input) */
                compactTipCount,	/**< Number of compact state representation buffers to create (input) */
				stateCount,		  /**< Number of states in the continuous-time Markov chain (input) */
				nsites,			  /**< Number of site patterns to be handled by the instance (input) */
				eigenCount,		          /**< Number of rate matrix eigen-decomposition buffers to allocate (input) */
                (calcderivs ? (3*edgeCount*eigenCount) : edgeCount*eigenCount),/**< Number of rate matrix buffers (input) */
                rateCategoryCount,/**< Number of rate categories */
                scaleCount*eigenCount,          /**< scaling buffers */
				&resource,		  /**< List of potential resource on which this instance is allowed (input, NULL implies no restriction */
				1,			      /**< Length of resourceList list (input) */
                0,         /**< Bit-flags indicating preferred implementation charactertistics, see BeagleFlags (input) */
                (ievectrans ? BEAGLE_FLAG_INVEVEC_TRANSPOSED : BEAGLE_FLAG_INVEVEC_STANDARD) |
                (logscalers ? BEAGLE_FLAG_SCALERS_LOG : BEAGLE_FLAG_SCALERS_RAW) |
                (eigencomplex ? BEAGLE_FLAG_EIGEN_COMPLEX : BEAGLE_FLAG_EIGEN_REAL) |
                (dynamicScaling ? BEAGLE_FLAG_SCALING_DYNAMIC : 0) | 
                (autoScaling ? BEAGLE_FLAG_SCALING_AUTO : 0) |
                (requireDoublePrecision ? BEAGLE_FLAG_PRECISION_DOUBLE : BEAGLE_FLAG_PRECISION_SINGLE) |
                (requireSSE ? BEAGLE_FLAG_VECTOR_SSE : BEAGLE_FLAG_VECTOR_NONE),	  /**< Bit-flags indicating required implementation characteristics, see BeagleFlags (input) */
				&instDetails);
    if (instance < 0) {
	    fprintf(stderr, "Failed to obtain BEAGLE instance\n\n");
	    return;
    }
        
    int rNumber = instDetails.resourceNumber;
    fprintf(stdout, "Using resource %i:\n", rNumber);
    fprintf(stdout, "\tRsrc Name : %s\n",instDetails.resourceName);
    fprintf(stdout, "\tImpl Name : %s\n", instDetails.implName);    
    
    if (!(instDetails.flags & BEAGLE_FLAG_SCALING_AUTO))
        autoScaling = false;
    
    // set the sequences for each tip using partial likelihood arrays
	gt_srand(randomSeed);	// fix the random seed...
    for(int i=0; i<ntaxa; i++)
    {
        if (i >= compactTipCount) {
            double* tmpPartials = getRandomTipPartials(nsites, stateCount);
            beagleSetTipPartials(instance, i, tmpPartials);
            free(tmpPartials);
        } else {
            int* tmpStates = getRandomTipStates(nsites, stateCount);
            beagleSetTipStates(instance, i, tmpStates);
            free(tmpStates);                
        }
    }
    
#ifdef _WIN32
	std::vector<double> rates(rateCategoryCount);
#else
    double rates[rateCategoryCount];
#endif
	
    for (int i = 0; i < rateCategoryCount; i++) {
        rates[i] = gt_rand() / (double) GT_RAND_MAX;
    }
    
	beagleSetCategoryRates(instance, &rates[0]);
    
	double* patternWeights = (double*) malloc(sizeof(double) * nsites);
    
    for (int i = 0; i < nsites; i++) {
        patternWeights[i] = gt_rand() / (double) GT_RAND_MAX;
    }    

    beagleSetPatternWeights(instance, patternWeights);
    
    free(patternWeights);
	
    // create base frequency array

#ifdef _WIN32
	std::vector<double> freqs(stateCount);
#else
    double freqs[stateCount];
#endif
    
    // create an array containing site category weights
#ifdef _WIN32
	std::vector<double> weights(rateCategoryCount);
#else
    double weights[rateCategoryCount];
#endif

    for (int eigenIndex=0; eigenIndex < eigenCount; eigenIndex++) {
        for (int i = 0; i < rateCategoryCount; i++) {
            weights[i] = gt_rand() / (double) GT_RAND_MAX;
        } 
    
        beagleSetCategoryWeights(instance, eigenIndex, &weights[0]);
    }
    
    double* eval;
    if (!eigencomplex)
        eval = (double*)malloc(sizeof(double)*stateCount);
    else
        eval = (double*)malloc(sizeof(double)*stateCount*2);
    double* evec = (double*)malloc(sizeof(double)*stateCount*stateCount);
    double* ivec = (double*)malloc(sizeof(double)*stateCount*stateCount);
    
    for (int eigenIndex=0; eigenIndex < eigenCount; eigenIndex++) {
        if (!eigencomplex && ((stateCount & (stateCount-1)) == 0)) {
            
            for (int i=0; i<stateCount; i++) {
                freqs[i] = 1.0 / stateCount;
            }

            // an eigen decomposition for the general state-space JC69 model
            // If stateCount = 2^n is a power-of-two, then Sylvester matrix H_n describes
            // the eigendecomposition of the infinitesimal rate matrix
             
            double* Hn = evec;
            Hn[0*stateCount+0] = 1.0; Hn[0*stateCount+1] =  1.0; 
            Hn[1*stateCount+0] = 1.0; Hn[1*stateCount+1] = -1.0; // H_1
         
            for (int k=2; k < stateCount; k <<= 1) {
                // H_n = H_1 (Kronecker product) H_{n-1}
                for (int i=0; i<k; i++) {
                    for (int j=i; j<k; j++) {
                        double Hijold = Hn[i*stateCount + j];
                        Hn[i    *stateCount + j + k] =  Hijold;
                        Hn[(i+k)*stateCount + j    ] =  Hijold;
                        Hn[(i+k)*stateCount + j + k] = -Hijold;
                        
                        Hn[j    *stateCount + i + k] = Hn[i    *stateCount + j + k];
                        Hn[(j+k)*stateCount + i    ] = Hn[(i+k)*stateCount + j    ];
                        Hn[(j+k)*stateCount + i + k] = Hn[(i+k)*stateCount + j + k];                                
                    }
                }        
            }
            
            // Since evec is Hadamard, ivec = (evec)^t / stateCount;    
            for (int i=0; i<stateCount; i++) {
                for (int j=i; j<stateCount; j++) {
                    ivec[i*stateCount+j] = evec[j*stateCount+i] / stateCount;
                    ivec[j*stateCount+i] = ivec[i*stateCount+j]; // Symmetric
                }
            }
           
            eval[0] = 0.0;
            for (int i=1; i<stateCount; i++) {
                eval[i] = -stateCount / (stateCount - 1.0);
            }
       
        } else if (!eigencomplex) {
            for (int i=0; i<stateCount; i++) {
                freqs[i] = gt_rand() / (double) GT_RAND_MAX;
            }
        
            double** qmat=New2DArray<double>(stateCount, stateCount);    
            double* relNucRates = new double[(stateCount * stateCount - stateCount) / 2];
            
            int rnum=0;
            for(int i=0;i<stateCount;i++){
                for(int j=i+1;j<stateCount;j++){
                    relNucRates[rnum] = gt_rand() / (double) GT_RAND_MAX;
                    qmat[i][j]=relNucRates[rnum] * freqs[j];
                    qmat[j][i]=relNucRates[rnum] * freqs[i];
                    rnum++;
                }
            }

            //set diags to sum rows to 0
            double sum;
            for(int x=0;x<stateCount;x++){
                sum=0.0;
                for(int y=0;y<stateCount;y++){
                    if(x!=y) sum+=qmat[x][y];
                        }
                qmat[x][x]=-sum;
            } 
            
            double* eigvalsimag=new double[stateCount];
            double** eigvecs=New2DArray<double>(stateCount, stateCount);//eigenvecs
            double** teigvecs=New2DArray<double>(stateCount, stateCount);//temp eigenvecs
            double** inveigvecs=New2DArray<double>(stateCount, stateCount);//inv eigenvecs    
            int* iwork=new int[stateCount];
            double* work=new double[stateCount];
            
            EigenRealGeneral(stateCount, qmat, eval, eigvalsimag, eigvecs, iwork, work);
            memcpy(*teigvecs, *eigvecs, stateCount*stateCount*sizeof(double));
            InvertMatrix(teigvecs, stateCount, work, iwork, inveigvecs);
            
            for(int x=0;x<stateCount;x++){
                for(int y=0;y<stateCount;y++){
                    evec[x * stateCount + y] = eigvecs[x][y];
                    if (ievectrans)
                        ivec[x * stateCount + y] = inveigvecs[y][x];
                    else
                        ivec[x * stateCount + y] = inveigvecs[x][y];
                }
            } 
            
            Delete2DArray(qmat);
            delete [] relNucRates;
            
            delete [] eigvalsimag;
            Delete2DArray(eigvecs);
            Delete2DArray(teigvecs);
            Delete2DArray(inveigvecs);
            delete [] iwork;
            delete [] work;
        } else if (eigencomplex && stateCount==4 && eigenCount==1) {
            // create base frequency array
            double temp_freqs[4] = { 0.25, 0.25, 0.25, 0.25 };
            
            // an eigen decomposition for the 4-state 1-step circulant infinitesimal generator
            double temp_evec[4 * 4] = {
                -0.5,  0.6906786606674509,   0.15153543380548623, 0.5,
                0.5, -0.15153543380548576,  0.6906786606674498,  0.5,
                -0.5, -0.6906786606674498,  -0.15153543380548617, 0.5,
                0.5,  0.15153543380548554, -0.6906786606674503,  0.5
            };
            
            double temp_ivec[4 * 4] = {
                -0.5,  0.5, -0.5,  0.5,
                0.6906786606674505, -0.15153543380548617, -0.6906786606674507,   0.15153543380548645,
                0.15153543380548568, 0.6906786606674509,  -0.15153543380548584, -0.6906786606674509,
                0.5,  0.5,  0.5,  0.5
            };
            
            double temp_eval[8] = { -2.0, -1.0, -1.0, 0, 0, 1, -1, 0 };
            
            for(int x=0;x<stateCount;x++){
                freqs[x] = temp_freqs[x];
                eval[x] = temp_eval[x];
                eval[x+stateCount] = temp_eval[x+stateCount];
                for(int y=0;y<stateCount;y++){
                    evec[x * stateCount + y] = temp_evec[x * stateCount + y];
                    if (ievectrans)
                        ivec[x * stateCount + y] = temp_ivec[x + y * stateCount];
                    else
                        ivec[x * stateCount + y] = temp_ivec[x * stateCount + y];
                }
            } 
        } else {
            abort("should not be here");
        }
            
        beagleSetStateFrequencies(instance, eigenIndex, &freqs[0]);
        
        if (!setmatrix) {
            // set the Eigen decomposition
            beagleSetEigenDecomposition(instance, eigenIndex, &evec[0], &ivec[0], &eval[0]);
        }
    }
    
    free(eval);
    free(evec);
    free(ivec);


    
    // a list of indices and edge lengths
	int* edgeIndices = new int[edgeCount*eigenCount];
	int* edgeIndicesD1 = new int[edgeCount*eigenCount];
	int* edgeIndicesD2 = new int[edgeCount*eigenCount];
	for(int i=0; i<edgeCount*eigenCount; i++) {
        edgeIndices[i]=i;
        edgeIndicesD1[i]=(edgeCount*eigenCount)+i;
        edgeIndicesD2[i]=2*(edgeCount*eigenCount)+i;
    }
	double* edgeLengths = new double[edgeCount];
	for(int i=0; i<edgeCount; i++) {
        edgeLengths[i]=gt_rand() / (double) GT_RAND_MAX;
    }
    
    // create a list of partial likelihood update operations
    // the order is [dest, destScaling, source1, matrix1, source2, matrix2]
	int* operations = new int[(internalCount)*BEAGLE_OP_COUNT*eigenCount];
    int* scalingFactorsIndices = new int[(internalCount)*eigenCount]; // internal nodes
	for(int i=0; i<internalCount*eigenCount; i++){
		operations[BEAGLE_OP_COUNT*i+0] = ntaxa+i;
        operations[BEAGLE_OP_COUNT*i+1] = (dynamicScaling ? i : BEAGLE_OP_NONE);
        operations[BEAGLE_OP_COUNT*i+2] = (dynamicScaling ? i : BEAGLE_OP_NONE);
        
        int child1Index;
        if (((i % internalCount)*2) < ntaxa)
            child1Index = (i % internalCount)*2;
        else
            child1Index = i*2 - internalCount * (int)(i / internalCount);
        operations[BEAGLE_OP_COUNT*i+3] = child1Index;
        operations[BEAGLE_OP_COUNT*i+4] = child1Index;

        int child2Index;
        if (((i % internalCount)*2+1) < ntaxa)
            child2Index = (i % internalCount)*2+1;
        else
            child2Index = i*2+1 - internalCount * (int)(i / internalCount);
		operations[BEAGLE_OP_COUNT*i+5] = child2Index;
		operations[BEAGLE_OP_COUNT*i+6] = child2Index;

        scalingFactorsIndices[i] = i;
        
//        printf("i %d dest %d c1 %d c2 %d\n", i, ntaxa+i, child1Index, child2Index);
        
        if (autoScaling)
            scalingFactorsIndices[i] += ntaxa;
	}	

    int* rootIndices = new int[eigenCount];
	int* lastTipIndices = new int[eigenCount];
    int* categoryWeightsIndices = new int[eigenCount];
    int* stateFrequencyIndices = new int[eigenCount];
    int* cumulativeScalingFactorIndices = new int[eigenCount];
    
    for (int eigenIndex=0; eigenIndex < eigenCount; eigenIndex++) {
        rootIndices[eigenIndex] = ntaxa+(internalCount*(eigenIndex+1))-1;//ntaxa*2-2;
        lastTipIndices[eigenIndex] = ntaxa-1;
        categoryWeightsIndices[eigenIndex] = eigenIndex;
        stateFrequencyIndices[eigenIndex] = 0;
        cumulativeScalingFactorIndices[eigenIndex] = ((manualScaling || dynamicScaling) ? (scaleCount*eigenCount-1)-eigenCount+eigenIndex+1 : BEAGLE_OP_NONE);
        
        if (dynamicScaling)
            beagleResetScaleFactors(instance, cumulativeScalingFactorIndices[eigenIndex]);
    }

    // start timing!
	struct timeval time1, time2, time3, time4, time5;
    double bestTimeUpdateTransitionMatrices, bestTimeUpdatePartials, bestTimeAccumulateScaleFactors, bestTimeCalculateRootLogLikelihoods, bestTimeTotal;
    
    double logL = 0.0;
    double deriv1 = 0.0;
    double deriv2 = 0.0;
    
    double previousLogL = 0.0;
    double previousDeriv1 = 0.0;
    double previousDeriv2 = 0.0;

    for (int i=0; i<nreps; i++){
        if (manualScaling && (!(i % rescaleFrequency) || !((i-1) % rescaleFrequency))) {
            for(int j=0; j<internalCount*eigenCount; j++){
                operations[BEAGLE_OP_COUNT*j+1] = (((manualScaling && !(i % rescaleFrequency))) ? j : BEAGLE_OP_NONE);
                operations[BEAGLE_OP_COUNT*j+2] = (((manualScaling && (i % rescaleFrequency))) ? j : BEAGLE_OP_NONE);
            }
        }
        
        gettimeofday(&time1,NULL);

        for (int eigenIndex=0; eigenIndex < eigenCount; eigenIndex++) {
            if (!setmatrix) {
                // tell BEAGLE to populate the transition matrices for the above edge lengths
                beagleUpdateTransitionMatrices(instance,     // instance
                                               eigenIndex,             // eigenIndex
                                               &edgeIndices[eigenIndex*edgeCount],   // probabilityIndices
                                               (calcderivs ? &edgeIndicesD1[eigenIndex*edgeCount] : NULL), // firstDerivativeIndices
                                               (calcderivs ? &edgeIndicesD2[eigenIndex*edgeCount] : NULL), // secondDerivativeIndices
                                               edgeLengths,   // edgeLengths
                                               edgeCount);            // count
            } else {
                double* inMatrix = new double[stateCount*stateCount*rateCategoryCount];
                for (int matrixIndex=0; matrixIndex < edgeCount; matrixIndex++) {
                    for(int z=0;z<rateCategoryCount;z++){
                        for(int x=0;x<stateCount;x++){
                            for(int y=0;y<stateCount;y++){
                                inMatrix[z*stateCount*stateCount + x*stateCount + y] = gt_rand() / (double) GT_RAND_MAX;
                            }
                        } 
                    }
                    beagleSetTransitionMatrix(instance, edgeIndices[eigenIndex*edgeCount + matrixIndex], inMatrix, 1);
                    if (calcderivs) {
                        beagleSetTransitionMatrix(instance, edgeIndicesD1[eigenIndex*edgeCount + matrixIndex], inMatrix, 0);
                        beagleSetTransitionMatrix(instance, edgeIndicesD2[eigenIndex*edgeCount + matrixIndex], inMatrix, 0);
                    }
                }
            }
        }

        gettimeofday(&time2, NULL);
        
        // update the partials
        beagleUpdatePartials( instance,      // instance
                        (BeagleOperation*)operations,     // eigenIndex
                        internalCount*eigenCount,              // operationCount
                        (dynamicScaling ? internalCount : BEAGLE_OP_NONE));             // cumulative scaling index

        gettimeofday(&time3, NULL);

        int scalingFactorsCount = internalCount;
                
        for (int eigenIndex=0; eigenIndex < eigenCount; eigenIndex++) {
            if (manualScaling && !(i % rescaleFrequency)) {
                beagleResetScaleFactors(instance,
                                        cumulativeScalingFactorIndices[eigenIndex]);
                
                beagleAccumulateScaleFactors(instance,
                                       &scalingFactorsIndices[eigenIndex*internalCount],
                                       scalingFactorsCount,
                                       cumulativeScalingFactorIndices[eigenIndex]);
            } else if (autoScaling) {
                beagleAccumulateScaleFactors(instance, &scalingFactorsIndices[eigenIndex*internalCount], scalingFactorsCount, BEAGLE_OP_NONE);
            }
        }
        
        gettimeofday(&time4, NULL);
                
        // calculate the site likelihoods at the root node
        if (!unrooted) {
            beagleCalculateRootLogLikelihoods(instance,               // instance
                                        rootIndices,// bufferIndices
                                        categoryWeightsIndices,                // weights
                                        stateFrequencyIndices,                 // stateFrequencies
                                        cumulativeScalingFactorIndices,
                                        eigenCount,                      // count
                                        &logL);         // outLogLikelihoods
        } else {
            // calculate the site likelihoods at the root node
            beagleCalculateEdgeLogLikelihoods(instance,               // instance
                                              rootIndices,// bufferIndices
                                              lastTipIndices,
                                              lastTipIndices,
                                              (calcderivs ? edgeIndicesD1 : NULL),
                                              (calcderivs ? edgeIndicesD2 : NULL),
                                              categoryWeightsIndices,                // weights
                                              stateFrequencyIndices,                 // stateFrequencies
                                              cumulativeScalingFactorIndices,
                                              eigenCount,                      // count
                                              &logL,    // outLogLikelihood
                                              (calcderivs ? &deriv1 : NULL),
                                              (calcderivs ? &deriv2 : NULL));
        }
        // end timing!
        gettimeofday(&time5,NULL);
        
        if (i == 0 || getTimeDiff(time1, time2) < bestTimeUpdateTransitionMatrices)
            bestTimeUpdateTransitionMatrices = getTimeDiff(time1, time2);
        if (i == 0 || getTimeDiff(time2, time3) < bestTimeUpdatePartials)
            bestTimeUpdatePartials = getTimeDiff(time2, time3);
        if (i == 0 || getTimeDiff(time3, time4) < bestTimeAccumulateScaleFactors)
            bestTimeAccumulateScaleFactors = getTimeDiff(time3, time4);
        if (i == 0 || getTimeDiff(time4, time5) < bestTimeCalculateRootLogLikelihoods)
            bestTimeCalculateRootLogLikelihoods = getTimeDiff(time4, time5);
        if (i == 0 || getTimeDiff(time1, time5) < bestTimeTotal)
            bestTimeTotal = getTimeDiff(time1, time5);
        
        if (!(logL - logL == 0.0))
            abort("error: invalid lnL");
        
        if (i > 0 && abs(logL - previousLogL) > MAX_DIFF)
            abort("error: large lnL difference between reps");
        
        if (calcderivs) {
            if (!(deriv1 - deriv1 == 0.0) || !(deriv2 - deriv2 == 0.0))
                abort("error: invalid deriv");
            
            if (i > 0 && ((abs(deriv1 - previousDeriv1) > MAX_DIFF) || (abs(deriv2 - previousDeriv2) > MAX_DIFF)) )
                abort("error: large deriv difference between reps");
        }

        previousLogL = logL;
        previousDeriv1 = deriv1;
        previousDeriv2 = deriv2;        
    }

    if (resource == 0) {
        cpuTimeUpdateTransitionMatrices = bestTimeUpdateTransitionMatrices;
        cpuTimeUpdatePartials = bestTimeUpdatePartials;
        cpuTimeAccumulateScaleFactors = bestTimeAccumulateScaleFactors;
        cpuTimeCalculateRootLogLikelihoods = bestTimeCalculateRootLogLikelihoods;
        cpuTimeTotal = bestTimeTotal;
    }
    
    if (!calcderivs)
        fprintf(stdout, "logL = %.5f \n", logL);
    else
        fprintf(stdout, "logL = %.5f d1 = %.5f d2 = %.5f\n", logL, deriv1, deriv2);
    
    std::cout.setf(std::ios::showpoint);
    std::cout.setf(std::ios::floatfield, std::ios::fixed);
    int timePrecision = 6;
    int speedupPrecision = 2;
    int percentPrecision = 2;
	std::cout << "best run: ";
    printTiming(bestTimeTotal, timePrecision, resource, cpuTimeTotal, speedupPrecision, 0, 0, 0);
    if (fullTiming) {
        std::cout << " transMats:  ";
        printTiming(bestTimeUpdateTransitionMatrices, timePrecision, resource, cpuTimeUpdateTransitionMatrices, speedupPrecision, 1, bestTimeTotal, percentPrecision);
        std::cout << " partials:   ";
        printTiming(bestTimeUpdatePartials, timePrecision, resource, cpuTimeUpdatePartials, speedupPrecision, 1, bestTimeTotal, percentPrecision);
        if (manualScaling || autoScaling) {
            std::cout << " accScalers: ";
            printTiming(bestTimeAccumulateScaleFactors, timePrecision, resource, cpuTimeAccumulateScaleFactors, speedupPrecision, 1, bestTimeTotal, percentPrecision);
        }
        std::cout << " rootLnL:    ";
        printTiming(bestTimeCalculateRootLogLikelihoods, timePrecision, resource, cpuTimeCalculateRootLogLikelihoods, speedupPrecision, 1, bestTimeTotal, percentPrecision);
    }
    std::cout << "\n";
    
	beagleFinalizeInstance(instance);
}
Example #18
void PlaneStressJ2::CalculateMaterialResponse(const Vector& StrainVector,
        const Matrix& DeformationGradient,
        Vector& StressVector,
        Matrix& AlgorithmicTangent,
        const ProcessInfo& CurrentProcessInfo,
        const Properties& props,
        const GeometryType& geom,
        const Vector& ShapeFunctionsValues,
        bool CalculateStresses,
        int CalculateTangent,
        bool SaveInternalVariables)
{
    KRATOS_TRY

    mE = props.GetValue(YOUNG_MODULUS);
    mNU = props.GetValue(POISSON_RATIO);
    msigma_y = props.GetValue(YIELD_STRESS);
    mH = 0.0;
    mtheta = 0.0;

//        double theta = 0.0;

    //resize output quantities
    if (StressVector.size() != 3 && CalculateStresses == true) StressVector.resize(3, false);
    if (AlgorithmicTangent.size1() != 3 && CalculateTangent != 0) AlgorithmicTangent.resize(3, 3, false);


    array_1d<double, 3 > elastic_strain;
    noalias(elastic_strain) = StrainVector;
    noalias(elastic_strain) -= mOldPlasticStrain;
    //        KRATOS_WATCH(StrainVector);
    //        KRATOS_WATCH(mOldPlasticStrain);

    boost::numeric::ublas::bounded_matrix<double, 3, 3 > C, Cinv, P;
    CalculateElasticMatrix(C);
    CalculateInverseElasticMatrix(Cinv);
    CalculateP(P);

//                        KRATOS_WATCH(C);

    array_1d<double, 3 > s_trial = prod(C, elastic_strain);
    array_1d<double, 3 > xi_trial = s_trial;
    noalias(s_trial) -= mbeta_old;

    //                KRATOS_WATCH(compute_f_trial(xi_trial, malpha_old));

    //        double fbar2 = fbar_2(0.0, xi_trial);
    //        double fbar_value = sqrt(fbar_2(0.0, xi_trial));
    //        double r2_value = R_2(0.0, fbar_value, malpha_old);
    //        double aaa = fbar_value - sqrt(r2_value);
    //                KRATOS_WATCH(sqrt(r2_value));
    //                KRATOS_WATCH(fbar_value);
    //                KRATOS_WATCH(sqrt(r2_value));
    //                        KRATOS_WATCH(aaa);

    double H1 = (1.0 - mtheta) * mH;
    ////        KRATOS_WATCH(xi_trial);
    //                KRATOS_WATCH(mbeta_old)
    if (compute_f_trial(xi_trial, malpha_old) < 0) //elastic case
    {
        if (CalculateStresses == true) noalias(StressVector) = s_trial;
        if (CalculateTangent != 0) noalias(AlgorithmicTangent) = C;
        //note that in this case internal variables are not modified!

    }
    else
    {
        //algorithm copied identically from the Simo Hughes, BOX3.3, pag 130
        double dgamma = ComputeDGamma(xi_trial, malpha_old);
        double ccc = 0.6666666666666667 * dgamma*H1;

        //            KRATOS_WATCH(dgamma);
        //            KRATOS_WATCH(xi_trial);
        //                        KRATOS_WATCH(malpha_old);

        //calculate XImat
        //note that here i reuse the C as auxiliary variable as i don't need it anymore
        boost::numeric::ublas::bounded_matrix<double, 3, 3 > XImat;
        noalias(C) = Cinv;
        noalias(C) += (dgamma / (1.0 + ccc)) * P;
        double detC;
        InvertMatrix(C, XImat, detC);
        //            KRATOS_WATCH(XImat);


        array_1d<double, 3 > aux, xi;
        noalias(aux) = prod(Cinv, xi_trial);
        noalias(xi) = prod(XImat, aux);
        xi /= (1.0 + ccc);

        //            KRATOS_WATCH(compute_f_trial(xi, malpha_old));

        noalias(mbeta_n1) = mbeta_old;
        noalias(mbeta_n1) += ccc*xi;

        array_1d<double, 3 > stress;
        noalias(stress) = xi;
        noalias(stress) += mbeta_n1;
        if (CalculateStresses == true) noalias(StressVector) = stress;

        malpha_current = malpha_old + sqrt(0.6666666666666667) * dgamma * sqrt(fbar_2(dgamma, xi));


        //KRATOS_WATCH(StressVector);
        noalias(aux) = prod(P, xi);

        noalias(mCurrentPlasticStrain) = mOldPlasticStrain;
        noalias(mCurrentPlasticStrain) += dgamma*aux;



        if (CalculateTangent != 0)
        {
            noalias(AlgorithmicTangent) = XImat;

//                //compute tangent
//                array_1d<double, 3 > XPXi = prod(XImat, aux);
//
//                double K1_n1 = theta*mH; //msigma_y + theta * mH*alpha;
//                //                double K1_n1 = msigma_y + theta * mH*alpha;
//                double theta1 = 1.0 + 0.6666666666666667 * H1*dgamma;
//                double theta2 = 1.0 - 0.6666666666666667 * K1_n1*dgamma;
//                double beta_val = inner_prod(xi, aux);
//                beta_val *= 0.6666666666666667 * (theta1 / theta2) * (K1_n1 * theta1 + H1 * theta2);
//
//                double denom = inner_prod(aux, XPXi);
//                denom += beta_val;
//                denom = sqrt(denom);
//                XPXi /= denom;
//
//                noalias(AlgorithmicTangent) = XImat;
//                noalias(AlgorithmicTangent) -= outer_prod(XPXi, XPXi);

        }
        //KRATOS_WATCH(algorithmicTangent);

//            noalias(AlgorithmicTangent) = C;

    }

    KRATOS_CATCH("")
}
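For readers following Simo & Hughes Box 3.3: with c = (2/3)*dgamma*H1 (the variable ccc above), the plastic branch computes the return-mapped relative stress and back stress as

$$ \Xi = \left( C^{-1} + \frac{\Delta\gamma}{1+c}\,P \right)^{-1}, \qquad \xi_{n+1} = \frac{1}{1+c}\,\Xi\,C^{-1}\,\xi^{\mathrm{trial}}, \qquad \beta_{n+1} = \beta_n + c\,\xi_{n+1} $$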
void StereoObject::SetUpForInputOfPoleOrMatrix()
Example #20
// Here's the main interpolate function, using
// Least Squares AutoRegression (LSAR):
void InterpolateAudio(float *buffer, int len,
                      int firstBad, int numBad)
{
   int N = len;
   int i, row, col;

   wxASSERT(len > 0 &&
            firstBad >= 0 &&
            numBad < len &&
            firstBad+numBad <= len);

   if(numBad >= len)
      return;  //should never have been called!

   if (firstBad == 0) {
      // The algorithm below has a weird asymmetry in that it
      // performs poorly when interpolating to the left.  If
      // we're asked to interpolate the left side of a buffer,
      // we just reverse the problem and try it that way.
      float *buffer2 = new float[len];
      for(i=0; i<len; i++)
         buffer2[len-1-i] = buffer[i];
      InterpolateAudio(buffer2, len, len-numBad, numBad);
      for(i=0; i<len; i++)
         buffer[len-1-i] = buffer2[i];
      return;
   }

   Vector s(len, buffer);

   // Choose P, the order of the autoregression equation
   int P = imin(numBad * 3, 50);
   P = imin(P, imax(firstBad - 1, len - (firstBad + numBad) - 1));

   if (P < 3) {
      LinearInterpolateAudio(buffer, len, firstBad, numBad);
      return;
   }

   // Add a tiny amount of random noise to the input signal -
   // this sounds like a bad idea, but the amount we're adding
   // is only about 1 bit in 16-bit audio, and it's an extremely
   // effective way to avoid nearly-singular matrices.  If users
   // run it more than once they get slightly different results;
   // this is sometimes even advantageous.
   for(i=0; i<N; i++)
      s[i] += (rand()-(RAND_MAX/2))/(RAND_MAX*10000.0);

   // Solve for the best autoregression coefficients
   // using a least-squares fit to all of the non-bad
   // data we have in the buffer
   Matrix X(P, P);
   Vector b(P);

   for(i=0; i<len-P; i++)
      if (i+P < firstBad || i >= (firstBad + numBad))
         for(row=0; row<P; row++) {
            for(col=0; col<P; col++)
               X[row][col] += (s[i+row] * s[i+col]);
            b[row] += s[i+P] * s[i+row];
         }

   Matrix Xinv(P, P);
   if (!InvertMatrix(X, Xinv)) {
      // The matrix is singular!  Fall back on linear...
      // In practice I have never seen this happen if
      // we add the tiny bit of random noise.
      LinearInterpolateAudio(buffer, len, firstBad, numBad);
      return;
   }

   // This vector now contains the autoregression coefficients
   Vector a = Xinv * b;

   // Create a matrix (a "Toeplitz" matrix, as it turns out)
   // which encodes the autoregressive relationship between
   // elements of the sequence.
   Matrix A(N-P, N);
   for(row=0; row<N-P; row++) {
      for(col=0; col<P; col++)
         A[row][row+col] = -a[col];
      A[row][row+P] = 1;
   }

   // Split both the Toeplitz matrix and the signal into
   // two pieces.  Note that this code could be made to
   // work even in the case where the "bad" samples are
   // not contiguous, but currently it assumes they are.
   //   "u" is for unknown (bad)
   //   "k" is for known (good)
   Matrix Au = MatrixSubset(A, 0, N-P, firstBad, numBad);
   Matrix A_left = MatrixSubset(A, 0, N-P, 0, firstBad);
   Matrix A_right = MatrixSubset(A, 0, N-P,
                                 firstBad+numBad, N-(firstBad+numBad));
   Matrix Ak = MatrixConcatenateCols(A_left, A_right);

   Vector s_left = VectorSubset(s, 0, firstBad);
   Vector s_right = VectorSubset(s, firstBad+numBad,
                                 N-(firstBad+numBad));
   Vector sk = VectorConcatenate(s_left, s_right);

   // Do some linear algebra to find the best possible
   // values that fill in the "bad" area
   Matrix AuT = TransposeMatrix(Au);
   Matrix X1 = MatrixMultiply(AuT, Au);
   Matrix X2(X1.Rows(), X1.Cols());
   if (!InvertMatrix(X1, X2)) {
      // The matrix is singular!  Fall back on linear...
      LinearInterpolateAudio(buffer, len, firstBad, numBad);
      return;
   }
   Matrix X2b = X2 * -1.0;
   Matrix X3 = MatrixMultiply(X2b, AuT);
   Matrix X4 = MatrixMultiply(X3, Ak);
   // This vector contains our best guess as to the
   // unknown values
   Vector su = X4 * sk;

   // Put the results into the return buffer
   for(i=0; i<numBad; i++)
      buffer[firstBad+i] = (float)su[i];
}
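For reference, the least-squares step above fits autoregression coefficients a such that each sample is predicted from the P samples before it; the quantities accumulated in X and b are the usual normal equations:

$$ s_{i+P} \approx \sum_{j=0}^{P-1} a_j\, s_{i+j}, \qquad X_{rc} = \sum_i s_{i+r}\, s_{i+c}, \qquad b_r = \sum_i s_{i+P}\, s_{i+r}, \qquad a = X^{-1} b $$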
Example #21
void magnet::math::Spline::generate()
{
    if (size() < 2)
        throw std::runtime_error("Spline requires at least 2 points");

    // If any spline points are at the same x location, we have to
    // just slightly separate them
    {
        bool testPassed(false);
        while (!testPassed) {
            testPassed = true;
            std::sort(base::begin(), base::end());

            for (auto iPtr = base::begin(); iPtr != base::end() - 1; ++iPtr)
                if (iPtr->first == (iPtr + 1)->first) {
                    if ((iPtr + 1)->first != 0)
                        (iPtr + 1)->first += (iPtr + 1)->first
                                             * std::numeric_limits
                                             <double>::epsilon() * 10;
                    else
                        (iPtr + 1)->first = std::numeric_limits
                                            <double>::epsilon() * 10;
                    testPassed = false;
                    break;
                }
        }
    }

    const size_t e = size() - 1;

    switch (_type) {
    case LINEAR: {
        _data.resize(e);
        for (size_t i(0); i < e; ++i) {
            _data[i].x = x(i);
            _data[i].a = 0;
            _data[i].b = 0;
            _data[i].c = (y(i + 1) - y(i)) / (x(i + 1) - x(i));
            _data[i].d = y(i);
        }
        break;
    }
    case CUBIC: {
        ublas::matrix<double> A(size(), size());
        for (size_t yv(0); yv <= e; ++yv)
            for (size_t xv(0); xv <= e; ++xv)
                A(xv, yv) = 0;

        for (size_t i(1); i < e; ++i) {
            A(i - 1, i) = h(i - 1);
            A(i, i) = 2 * (h(i - 1) + h(i));
            A(i + 1, i) = h(i);
        }

        ublas::vector<double> C(size());
        for (size_t xv(0); xv <= e; ++xv)
            C(xv) = 0;

        for (size_t i(1); i < e; ++i)
            C(i) = 6
                   * ((y(i + 1) - y(i)) / h(i) - (y(i) - y(i - 1)) / h(i - 1));

        // Boundary conditions
        switch (_BCLow) {
        case FIXED_1ST_DERIV_BC:
            C(0) = 6 * ((y(1) - y(0)) / h(0) - _BCLowVal);
            A(0, 0) = 2 * h(0);
            A(1, 0) = h(0);
            break;
        case FIXED_2ND_DERIV_BC:
            C(0) = _BCLowVal;
            A(0, 0) = 1;
            break;
        case PARABOLIC_RUNOUT_BC:
            C(0) = 0;
            A(0, 0) = 1;
            A(1, 0) = -1;
            break;
        }

        switch (_BCHigh) {
        case FIXED_1ST_DERIV_BC:
            C(e) = 6 * (_BCHighVal - (y(e) - y(e - 1)) / h(e - 1));
            A(e, e) = 2 * h(e - 1);
            A(e - 1, e) = h(e - 1);
            break;
        case FIXED_2ND_DERIV_BC:
            C(e) = _BCHighVal;
            A(e, e) = 1;
            break;
        case PARABOLIC_RUNOUT_BC:
            C(e) = 0;
            A(e, e) = 1;
            A(e - 1, e) = -1;
            break;
        }

        ublas::matrix<double> AInv(size(), size());
        InvertMatrix(A, AInv);

        _ddy = ublas::prod(C, AInv);

        _data.resize(size() - 1);
        for (size_t i(0); i < e; ++i) {
            _data[i].x = x(i);
            _data[i].a = (_ddy(i + 1) - _ddy(i)) / (6 * h(i));
            _data[i].b = _ddy(i) / 2;
            _data[i].c = (y(i + 1) - y(i)) / h(i) - _ddy(i + 1) * h(i) / 6
                         - _ddy(i) * h(i) / 3;
            _data[i].d = y(i);
        }
    }
    }
    _valid = true;
}
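In the CUBIC case the matrix assembled above is the standard tridiagonal system for the second derivatives M_i (stored in _ddy), with the selected boundary condition filling rows 0 and e:

$$ h_{i-1} M_{i-1} + 2\,(h_{i-1}+h_i)\,M_i + h_i\,M_{i+1} \;=\; 6\left( \frac{y_{i+1}-y_i}{h_i} - \frac{y_i - y_{i-1}}{h_{i-1}} \right), \qquad i = 1,\dots,e-1 $$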
void ScatteredPointInterpolator::Init()
{
   // n: dimension of output vector // 1; the height value
   const size_t n = 1;

   // k: number of points
   const size_t k = m_vecPoints.size();

#ifdef USE_EIGEN
   // Eigen::Matrix() is row first, column second
   // calc G
   Eigen::MatrixXd G(k, k);

   for (size_t x=0; x<k; x++)
      for (size_t y=0; y<k; y++)
         G(x, y) = Gaussian(DistanceXY(m_vecPoints[x], m_vecPoints[y]));

   // calc Y
   Eigen::MatrixXd Y(n, k);
   for (size_t i=0; i<k; i++)
      Y(0, i) = m_vecPoints[i].Z();

   // calc w: solve Y=GW
   // invert G (solve G * Ginv = I via a column-pivoting QR decomposition)
   Eigen::MatrixXd Ginv(k, k);
   Ginv = G.colPivHouseholderQr().solve(Eigen::MatrixXd::Identity(k, k));

   // W = Y * Ginv
   Eigen::MatrixXd W(k, n);
   W = Y * Ginv;

   m_vecWeights.resize(W.cols());

   for (size_t i=0, iMax=m_vecWeights.size(); i<iMax; i++)
      m_vecWeights[i] = W(0, i);

#else
   // calc G
   boost::numeric::ublas::matrix<double> G(k, k);

   for (size_t x=0; x<k; x++)
      for (size_t y=0; y<k; y++)
         G(x, y) = Gaussian(DistanceXY(m_vecPoints[x], m_vecPoints[y]));

   // calc Y
   boost::numeric::ublas::matrix<double> Y(n, k);
   for (size_t i=0; i<k; i++)
      Y(0, i) = m_vecPoints[i].Z();

   // calc w: solve Y=GW
   // invert G
   boost::numeric::ublas::matrix<double> Ginv(k, k);
   try
   {
      bool bRet = InvertMatrix(G, Ginv);
      if (!bRet)
         throw Exception(_T("invalid input data"), __FILE__, __LINE__);
   }
   catch(std::exception&)
   {
      throw Exception(_T("invalid input data"), __FILE__, __LINE__);
   }

   // W = Y * Ginv
   boost::numeric::ublas::matrix<double> W = boost::numeric::ublas::prod(Y, Ginv);

   // TODO replace with ublas function
   m_vecWeights.resize(W.size2());

   for (size_t i=0, iMax=m_vecWeights.size(); i<iMax; i++)
      m_vecWeights[i] = W(0, i);
#endif
}
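Both branches solve the same radial-basis-function system: with G the Gaussian kernel matrix, G_{xy} = Gaussian(||p_x - p_y||), and y the row vector of sampled heights, the weights satisfy

$$ w = y\,G^{-1}, \qquad \text{equivalently } G\,w^{\mathsf T} = y^{\mathsf T} \text{ since } G \text{ is symmetric} $$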
/*
 * Train System
 * Runs through all the recorded sample data
 * and sets up the weights that train the system
 */
void Gesture::TrainSystem()
{

    //
    // Calculate matrices and averages for each class

    int numSymbols = GetNumSymbols();

    for( int i = 0; i < numSymbols; ++i )
    {
        memset( &m_symbols[i].m_averages, 0, sizeof(FeatureSet) );
    }

    struct coVarianceElement
    {
        double z[GESTURE_MAX_FEATURES+1][GESTURE_MAX_FEATURES+1];
    };

    coVarianceElement *coVarianceMatrix = new coVarianceElement[numSymbols];
    memset( coVarianceMatrix, 0, numSymbols * sizeof(coVarianceElement) );

    double commonCovarianceMatrix[GESTURE_MAX_FEATURES+1][GESTURE_MAX_FEATURES+1];
    memset( commonCovarianceMatrix, 0, (GESTURE_MAX_FEATURES+1) * (GESTURE_MAX_FEATURES+1) * sizeof(double) );

    memset( m_invertedCovarianceMatrix, 0, (GESTURE_MAX_FEATURES+1) * (GESTURE_MAX_FEATURES+1) * sizeof(double) );


    for( int c = 0; c < numSymbols; ++c )
    {

        Classification *theClass = &m_symbols[c].m_classes;

        //
        // Recalculate our averages

        for( int i = 0; i <= GESTURE_MAX_FEATURES; ++i )
        {
            double result = 0.0f;
            for( int e = 0; e < theClass->m_nextTrainer; ++e )
            {
                result += theClass->m_trainers[e].f[i];
            }
            result /= theClass->m_nextTrainer;

            m_symbols[c].m_averages.f[i] = result;
        }


        //
        // Calculate the sample estimate of the Covariance matrix for each Class

        for( int i = 0; i <= GESTURE_MAX_FEATURES; ++i )
        {
            for( int j = 0; j <= GESTURE_MAX_FEATURES; ++j )
            {
                double result = 0.0f;

                for( int e = 0; e < theClass->m_nextTrainer; ++e )
                {
                    result += ( (theClass->m_trainers[e].f[i] - m_symbols[c].m_averages.f[i]) *
                                (theClass->m_trainers[e].f[j] - m_symbols[c].m_averages.f[j]) );
                }

                coVarianceMatrix[c].z[i][j] = result;
            }
        }
    }

    //
    // Calculate the common coVariance Matrix

    double trainerSum = -numSymbols;
    for( int c = 0; c < numSymbols; ++c )
    {
        trainerSum += m_symbols[c].m_classes.m_nextTrainer;
    }

    for( int i = 0; i <= GESTURE_MAX_FEATURES; ++i )
    {
        for( int j = 0; j <= GESTURE_MAX_FEATURES; ++j )
        {
            double coVarianceSum = 0.0f;
            for( int c = 0; c < numSymbols; ++c )
            {
                coVarianceSum += coVarianceMatrix[c].z[i][j] /
                                 ((double) m_symbols[c].m_classes.m_nextTrainer - 1.0);
            }
            commonCovarianceMatrix[i][j] = coVarianceSum / trainerSum;
        }
    }

    //
    // Invert the common coVariance Matrix

    InvertMatrix( (double *) commonCovarianceMatrix,
                  (double *) m_invertedCovarianceMatrix,
                  GESTURE_MAX_FEATURES+1, GESTURE_MAX_FEATURES+1 );

    //
    // Use that inverted matrix to calculate our weights

    for( int c = 0; c < numSymbols; ++c )
    {
        for( int j = 1; j <= GESTURE_MAX_FEATURES; ++j )
        {
            double total = 0.0f;
            for( int i = 1; i <= GESTURE_MAX_FEATURES; ++i )
            {
                total += ( m_invertedCovarianceMatrix[i][j] *
                           m_symbols[c].m_averages.f[i] );
            }
            m_symbols[c].m_classes.m_weights.f[j] = total;
        }
    }


    for( int c = 0; c < numSymbols; ++c )
    {
        // Calculate weight 0
        double total = 0.0f;
        for( int i = 1; i <= GESTURE_MAX_FEATURES; ++i )
        {
            total += ( m_symbols[c].m_classes.m_weights.f[i] *
                       m_symbols[c].m_averages.f[i] );
        }
        total *= -0.5f;
        m_symbols[c].m_classes.m_weights.f[0] = total;
    }

	delete [] coVarianceMatrix;
}
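The final two loops are the usual linear-discriminant weights built from the pooled inverse covariance (m_invertedCovarianceMatrix) and the per-class feature averages; with the feature indices running as in the code,

$$ w_{c,j} = \sum_i \Sigma^{-1}_{ij}\,\mu_{c,i}, \qquad w_{c,0} = -\tfrac{1}{2}\sum_i w_{c,i}\,\mu_{c,i} $$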