Example #1
0
///
/// \brief PLSData::Discriminate
/// \param data Input matrix (usually, but not necessarily, spectra); used as X
/// \param labels Class label matrix; used as Y
/// \return Whether the PLS regression succeeded
/// Perform PLS-DA
bool PLSData::Discriminate(const mat &data, const mat &labels)
{
    //data (usually, but not necessarily, spectra) is X
    //labels are Y
    if (labels.n_rows != data.n_cols) return false;
    mat X_loadings, Y_loadings, X_scores, Y_scores, coefficients, percent_variance, fitted;
    bool success = Vespucci::Math::DimensionReduction::plsregress(data.t(), labels, labels.n_cols,
                                                                  X_loadings, Y_loadings,
                                                                  X_scores, Y_scores,
                                                                  coefficients, percent_variance,
                                                                  fitted);
    mat residuals = fitted - labels;

    if (success){
        AddMetadata("Type", "Calibration");
        AddMetadata("Components calculated", QString::number(labels.n_cols));
        AddMatrix("Percent Variance", percent_variance);
        AddMatrix("Predictor Loadings", X_loadings);
        AddMatrix("Response Loadings", Y_loadings);
        AddMatrix("Predictor Scores", X_scores);
        AddMatrix("Response Scores", Y_scores);
        AddMatrix("Coefficients", coefficients);
        AddMatrix("Fitted Data", fitted);
        AddMatrix("Residuals", residuals);
    }

    return success;
}
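For context, PLS-DA typically treats the response Y as a dummy-coded class membership matrix with one row per spectrum (one column of data), which is what the labels.n_rows != data.n_cols check above enforces. A minimal, self-contained Armadillo sketch of building such a matrix; DummyCode and class_index are illustrative names, not part of PLSData:

#include <armadillo>

// Build a dummy-coded label matrix: one row per sample, one column per class,
// with a 1 in the column of the sample's class and 0 elsewhere.
arma::mat DummyCode(const arma::uvec &class_index, arma::uword class_count)
{
    arma::mat labels(class_index.n_elem, class_count, arma::fill::zeros);
    for (arma::uword i = 0; i < class_index.n_elem; ++i)
        labels(i, class_index(i)) = 1.0;
    return labels;
}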
Example #2
0
/*
	Use node_id as a virtual process rank and perform the
	reduction with a binary-tree algorithm ordered by node_id.
*/
int MPIN_Reduce(float *sendbuf,float *recvbuf,int count,int cur_node_id,int cur_rank,int tag)
{

    MPI_Status status;
    struct child *c;
    int rank;
    int size = MPIN_get_node_size();
    if(cur_node_id >= size) {
        return -1;
    }
    c = dt[cur_node_id].next->next;
    // Receive data from the child processes
    while(c) {
        if(c->id != -1) { // the current node has children
            rank = MPIN_get_master_rank(c->id);
            //printf("file:%s,func:%s,line:%d id %d cur_rank %d rank:%d\n",__FILE__,__func__,__LINE__,c->id,cur_rank,rank);
            MPI_Recv(recvbuf,count,MPI_FLOAT,rank,tag,MPI_COMM_WORLD,&status);
            AddMatrix(recvbuf,sendbuf,count);
        } else {
            break;
        }
        c = c->next;
    }
    int parent_rank = dt[cur_node_id].parent;
    //printf("file:%s,func:%s,line:%d cur_rank %d parent_rank:%d\n",__FILE__,__func__,__LINE__,cur_rank,parent_rank);
    if(parent_rank != -1) // not the root process
    {
        MPI_Send(sendbuf,count,MPI_FLOAT,parent_rank,tag,MPI_COMM_WORLD);
    }
    return 0;
}
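The loop above folds each buffer received from a child into the local send buffer before the result is forwarded to the parent. A minimal sketch of an element-wise accumulate consistent with how AddMatrix(recvbuf, sendbuf, count) is used here; the semantics (sendbuf[i] += recvbuf[i]) are assumed from context rather than taken from the library:

// Assumed semantics of the accumulate step: dst[i] += src[i] for count elements.
static void accumulate_into(const float *src, float *dst, int count)
{
    for (int i = 0; i < count; ++i)
        dst[i] += src[i];
}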
Example #3
0
void AnimaMappedValues::SetMatrix(const AnimaString& propertyName, const AnimaMatrix& value)
{
	AnimaString pName = _uniqueName + propertyName;
	if (_matricesMap.find(pName) == _matricesMap.end())
		AddMatrix(propertyName, value);
	else
		_matricesMap[pName] = value;
}
Example #4
0
bool PLSData::Calibrate(const mat &spectra, const mat &controls)
{
    //spectra is Y
    //controls are X
    mat X_loadings, Y_loadings, X_scores, Y_scores, coefficients, percent_variance, fitted;
    bool success = Vespucci::Math::DimensionReduction::plsregress(controls, spectra, controls.n_cols,
                                                                  X_loadings, Y_loadings,
                                                                  X_scores, Y_scores,
                                                                  coefficients, percent_variance,
                                                                  fitted);

    inplace_trans(coefficients);
    //mat residuals = fitted - spectra;
    if (success){
        AddMetadata("Type", "Calibration");
        AddMetadata("Components calculated", QString::number(controls.n_cols));
        AddMatrix("Percent Variance", percent_variance);
        AddMatrix("Predictor Loadings", X_loadings);
        AddMatrix("Response Loadings", Y_loadings);
        AddMatrix("Predictor Scores", X_scores);
        AddMatrix("Response Scores", Y_scores);
        AddMatrix("Coefficients", coefficients);
        AddMatrix("Fitted Data", fitted);
        //AddMatrix("Residuals", residuals);
    }

    return success;
}
Example #5
0
///
/// \brief PLSData::Classify
/// \param spectra Input matrix
/// \param wavelength Spectral abscissa
/// \param components Number of components to calculate
/// \return Whether the PLS regression succeeded
/// Performs PLS analysis on the spectra matrix.
bool PLSData::Classify(const mat &spectra, const vec &wavelength, int components)
{
    mat Y = repmat(wavelength, 1, components);
    mat X_loadings, Y_loadings, X_scores, Y_scores, coefficients, percent_variance, fitted;
    bool success = Vespucci::Math::DimensionReduction::plsregress(spectra, Y, components,
                                        X_loadings, Y_loadings,
                                        X_scores, Y_scores,
                                        coefficients, percent_variance,
                                        fitted);

    //mat residuals = fitted - spectra;
    if (success){
        AddMetadata("Type", "Classification (PCA)");
        AddMetadata("Components calculated", QString::number(components));
        AddMatrix("Percent Variance", percent_variance);
        AddMatrix("Predictor Loadings", X_loadings);
        AddMatrix("Response Loadings", Y_loadings);
        AddMatrix("Predictor Scores", X_scores);
        AddMatrix("Response Scores", Y_scores);
        AddMatrix("Coefficients", coefficients);
        AddMatrix("Fitted Data", fitted);
        //AddMatrix("Residuals", residuals);
    }

    return success;

}
Example #6
0
int main(void)
{
	TSMatrix *a1, *a2, *result1, *result2;
	a1 = InitTriple(3, 3, 3);
	a2 = InitTriple(3, 3, 3);
	
	InputValue(a1);
	InputValue(a2);
	
	ShowMatrix(a1);
	ShowMatrix(a2);

	if ((result1 = AddMatrix(a1, a2)) != NULL)
		ShowMatrix(result1);
	if ((result2 = SubMatrix(a1, a2)) != NULL)
		ShowMatrix(result2);
	
	return 0;
}
Example #7
0
///
/// \brief AnalysisResults::Concatenate
/// \param other Results to merge into this one
/// \return Whether any matrices could be merged
/// Merge the matrices of other into the same-named matrices of this one.
/// A mergeability check is performed for every matrix before merging;
/// non-mergeable matrices are removed from this result.
bool AnalysisResults::Concatenate(QSharedPointer<AnalysisResults> other)
{
    QSet<QString> intersection = KeyList().toSet().intersect(other->KeyList().toSet());
    //Check mergeability (this should not be a problem if both come from the same dataset)
    //The user may be allowed to merge results from two different datasets, should they want to do that
    QSet<QString> mergeable_matrices = intersection;
    for (auto key: intersection)
        if (GetMatrix(key).n_rows != other->GetMatrix(key).n_rows) {
            mergeable_matrices.remove(key);
            RemoveMatrix(key);
        }
    if (mergeable_matrices.isEmpty()) return false;
    for (auto key: mergeable_matrices) {
        mat matrix = GetMatrix(key);
        mat other_matrix = other->GetMatrix(key);
        matrix = join_horiz(matrix, other_matrix);
        AddMatrix(key, matrix);
    }
    return true;
}
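The merge relies on arma::join_horiz, which only succeeds when both operands have the same number of rows; that is why the row-count check above removes non-mergeable matrices before joining. A small self-contained illustration (matrix names are hypothetical):

#include <armadillo>

int main()
{
    arma::mat this_matrix(5, 2, arma::fill::ones);   // 5 rows, 2 columns
    arma::mat other_matrix(5, 3, arma::fill::zeros); // 5 rows, 3 columns
    // Equal row counts, so the horizontal join yields a 5 x 5 matrix.
    arma::mat merged = arma::join_horiz(this_matrix, other_matrix);
    return merged.n_cols == 5 ? 0 : 1;
}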
Example #8
0
	/**
	 * @brief        Add a matrix to workspace
	 *
	 * @param  name  Name
	 * @param  m     The added matrix
     *
	 * @return       Success
	 */
	template<class T> inline Matrix<T>&
	AddMatrix        (const std::string& name, Matrix<T>& m) {

		return AddMatrix(name, &m);

	}
Example #9
0
/****************************************************************
 * Function:   DoEKF()
 * Parameters:
 *               pModel model: model to be filtered with the EKF
 *         pModelParam mParam: storage for the states, observations and estimates
 *         pEKFParam ekfParam: parameters required by the Kalman filter
 *
 * Purpose:    Run an EKF on the model described by model.
 *             Pkk-1 and Pk are treated as symmetric matrices, so the
 *             algorithm used to compute these two terms differs from
 *             ordinary matrix multiplication.
 *
 * Returns:
 *             Success: OK
 *             Null input/output pointer: ERR_MEM_NONE
 *
 * Author:     申文斌
 * Date:       2011-3-22
 ****************************************************************/
int DoEKF(pModel model, pModelParam mParam, pEKFParam ekfParam)
{
    const double T = 0.033;      // sampling period
    int i, j, k;                // loop variables

    // Temporary variables used during filtering.
    // Matrices are stored as flat 1-D arrays; to reuse addresses they are
    // allocated at the maximum required size. Dynamic allocation is not
    // available on the ARM7 target (no operating system), so this rather
    // clumsy approach is used instead.
    double fx[MAX_STATE];    // holds the value of the state equation
    double Xhatkk_1[MAX_STATE];
    double Hk[MAX_OBS * MAX_STATE];            // observation matrix Hk
    double Phikk_1[MAX_STATE * MAX_STATE];
    double tmp[MAX_STATE * MAX_STATE];
    double Pkk_1[MAX_STATE * MAX_STATE];
    double Kk[MAX_STATE * MAX_OBS];
    double PxHk[MAX_STATE * MAX_OBS];
    double TranHk[MAX_STATE * MAX_OBS];
    double KkTmp[MAX_OBS * MAX_OBS];
    double h[MAX_OBS];
    double KkxHk[MAX_STATE * MAX_STATE];

    //double TranPhikk_1[MAX_STATE * MAX_STATE];
      
#ifdef __DEBUG__
    if ( (model == NULL) || (mParam == NULL) || (ekfParam == NULL) )
    {
        return ERR_MEM_NONE;        // null input/output pointer
    }
#endif

    if (*ekfParam->isFirst == 1)        // first run of the filter
    {
        
        *ekfParam->isFirst = 0;
        // use the passed-in state directly as the estimate
        for (i=0; i<mParam->lenState; i++)
        {
            ekfParam->preEst[i] = mParam->state[i];
            mParam->est[i] = mParam->state[i];
        }
        return OK;
    }//if

// EKF filtering procedure

    // State prediction: Xhatkk_1 = Xhatk_1 + T * fxu(Xhatk_1), Eq. (3-60)
    model->fxu(ekfParam->preEst, mParam->othParam, fx);
    for (i=0; i<mParam->lenState; i++)
    {
        fx[i] *= T;
    }
    AddMatrix(ekfParam->preEst, fx, Xhatkk_1,  mParam->lenState, 1);

    if ( model->fxu == QGFxu)      // if the state equation uses quaternions, the quaternion must be normalized
    {
        Norm(Xhatkk_1);       // normalize the quaternion
    }

    // Derivative of the observation equation with respect to the state;
    // the used size of Hk is mParam->lenObs * mParam->lenState
    model->hxDot(Xhatkk_1, Hk);

    // State transition matrix; discretization means the transition matrix is multiplied by T.
    // The used size of Phikk_1 is mParam->lenState * mParam->lenState
    model->fxDot(ekfParam->preEst, mParam->othParam, Phikk_1);
    for (i=0; i<mParam->lenState; i++)
    {
        for (j=0; j<mParam->lenState; j++)
        {
            Phikk_1[i * mParam->lenState + j] *= T;
        }
    }

    // Prediction error covariance matrix Pkk_1.
    // Pkk_1 is treated as a symmetric matrix here.
    // The used size of the temporary tmp is mParam->lenState * mParam->lenState
    MulMatrix(Phikk_1, ekfParam->Pk, tmp,
              mParam->lenState, mParam->lenState, mParam->lenState);
    // The product of these two matrices is symmetric, so the computation can be simplified.
    // Initialize Pkk_1; the used size is mParam->lenState * mParam->lenState
    for (i=0; i<mParam->lenState; i++)
    {
        for (j=0; j<=i; j++)
        {
            Pkk_1[i * mParam->lenState + j] = 0.0;    // zero the lower triangle of the output matrix
        }
    }
    for (i=0; i<mParam->lenState; i++)        // matrix multiplication
    {
        for (j=0; j<=i; j++)
        {
            for (k=0; k<mParam->lenState; k++)
            {
                // multiply by the transpose of Phikk_1
                Pkk_1[i * mParam->lenState + j] += 
                    tmp[i * mParam->lenState + k] *  Phikk_1[j * mParam->lenState + k];
            }
            Pkk_1[j * mParam->lenState + i] = Pkk_1[i * mParam->lenState + j];        // symmetric element
        }
    }

    for (i=0; i<mParam->lenState; i++)
    {
        // add the Q matrix; only the diagonal elements need to be added
        Pkk_1[i * mParam->lenState + i] += ekfParam->Q[i];
    }

    // Compute the filter gain matrix, Eq. (3-62)
    TranMatrix(Hk, TranHk, mParam->lenObs, mParam->lenState);
    // The used size of PxHk is mParam->lenState * mParam->lenObs
    MulMatrix(Pkk_1, TranHk, PxHk, mParam->lenState, mParam->lenState, mParam->lenObs);
    // The used size of KkTmp is mParam->lenObs * mParam->lenObs
    MulMatrix(Hk, PxHk, KkTmp, mParam->lenObs, mParam->lenState, mParam->lenObs);

    // Hk*Pkk_1*Hk' + R; since R is a diagonal matrix a full multiplication is not needed,
    // only the diagonal elements have to be added
    for (i=0; i<mParam->lenObs; i++)
    {
        KkTmp[i * mParam->lenObs + i] += ekfParam->R[i];
    }

    // Hk*Pkk_1*Hk' + R is a symmetric matrix
    if ( InvSymMtrx(KkTmp, KkTmp, mParam->lenObs) != OK )
    {
        // If Hk*Pkk_1*Hk' + R is singular and cannot be inverted,
        // use the passed-in state directly as the estimate
        for (i=0; i<mParam->lenState; i++)
        {
            ekfParam->preEst[i] = mParam->state[i];
            mParam->est[i] = mParam->state[i];
        }
        return OK;
    }

    // The used size of Kk is mParam->lenState * mParam->lenObs
    MulMatrix(PxHk, KkTmp, Kk, mParam->lenState, mParam->lenObs, mParam->lenObs);

    // State estimation
    model->hx(Xhatkk_1, h);
    SubMatrix( mParam->obs, h, h, mParam->lenObs, 1);        // h = obs - hx(Xhatkk_1)
     //----------------------- syc 2011.11.14------------------------------//
    if ( model->fxu == QGFxu)     // if the state equation uses quaternions, the angle residuals must be wrapped
    {
      for(int i=0;i<3;i++)
      {
        if(h[i]>PI)
          h[i]-=2.0*PI;
        else
          if(h[i]<-PI)
            h[i]+=2.0*PI; 
      }
    }
    //------------------------------------------------------------------//
    MulMatrix(Kk, h, ekfParam->preEst, mParam->lenState, mParam->lenObs, 1);
    // ekfParam->preEst now holds the filtered estimate for this step
    AddMatrix(ekfParam->preEst, Xhatkk_1, ekfParam->preEst, mParam->lenState, 1);

    if ( model->fxu == QGFxu)     // if the state equation uses quaternions, the quaternion must be normalized
    {
        Norm(ekfParam->preEst);   // normalize the quaternion
    }

    // Filtered error covariance matrix
    // The used size of KkxHk is mParam->lenState * mParam->lenState
    MulMatrix(Kk, Hk, KkxHk, mParam->lenState, mParam->lenObs, mParam->lenState);
    for (i=0; i<mParam->lenState; i++)
    {
        for (j=0; j<mParam->lenState; j++)
        {
            KkxHk[i * mParam->lenState + j] = (i == j) - KkxHk[i * mParam->lenState + j];
        }
    }
   // MulMatrix(KkxHk, Pkk_1, ekfParam->Pk, mParam->lenState, mParam->lenState, mParam->lenState);
    for (i=0; i<mParam->lenState; i++)
    {
        for (j=0; j<=i; j++)
        {
            ekfParam->Pk[i*mParam->lenState + j] = 0.0;    // zero the lower triangle of Pk
        }
    }

    for (i=0; i<mParam->lenState; i++)        // matrix multiplication
    {
        for (j=0; j<=i; j++)
        {
            for (k=0; k<mParam->lenState; k++)
            {
                ekfParam->Pk[i*mParam->lenState + j] +=
                    KkxHk[i * mParam->lenState + k] *  Pkk_1[k * mParam->lenState + j];
            }
            // symmetric element
            ekfParam->Pk[j*mParam->lenState + i] = ekfParam->Pk[i*mParam->lenState + j];
        }
    }

    // copy the result to the destination address
    for (i=0; i<mParam->lenState; i++)
    {
        mParam->est[i] = ekfParam->preEst[i];
    }

    return OK;
}
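The covariance updates above exploit the symmetry mentioned in the header comment: once tmp = Phikk_1 * Pk has been formed, only the lower triangle of tmp * Phikk_1' is computed and then mirrored. A standalone sketch of that shortcut, illustrative rather than part of DoEKF:

// Compute out = tmp * Phi' for an n x n result that is known to be symmetric,
// filling only the lower triangle and mirroring it into the upper triangle.
static void SymmetricProduct(const double *tmp, const double *Phi, double *out, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j <= i; j++)
        {
            double sum = 0.0;
            for (int k = 0; k < n; k++)
                sum += tmp[i * n + k] * Phi[j * n + k];   // row i of tmp times row j of Phi, i.e. column j of Phi'
            out[i * n + j] = sum;
            out[j * n + i] = sum;   // symmetric element
        }
    }
}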
Example #10
0
int main(int argc,char **argv){
	int m;
	int rank,size; 
	MPI_Status status;
	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);
	MPI_Comm_size(MPI_COMM_WORLD,&size);
	// Split the sample set evenly; the master process does not process samples, the worker processes do
	m = DATASET/(size-1);
	printf(" m = %d\n",m);
	if(rank == 0){
		int i;
		float *Ht,*Hh,*tempht,*temphh,*result;
		result = (float *)calloc(HIDDEN_NEURONS * OUTPUT_NEURONS,sizeof(float));	/* HIDDEN_NEURONS * OUTPUT_NEURONS */
		Ht = (float *)calloc(HIDDEN_NEURONS * OUTPUT_NEURONS,sizeof(float)); 		/* HIDDEN_NEURONS * OUTPUT_NEURONS */
		tempht = (float *)calloc(HIDDEN_NEURONS * OUTPUT_NEURONS,sizeof(float)); 	/* HIDDEN_NEURONS * OUTPUT_NEURONS */
		Hh = (float *)calloc(HIDDEN_NEURONS * HIDDEN_NEURONS,sizeof(float)); 		/* HIDDEN_NEURONS * HIDDEN_NEURONS */
		temphh = (float *)calloc(HIDDEN_NEURONS * HIDDEN_NEURONS,sizeof(float)); 	/* HIDDEN_NEURONS * HIDDEN_NEURONS */
		// initialization
		/* The weights would need to be transposed during the computation; since the
		parameters are all random, they are defined already transposed for convenience.
		The original dimensions are HIDDEN_NEURONS*INPUT_NEURONS. */
		float *weight = (float *)calloc(INPUT_NEURONS*HIDDEN_NEURONS,sizeof(float)); /*INPUT_NEURONS * HIDDEN_NEURONS */
		float *bias = (float *)calloc(HIDDEN_NEURONS,sizeof(float)); 
		RandomWeight_s(weight,INPUT_NEURONS,HIDDEN_NEURONS);
		RandomBiase(bias,HIDDEN_NEURONS);
		// Generate random weights and biases and broadcast them to all worker processes
		printf("file:%s,func:%s,line:%d rank:%d\n",__FILE__,__func__,__LINE__,rank);
		MPI_Bcast(weight,INPUT_NEURONS * HIDDEN_NEURONS,MPI_FLOAT,0,MPI_COMM_WORLD);
		MPI_Bcast(bias,HIDDEN_NEURONS,MPI_FLOAT,0,MPI_COMM_WORLD);
		// Receive data from the worker processes: 1. accumulate H'H, 2. accumulate H'T
		for(i = 1;i < size;i++){
			MPI_Recv(Hh,HIDDEN_NEURONS * HIDDEN_NEURONS,MPI_FLOAT,i,0,MPI_COMM_WORLD,&status);
			MPI_Recv(Ht,HIDDEN_NEURONS * OUTPUT_NEURONS,MPI_FLOAT,i,1,MPI_COMM_WORLD,&status);
			AddMatrix(Ht,tempht,HIDDEN_NEURONS * OUTPUT_NEURONS);
			AddMatrix(Hh,temphh,HIDDEN_NEURONS * HIDDEN_NEURONS);
		}
		// 1. Invert the accumulated H'H
		// ridge regression: add 1 to the diagonal before inverting
		for(i = 0;i < HIDDEN_NEURONS*HIDDEN_NEURONS;i++){
				if(i % HIDDEN_NEURONS == i / HIDDEN_NEURONS) 
					Hh[i] += 1; 
		}
		InverseMatirx_cblas_s(Hh,HIDDEN_NEURONS);
		// 2. Multiply the two results above to obtain the final result
		MultiplyMatrix_cblas_s(Hh,HIDDEN_NEURONS,HIDDEN_NEURONS,Ht,HIDDEN_NEURONS,OUTPUT_NEURONS,result);
		// save the result for the regression accuracy test
		SaveMatrix_s(result,"./result/result",HIDDEN_NEURONS,OUTPUT_NEURONS);	
	}else{
		int i,j = 0,k = 0;
		char dir[20];
		float *train_set,*T,*input,*weight,*bias,*tempI,*Ht,*Hh,*tranpH;
		train_set = (float *)calloc(m * NUMROWS,sizeof(float)); 				/* m * NUMROWS */
		T = (float *)calloc(m * OUTPUT_NEURONS,sizeof(float)); 					/* m * OUTPUT_NEURONS */
		input = (float *)calloc(m * INPUT_NEURONS,sizeof(float)); 				/* m * INPUT_NEURONS */
		weight = (float *)calloc(INPUT_NEURONS * HIDDEN_NEURONS,sizeof(float)); /* INPUT_NEURONS * HIDDEN_NEURONS */
		bias = (float *)calloc(HIDDEN_NEURONS,sizeof(float)); 					/* HIDDEN_NEURONS */
		tempI = (float *)calloc(m * HIDDEN_NEURONS,sizeof(float)); 				/* m * HIDDEN_NEURONS */
		Ht = (float *)calloc(HIDDEN_NEURONS * OUTPUT_NEURONS,sizeof(float)); 	/* HIDDEN_NEURONS * OUTPUT_NEURONS */
		Hh = (float *)calloc(HIDDEN_NEURONS * HIDDEN_NEURONS,sizeof(float)); 	/* HIDDEN_NEURONS * HIDDEN_NEURONS */
		tranpH = (float *)calloc(HIDDEN_NEURONS * m,sizeof(float)); 			/* m * HIDDEN_NEURONS */
		MPI_Bcast(weight,(INPUT_NEURONS)*(HIDDEN_NEURONS),MPI_FLOAT,0,MPI_COMM_WORLD);
		MPI_Bcast(bias,HIDDEN_NEURONS,MPI_FLOAT,0,MPI_COMM_WORLD);
		sprintf(dir,"./sample/p%d",rank);
		// load the corresponding sample set from local storage
		if(LoadMatrix_s(train_set,dir,m,NUMROWS) == 0){
			printf("rank %d:load input file error!!!\n",rank);
			MPI_Abort(MPI_COMM_WORLD,-1);
		}
		
		/* split the data set into inputs and outputs */
		for(i = 0;i<m*NUMROWS;i++){
			if(i % NUMROWS == 0){
				T[k++] = train_set[i];
			}else{
				input[j++] = train_set[i];
			}
		}
		//input * weight + B;
		MultiplyMatrix_cblas_s(input,m,INPUT_NEURONS,weight,INPUT_NEURONS,HIDDEN_NEURONS,tempI);
		AddMatrix_bais_s(tempI,bias,m,HIDDEN_NEURONS);
		//sigmoid
		SigmoidHandle_s(tempI,m,HIDDEN_NEURONS);
		TranspositionMatrix_s(tempI,tranpH,m,HIDDEN_NEURONS);
		//H'T
		MultiplyMatrix_cblas_s(tranpH,HIDDEN_NEURONS,m,T,m,OUTPUT_NEURONS,Ht);
		//H'H
		MultiplyMatrix_cblas_s(tranpH,HIDDEN_NEURONS,m,tempI,m,HIDDEN_NEURONS,Hh);
		// send Ht and Hh to the master process
		MPI_Send(Hh,HIDDEN_NEURONS * HIDDEN_NEURONS,MPI_FLOAT,0,0,MPI_COMM_WORLD);
		MPI_Send(Ht,HIDDEN_NEURONS * OUTPUT_NEURONS,MPI_FLOAT,0,1,MPI_COMM_WORLD);
	}
	MPI_Finalize();
	return 0;
}
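For reference, the master branch above assembles the output weights via roughly beta = inverse(H'H + I) * H'T (a ridge-regularized least-squares solve), with H'H and H'T accumulated from the workers. The diagonal test in the ridge loop works because, for a flattened N x N matrix, index i lies on the diagonal exactly when i % N == i / N. A tiny self-contained sketch of that diagonal update; add_ridge and lambda are illustrative names:

// Add a ridge term lambda to the diagonal of a flattened n x n matrix.
static void add_ridge(float *a, int n, float lambda)
{
    for (int i = 0; i < n * n; i++)
    {
        if (i % n == i / n)   // i indexes a diagonal element a[r*n + r]
            a[i] += lambda;
    }
}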
Example #11
0
void AnimaMappedValues::AddMatrix(const AnimaString& propertyName, AFloat value[16])
{
	AnimaMatrix mat(value);
	AddMatrix(propertyName, mat);
}