Code Example #1
MachineABC* LeastSquareTrainer::Train(DataSet* pData)
{
	LeastSquareMachine* pMachine=new LeastSquareMachine;

	// Wrap the training samples (nCount x nDim matrix A) and the target values (vector y).
	Matd* m_pmData = new Matd((double**)pData->m_pprData, pData->m_nCount, pData->m_nDim);
	Vecd* m_pvObj = new Vecd((double*)pData->m_prCls, pData->m_nCount);

	// Normal equations: form B = A^T A and invert it.
	Mat mB(m_pmData->Cols(), m_pmData->Cols(), MAT_Tdouble);
	MatOp::TrAA(&mB, m_pmData);
	mB.Invert();

	// Form (A^T A)^{-1} A^T.
	Mat mTemp(m_pmData->Cols(), m_pmData->Rows(), MAT_Tdouble);
	Mat mTr(m_pmData->Cols(), m_pmData->Rows(), MAT_Tdouble);
	MatOp::Transpose(&mTr, m_pmData);
	MatOp::Mul(&mTemp, &mB, &mTr);

	// Coefficients w = (A^T A)^{-1} A^T y, stored as float in the machine.
	pMachine->m_vCoeff.Create(m_pmData->Cols(), MAT_Tfloat);
	Vec vRet(m_pmData->Cols(), MAT_Tdouble);
	MatOp::Mul(&vRet, &mTemp, m_pvObj);
	for (int i = 0; i < vRet.Length(); i++)
		pMachine->m_vCoeff.data.fl[i] = (float)vRet.data.db[i];
	delete m_pmData;
	delete m_pvObj;
	return pMachine;
}
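
For reference, the closed-form solution this trainer computes is the ordinary least-squares estimate (a standard identity, not anything specific to this project):

$$ w = (A^{\mathsf{T}} A)^{-1} A^{\mathsf{T}} y $$

where A is the nCount x nDim data matrix and y the vector of target values. The code forms the inverse of A^T A explicitly, which is acceptable for a small number of features; Code Example #4 below performs the same computation through an LU decomposition.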
Code Example #2
File: test_chol.cpp  Project: victorliu/RNP2
template <typename T>
void test_chol(const char *uplo, size_t n){
	typedef typename RNP::Traits<T>::real_type real_type;
	real_type rsnrm(1./((n*n) * RNP::Traits<real_type>::eps()));
	T *Afac = new T[n*n];
	for(size_t j = 0; j < n; ++j){
		RNP::Random::GenerateVector(RNP::Random::Distribution::Uniform_11, n, &Afac[0+j*n]);
	}
	T *A = new T[n*n];
	RNP::BLAS::MultMM("C", "N", n, n, n, T(1), Afac, n, Afac, n, T(0), A, n);

	// Workspace
	T *B = new T[n*n];
	T *C = new T[n*n];
	
	if(0){
		std::cout << "Original A:" << std::endl;
		RNP::Matrix<T> mA(n, n, A, n);
		std::cout << RNP::IO::Chop(mA) << std::endl << std::endl;
	}
	
	RNP::BLAS::Copy(n, n, A, n, Afac, n);
	RNP::LA::Cholesky::Factor(uplo, n, Afac, n);
	
	if(0){
		std::cout << "Factored A:" << std::endl;
		RNP::Matrix<T> mA(n, n, Afac, n);
		std::cout << RNP::IO::Chop(mA) << std::endl << std::endl;
	}

	RNP::BLAS::Copy(n, n, A, n, B, n);
	RNP::BLAS::Set(n, n, T(0), T(0), C, n);
	RNP::LA::Triangular::Copy(uplo, n, n, Afac, n, C, n);
	
	if('U' == uplo[0]){
		RNP::BLAS::MultMM("C", "N", n, n, n, T(1), C, n, C, n, T(-1), B, n);
	}else{
		RNP::BLAS::MultMM("N", "C", n, n, n, T(1), C, n, C, n, T(-1), B, n);
	}
	
	if(0){
		std::cout << "F*F - A:" << std::endl;
		RNP::Matrix<T> mB(n, n, B, n);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// Check that the factorization residual (F'*F - A or F*F' - A) is small
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			for(size_t i = 0; i < n; ++i){
				sum += RNP::Traits<T>::abs(B[i+j*n]);
			}
		}
		std::cout << "diff norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	delete [] C;
	delete [] B;
	delete [] Afac;
	delete [] A;
}
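
The quantity printed at the end is a scaled, entrywise 1-norm of the factorization residual (a standard backward-error style check; the scaling by 1/(n^2 eps) is this test's own convention):

$$ \frac{\sum_{i,j} \lvert (F^{H}F - A)_{ij} \rvert}{n^{2}\,\varepsilon} \quad (\text{uplo} = \text{'U'}), \qquad \frac{\sum_{i,j} \lvert (F F^{H} - A)_{ij} \rvert}{n^{2}\,\varepsilon} \quad (\text{uplo} = \text{'L'}), $$

which should stay of order one when the Cholesky factor is computed accurately.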
Code Example #3
File: PetscTSExecutioner.C  Project: ChaliZhg/moose
 // IJacobian callback for PETSc TS (older PETSc signature taking Mat* and MatStructure* arguments).
 static PetscErrorCode _computeIJacobian(TS /* ts */, PetscReal time, Vec X, Vec Xdot, PetscReal shift, Mat *J, Mat *B, MatStructure *mstr, void *ctx) {
   // Recover the PetscTimeStepper passed as the user context.
   PetscTimeStepper *ths = (PetscTimeStepper*)ctx;
   // Wrap the raw PETSc objects in libMesh wrappers and delegate assembly of the
   // implicit Jacobian (dF/dX + shift*dF/dXdot) into the preconditioning matrix B.
   PetscVector<Number> mX(X), mXdot(Xdot);
   PetscMatrix<Number> mJ(*J), mB(*B);
   ths->computeTransientImplicitJacobian(time, mX, mXdot, shift, mB);
   mJ.close();
   *mstr = SAME_NONZERO_PATTERN;
   return 0;
 }
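
A minimal sketch of how such a callback is typically attached to the TS object (hypothetical wiring, not taken from the project; it assumes the same older PETSc API as the snippet, and that ts, J, B, and stepper have been created elsewhere):

 // TSSetIJacobian registers the Jacobian callback together with the user context.
 PetscErrorCode ierr;
 ierr = TSSetIJacobian(ts, J, B, _computeIJacobian, (void*)stepper); CHKERRQ(ierr);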
Code Example #4
int	LinearRegressorTrainer::Train (ClassifierABC* pMachine)
{
	LinearRegressor* pRegressor = (LinearRegressor*)pMachine;

	int i;

	// Normal equations: B = A^T A, factored with an LU decomposition.
	Mat mB(m_pmData->Cols(), m_pmData->Cols(), MAT_Tdouble);
	MatOp::TrAA(&mB, m_pmData);
	LUDecomposition lu(&mB);

	// Solve B * X = I to obtain (A^T A)^{-1}.
	Mat mI(m_pmData->Cols(), m_pmData->Cols(), MAT_Tdouble);
	mI.Zero();
	for (i = 0; i < mI.Rows(); i++)
		mI.data.db[i][i] = 1.0;
	Mat* pmInverse = lu.Solve(&mI);
	if (pmInverse == 0)
		return 0;	// solve failed (e.g. singular A^T A)

	// Form (A^T A)^{-1} A^T.
	Mat mTemp(m_pmData->Cols(), m_pmData->Rows(), MAT_Tdouble);
	Mat mTr(m_pmData->Cols(), m_pmData->Rows(), MAT_Tdouble);
	MatOp::Transpose(&mTr, m_pmData);
	MatOp::Mul(&mTemp, pmInverse, &mTr);

	// Coefficients w = (A^T A)^{-1} A^T y, stored as float in the regressor.
	pRegressor->m_vCoeff.Create(m_pmData->Cols(), MAT_Tfloat);
	Vec vRet(m_pmData->Cols(), MAT_Tdouble);
	MatOp::Mul(&vRet, &mTemp, m_pvObj);
	for (i = 0; i < vRet.Length(); i++)
		pRegressor->m_vCoeff.data.fl[i] = (float)vRet.data.db[i];
	
	ReleaseMat(pmInverse);
	mTemp.Release();
	mTr.Release();
	mI.Release();
	mB.Release();

	return 1;
}
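
This trainer forms the explicit inverse of A^T A and then multiplies it by A^T. A minimal sketch of an alternative, assuming lu.Solve() also accepts a rectangular right-hand side (the snippet only shows it called with the square identity), is to solve against A^T directly and never materialize the inverse:

	// Hypothetical variant; same Mat/MatOp/LUDecomposition API as above.
	Mat mAtA(m_pmData->Cols(), m_pmData->Cols(), MAT_Tdouble);
	MatOp::TrAA(&mAtA, m_pmData);               // A^T A
	Mat mTr(m_pmData->Cols(), m_pmData->Rows(), MAT_Tdouble);
	MatOp::Transpose(&mTr, m_pmData);           // A^T
	LUDecomposition lu(&mAtA);
	Mat* pmW = lu.Solve(&mTr);                  // W = (A^T A)^{-1} A^T, no explicit inverse
	Vec vCoeff(m_pmData->Cols(), MAT_Tdouble);
	MatOp::Mul(&vCoeff, pmW, m_pvObj);          // w = W y
	ReleaseMat(pmW);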
Code Example #5
File: test_ql_fat.cpp  Project: victorliu/RNP2
template <typename T>
void test_ql(size_t m, size_t n){
	typedef typename RNP::Traits<T>::real_type real_type;
	real_type rsnrm(1./((m*n) * RNP::Traits<real_type>::eps()));
	T *A = new T[m*n];
	for(size_t j = 0; j < n; ++j){
		RNP::Random::GenerateVector(RNP::Random::Distribution::Uniform_11, m, &A[0+j*m]);
	}
	T *Afac = new T[m*n];

	// Workspace
	T *B = new T[m*n];
	
	if(0){
		std::cout << "Original A:" << std::endl;
		RNP::Matrix<T> mA(m, n, A, m);
		std::cout << RNP::IO::Chop(mA) << std::endl << std::endl;
	}
	
	T *tau = new T[m];
	T *work = NULL;
	size_t lwork = 0;
	RNP::BLAS::Copy(m, n, A, m, Afac, m);
	RNP::LA::QL::Factor(m, n, Afac, m, tau, &lwork, work);
	//lwork = n;
	std::cout << "lwork = " << lwork << std::endl;
	work = new T[lwork];
	RNP::LA::QL::Factor(m, n, Afac, m, tau, &lwork, work);
	//int info; dgeql2_(m, n, Afac, m, tau, work, &info); std::cout << "info = " << info << std::endl;
	
	if(0){
		std::cout << "Factored A:" << std::endl;
		RNP::Matrix<T> mA(m, n, Afac, m);
		std::cout << RNP::IO::Chop(mA) << std::endl << std::endl;
	}

	// Apply Q' to the left of original A (use B for workspace)
	RNP::BLAS::Copy(m, n, A, m, B, m);
	delete [] work; work = NULL; lwork = 0;
	RNP::LA::QL::MultQ("L", "C", m, n, m, &Afac[0+(n-m)*m], m, tau, B, m, &lwork, work);
	//lwork = n;
	work = new T[lwork];
	RNP::LA::QL::MultQ("L", "C", m, n, m, &Afac[0+(n-m)*m], m, tau, B, m, &lwork, work);
	//dorm2l_("L", "T", m, n, m, &Afac[0+(n-m)*m], m, tau, B, m, work, &info); std::cout << "info = " << info << std::endl;
	
	if(0){
		std::cout << "Q' * origA:" << std::endl;
		RNP::Matrix<T> mB(m, n, B, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// Check that the lower trapezoid of Q' * A matches L
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			const size_t i0 = (j > n-m ? j-(n-m) : 0);
			for(size_t i = i0; i < m; ++i){
				sum += RNP::Traits<T>::abs(Afac[i+j*m] - B[i+j*m]);
			}
		}
		std::cout << "L norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Apply Q to the left of L
	RNP::BLAS::Set(m, n, T(0), T(0), B, m);
	RNP::BLAS::Copy(m, n-m, Afac, m, B, m);
	RNP::LA::Triangular::Copy("L", m, m, &Afac[0+(n-m)*m], m, &B[0+(n-m)*m], m);
	if(0){
		std::cout << "B = L:" << std::endl;
		RNP::Matrix<T> mB(m, n, B, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	RNP::LA::QL::MultQ("L", "N", m, n, m, &Afac[0+(n-m)*m], m, tau, B, m, &lwork, work);
	if(0){
		std::cout << "B = Q*L:" << std::endl;
		RNP::Matrix<T> mB(m, n, B, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// We should recover the original matrix
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			for(size_t i = 0; i < m; ++i){
				sum += RNP::Traits<T>::abs(A[i+j*m] - B[i+j*m]);
			}
		}
		std::cout << "(A - Q*L) norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Now treat B as an n-by-m matrix, copy A' into it,
	// and apply Q from the right
	for(size_t j = 0; j < m; ++j){
		for(size_t i = 0; i < n; ++i){
			B[i+j*n] = RNP::Traits<T>::conj(A[j+i*m]);
		}
	}
	if(0){
		std::cout << "B = A':" << std::endl;
		RNP::Matrix<T> mB(n, m, B, n);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	RNP::LA::QL::MultQ("R", "N", n, m, m, &Afac[0+(n-m)*m], m, tau, B, n, &lwork, work);
	if(0){
		std::cout << "B = L':" << std::endl;
		RNP::Matrix<T> mB(n, m, B, n);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// We should recover L'
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			const size_t i0 = (j > n-m ? j-(n-m) : 0);
			for(size_t i = i0; i < m; ++i){
				sum += RNP::Traits<T>::abs(Afac[i+j*m] - RNP::Traits<T>::conj(B[j+i*n]));
			}
		}
		std::cout << "L' norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Now set B = L', and apply Q' from the right to get A'
	RNP::BLAS::Set(n, m, T(0), T(0), B, n);
	for(size_t j = 0; j < n; ++j){
		const size_t i0 = (j > n-m ? j-(n-m) : 0);
		for(size_t i = i0; i < m; ++i){
			B[j+i*n] = RNP::Traits<T>::conj(Afac[i+j*m]);
		}
	}
	RNP::LA::QL::MultQ("R", "C", n, m, m, &Afac[0+(n-m)*m], m, tau, B, n, &lwork, work);
	// We should recover A'
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			for(size_t i = 0; i < m; ++i){
				sum += RNP::Traits<T>::abs(A[i+j*m] - RNP::Traits<T>::conj(B[j+i*n]));
			}
		}
		std::cout << "A' norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	
	// Make Q
	T *Q = new T[m*m];
	RNP::BLAS::Copy(m, m, &Afac[0+(n-m)*m], m, Q, m);
	delete [] work; work = NULL; lwork = 0;
	RNP::LA::QL::GenerateQ(m, m, m, Q, m, tau, &lwork, work);
	//lwork = n;
	work = new T[lwork];
	RNP::LA::QL::GenerateQ(m, m, m, Q, m, tau, &lwork, work);
	
	if(0){
		std::cout << "Q:" << std::endl;
		RNP::Matrix<T> mQ(m, m, Q, m);
		std::cout << RNP::IO::Chop(mQ) << std::endl << std::endl;
	}
	
	// Form Q'*Q
	T *QQ = new T[m*n];
	RNP::BLAS::MultMM("C", "N", m, m, m, 1., Q, m, Q, m, 0., QQ, m);
	
	if(0){
		std::cout << "Q' * Q:" << std::endl;
		RNP::Matrix<T> mQQ(m, m, QQ, m);
		std::cout << RNP::IO::Chop(mQQ) << std::endl << std::endl;
	}
	
	// Check to see if we get I
	if(1){
		T sum = 0;
		for(size_t j = 0; j < m; ++j){
			for(size_t i = 0; i < m; ++i){
				T delta = (i == j ? 1 : 0);
				sum += RNP::Traits<T>::abs(QQ[i+j*m] - delta);
			}
		}
		std::cout << "Q' * Q - I norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Form Q*L
	//  Put L in QQ for now
	RNP::BLAS::Set(m, n, T(0), T(0), QQ, m);
	RNP::BLAS::Copy(m, n-m, Afac, m, QQ, m);
	RNP::LA::Triangular::Copy("L", m, m, &Afac[0+(n-m)*m], m, &QQ[0+(n-m)*m], m);
	if(0){
		std::cout << "QQ = L:" << std::endl;
		RNP::Matrix<T> mB(m, n, QQ, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	RNP::BLAS::MultMM("N", "N", m, n, m, T(1), Q, m, QQ, m, T(0), B, m);
	if(0){
		std::cout << "B = Q*L:" << std::endl;
		RNP::Matrix<T> mB(m, n, B, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// We should recover the original matrix
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			for(size_t i = 0; i < m; ++i){
				sum += RNP::Traits<T>::abs(A[i+j*m] - B[i+j*m]);
			}
		}
		std::cout << "(A - Q*L) norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	delete [] QQ;
	delete [] Q;
	delete [] B;
	delete [] Afac;
	delete [] A;
	delete [] tau;
	delete [] work;
}
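
For orientation, the identities this test exercises are the standard properties of a QL factorization of an m x n matrix A with m <= n (general linear-algebra facts, not specific to RNP2):

$$ A = Q\,L, \qquad Q^{H} A = L, \qquad Q^{H} Q = I_m, $$

with Q an m x m unitary matrix built from the Householder reflectors stored in the last m columns of Afac, and L lower trapezoidal. Each check sums the entrywise absolute error and scales it by 1/(m n eps), so the printed numbers should be of order one.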
Code Example #6
File: PHX2.cpp  Project: mordimerr/Phx
void Joint::update(float delta)
{

	Vec2 pa = pA;
	Vec2 pb = pB;

	RotMat mA(A->angle);
	RotMat mB(B->angle);

	pa.rotate(mA);
	pb.rotate(mB);

	pa.add(A->position);
	pb.add(B->position);


	Vec2 ra = pa - A->position;
	Vec2 rpa = Vec2(-ra.y, ra.x);

	Vec2 rb = pb - B->position;
	Vec2 rpb(-rb.y, rb.x);


	float d = distance - (pa-pb).value();

	Vec2 n = pa-pb;

	if (n.value() == 0.0f)
		return;

	n.normalize();

	//position correction
	float s = d / (A->inv_mass + B->inv_mass );

	if (A->dynamic == true)
		A->translate(n * s * A->inv_mass);

	if (B->dynamic == true)
		B->translate(n * s * B->inv_mass * -1);

	//relative velocities
	Vec2 v1 = A->velocity + rpa*A->omega;
	Vec2 v2 = B->velocity + rpb*B->omega;

	Vec2 v = v1-v2;

	//calculate impulse
	float j = -Scalar(v, n) / (A->inv_mass  + B->inv_mass  + Scalar(rpa, n)*Scalar(rpa, n)*A->inv_inertia  + Scalar(rpb, n)*Scalar(rpb, n)*B->inv_inertia );

	

	//apply impulse
	if (A->dynamic == true)
	{
		A->velocity.add(n * A->inv_mass * j);
		A->omega += Scalar(rpa, n*j) * A->inv_inertia;
	}

	if (B->dynamic == true)
	{
		B->velocity.add(n * B->inv_mass * j * -1);
		B->omega -= Scalar(rpb, n*j) * B->inv_inertia;
	}
	/*
	Vec2 AB = Vector(A->position, B->position);               // vector AB

	float translation = AB.value() - distance;                // required translation of the bodies

	AB.normalize();                                           // normalize the vector

	Vec2 RelVel = B->velocity - A->velocity;                  // resultant velocity of the bodies

	float vel = Scalar(RelVel, AB) + translation;

	vel = vel / (A->inv_mass + B->inv_mass);

	AB = AB * vel;

	A->ApplyImpulse(AB);
	B->ApplyImpulse(AB * -1);
	*/
}
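
The impulse magnitude j computed above is the standard distance-constraint impulse from rigid-body dynamics (a textbook formula, not something specific to this engine):

$$ j = \frac{-\,\mathbf{v}\cdot\mathbf{n}}{m_A^{-1} + m_B^{-1} + (\mathbf{r}_A^{\perp}\cdot\mathbf{n})^{2} I_A^{-1} + (\mathbf{r}_B^{\perp}\cdot\mathbf{n})^{2} I_B^{-1}} $$

Here n is the unit vector between the two anchor points, v = v_A - v_B the relative velocity of the anchors (including the omega x r terms), r_perp the perpendicular of each anchor offset, m^{-1} the inverse masses, and I^{-1} the inverse inertias. The code then applies +j*n to body A and -j*n to body B, split into linear and angular parts exactly as the formula implies.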
Code Example #7
File: test_rq_fat.cpp  Project: victorliu/RNP2
template <typename T>
void test_rq(size_t m, size_t n){
	typedef typename RNP::Traits<T>::real_type real_type;
	real_type rsnrm(1./((m*n) * RNP::Traits<real_type>::eps()));
	T *A = new T[m*n];
	for(size_t j = 0; j < n; ++j){
		RNP::Random::GenerateVector(RNP::Random::Distribution::Uniform_11, m, &A[0+j*m]);
	}
	T *Afac = new T[m*n];

	// Workspace
	T *B = new T[m*n];
	
	if(0){
		std::cout << "Original A:" << std::endl;
		RNP::Matrix<T> mA(m, n, A, m);
		std::cout << RNP::IO::Chop(mA) << std::endl << std::endl;
	}
	
	T *tau = new T[m];
	T *work = NULL;
	size_t lwork = 0;
	RNP::BLAS::Copy(m, n, A, m, Afac, m);
	
	RNP::LA::RQ::Factor(m, n, Afac, m, tau, &lwork, work);
	//lwork = m;
	std::cout << "lwork = " << lwork << std::endl;
	work = new T[lwork];
	RNP::LA::RQ::Factor(m, n, Afac, m, tau, &lwork, work);
	//RNP::LA::RQ::Factor_unblocked(m, n, Afac, m, tau, work);
	//int info; zgerq2_(m, n, Afac, m, tau, work, &info);
	//int info;
	if(0){
		std::cout << "Factored A:" << std::endl;
		RNP::Matrix<T> mA(m, n, Afac, m);
		std::cout << RNP::IO::Chop(mA) << std::endl << std::endl;
	}
	
	// Apply Q' to the right of original A (use B for workspace)
	RNP::BLAS::Copy(m, n, A, m, B, m);
	delete [] work; work = NULL; lwork = 0;
	RNP::LA::RQ::MultQ("R", "C", m, n, m, Afac, m, tau, B, m, &lwork, work);
	//lwork = m;
	work = new T[lwork];
	RNP::LA::RQ::MultQ("R", "C", m, n, m, Afac, m, tau, B, m, &lwork, work);
	//RNP::LA::RQ::MultQ_unblocked("R", "C", m, n, n, &Afac[m-n+0*m], m, tau, B, m, work);
	//dormr2_("R", "T", m, n, n, &Afac[m-n+0*m], m, tau, B, m, work, &info);
	//zunmr2_("R", "C", m, n, n, &Afac[m-n+0*m], m, tau, B, m, work, &info);
	
	if(0){
		std::cout << "Q' * origA:" << std::endl;
		RNP::Matrix<T> mB(m, n, B, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// Check to see if the upper trapezoid is correct
	if(1){
		T sum = 0;
		for(size_t j = n-m; j < n; ++j){
			size_t i;
			for(i = 0; i <= j-(n-m); ++i){
				sum += RNP::Traits<T>::abs(Afac[i+j*m] - B[i+j*m]);
			}
			for(; i < m; ++i){ // check for zero lower triangle
				sum += RNP::Traits<T>::abs(B[i+j*m]);
			}
		}
		std::cout << "R norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Apply Q to the right of R
	RNP::BLAS::Set(m, n, T(0), T(0), B, m);
	RNP::LA::Triangular::Copy("U", m, m, &Afac[0+(n-m)*m], m, &B[0+(n-m)*m], m);
	if(0){
		std::cout << "B = R:" << std::endl;
		RNP::Matrix<T> mB(m, n, B, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	RNP::LA::RQ::MultQ("R", "N", m, n, m, Afac, m, tau, B, m, &lwork, work);
	
	if(0){
		std::cout << "B = R*Q:" << std::endl;
		RNP::Matrix<T> mB(m, n, B, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// We should recover the original matrix
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			for(size_t i = 0; i < m; ++i){
				sum += RNP::Traits<T>::abs(A[i+j*m] - B[i+j*m]);
			}
		}
		std::cout << "(A - R*Q) norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Now treat B as an n-by-m matrix, copy A' into it,
	// and apply Q from the left
	for(size_t j = 0; j < m; ++j){
		for(size_t i = 0; i < n; ++i){
			B[i+j*n] = RNP::Traits<T>::conj(A[j+i*m]);
		}
	}
	if(0){
		std::cout << "B = A':" << std::endl;
		RNP::Matrix<T> mB(n, m, B, n);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	RNP::LA::RQ::MultQ("L", "N", n, m, m, Afac, m, tau, B, n, &lwork, work);
	if(0){
		std::cout << "B = R':" << std::endl;
		RNP::Matrix<T> mB(n, m, B, n);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// We should recover R'
	if(1){
		T sum = 0;
		for(size_t j = n-m; j < n; ++j){
			for(size_t i = 0; i <= j-(n-m); ++i){
				sum += RNP::Traits<T>::abs(Afac[i+j*m] - RNP::Traits<T>::conj(B[j+i*n]));
			}
		}
		std::cout << "R' norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Now set B = R', and apply Q' from the left to get A'
	RNP::BLAS::Set(n, m, T(0), T(0), B, n);
	for(size_t j = n-m; j < n; ++j){
		for(size_t i = 0; i <= j-(n-m); ++i){
			B[j+i*n] = RNP::Traits<T>::conj(Afac[i+j*m]);
		}
	}
	RNP::LA::RQ::MultQ("L", "C", n, m, m, Afac, m, tau, B, n, &lwork, work);
	// We should recover A'
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			for(size_t i = 0; i < m; ++i){
				sum += RNP::Traits<T>::abs(A[i+j*m] - RNP::Traits<T>::conj(B[j+i*n]));
			}
		}
		std::cout << "A' norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	
	// Make Q
	T *Q = new T[n*n];
	RNP::BLAS::Copy(m, n, Afac, m, Q, m);
	delete [] work; work = NULL; lwork = 0;
	
	RNP::LA::RQ::GenerateQ(m, n, m, Q, m, tau, &lwork, work);
	//lwork = n;
	work = new T[lwork];
	RNP::LA::RQ::GenerateQ(m, n, m, Q, m, tau, &lwork, work);
	//RNP::LA::RQ::GenerateQ_unblocked(m, n, m, Q, m, tau, work);
	
	if(0){
		std::cout << "Q:" << std::endl;
		RNP::Matrix<T> mQ(m, n, Q, m);
		std::cout << RNP::IO::Chop(mQ) << std::endl << std::endl;
	}
	
	// Form Q*Q' (the rows of Q are orthonormal)
	T *QQ = new T[n*n];
	RNP::BLAS::MultMM("N", "C", m, m, n, 1., Q, m, Q, m, 0., QQ, m);
	
	if(0){
		std::cout << "Q' * Q:" << std::endl;
		RNP::Matrix<T> mQQ(m, m, QQ, m);
		std::cout << RNP::IO::Chop(mQQ) << std::endl << std::endl;
	}
	// Check to see if we get I
	if(1){
		T sum = 0;
		for(size_t j = 0; j < m; ++j){
			for(size_t i = 0; i < m; ++i){
				T delta = (i == j ? 1 : 0);
				sum += RNP::Traits<T>::abs(QQ[i+j*m] - delta);
			}
		}
		std::cout << "Q' * Q - I norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Form R*Q in B
	//  Form R in QQ
	RNP::BLAS::Set(m, m, T(0), T(0), QQ, m);
	RNP::LA::Triangular::Copy("U", m, m, &Afac[0+(n-m)*m], m, QQ, m);
	if(0){
		std::cout << "QQ = R:" << std::endl;
		RNP::Matrix<T> mB(m, m, QQ, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	RNP::BLAS::MultMM("N", "N", m, n, m, T(1), QQ, m, Q, m, T(0), B, m);
	
	if(0){
		std::cout << "B = R*Q:" << std::endl;
		RNP::Matrix<T> mB(m, n, B, m);
		std::cout << RNP::IO::Chop(mB) << std::endl << std::endl;
	}
	// We should recover the original matrix
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n; ++j){
			for(size_t i = 0; i < m; ++i){
				sum += RNP::Traits<T>::abs(A[i+j*m] - B[i+j*m]);
			}
		}
		std::cout << "(A - R*Q) norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	// Generate the rows of Q corresponding to the nullspace of A
	RNP::BLAS::Set(n, n, T(0), T(1), Q, n);
	RNP::LA::RQ::MultQ("L", "N", n, n, m, Afac, m, tau, Q, n, &lwork, work);
	if(0){
		std::cout << "Q:" << std::endl;
		RNP::Matrix<T> mQ(n, n, Q, n);
		std::cout << RNP::IO::Chop(mQ) << std::endl << std::endl;
	}
	// The first n-m rows of Q span the nullspace of A
	RNP::BLAS::MultMM("N", "C", m, n-m, n, T(1), A, m, Q, n, T(0), QQ, m);
	
	if(1){
		T sum = 0;
		for(size_t j = 0; j < n-m; ++j){
			for(size_t i = 0; i < m; ++i){
				sum += RNP::Traits<T>::abs(QQ[i+j*m]);
			}
		}
		std::cout << "A*Qn norm-1 error: " << std::abs(sum)*rsnrm << std::endl;
	}
	
	delete [] QQ;
	delete [] Q;
	delete [] B;
	delete [] Afac;
	delete [] A;
	delete [] tau;
	delete [] work;
}
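
The last block of this test relies on a standard property of the RQ factorization of a fat matrix (a general fact, not specific to RNP2): if A is m x n with m <= n and A = R Q with Q an n x n unitary matrix, then

$$ A\,Q^{H} = \begin{pmatrix} 0 & R \end{pmatrix}, $$

so the first n - m rows of Q form an orthonormal basis of the nullspace of A. That is exactly what the final check measures: it multiplies A by the conjugate transpose of those rows and verifies that the result is numerically zero.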
Code Example #8
void ScatteringPower::SetBij(const size_t &idx, const REAL newB)
{
    mClock.Click();
    mIsIsotropic=false;
    mB(idx) = newB;
}
Code Example #9
REAL ScatteringPower::GetBij(const size_t &idx) const
{
    return mB(idx);
}
Code Example #10
antlr::RefToken FMTLexer::nextToken()
{
	antlr::RefToken theRetToken;
	for (;;) {
		antlr::RefToken theRetToken;
		int _ttype = antlr::Token::INVALID_TYPE;
		resetText();
		try {   // for lexical and char stream error handling
			switch ( LA(1)) {
			case 0x22 /* '\"' */ :
			case 0x27 /* '\'' */ :
			{
				mSTRING(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x28 /* '(' */ :
			{
				mLBRACE(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x29 /* ')' */ :
			{
				mRBRACE(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x2f /* '/' */ :
			{
				mSLASH(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x2c /* ',' */ :
			{
				mCOMMA(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x41 /* 'A' */ :
			case 0x61 /* 'a' */ :
			{
				mA(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x3a /* ':' */ :
			{
				mTERM(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x24 /* '$' */ :
			{
				mNONL(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x46 /* 'F' */ :
			case 0x66 /* 'f' */ :
			{
				mF(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x44 /* 'D' */ :
			case 0x64 /* 'd' */ :
			{
				mD(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x45 /* 'E' */ :
			case 0x65 /* 'e' */ :
			{
				mE(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x47 /* 'G' */ :
			case 0x67 /* 'g' */ :
			{
				mG(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x49 /* 'I' */ :
			case 0x69 /* 'i' */ :
			{
				mI(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x4f /* 'O' */ :
			case 0x6f /* 'o' */ :
			{
				mO(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x42 /* 'B' */ :
			case 0x62 /* 'b' */ :
			{
				mB(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x5a /* 'Z' */ :
			{
				mZ(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x7a /* 'z' */ :
			{
				mZZ(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x51 /* 'Q' */ :
			case 0x71 /* 'q' */ :
			{
				mQ(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x48 /* 'H' */ :
			case 0x68 /* 'h' */ :
			{
				mH(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x54 /* 'T' */ :
			case 0x74 /* 't' */ :
			{
				mT(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x4c /* 'L' */ :
			case 0x6c /* 'l' */ :
			{
				mL(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x52 /* 'R' */ :
			case 0x72 /* 'r' */ :
			{
				mR(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x58 /* 'X' */ :
			case 0x78 /* 'x' */ :
			{
				mX(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x2e /* '.' */ :
			{
				mDOT(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x9 /* '\t' */ :
			case 0x20 /* ' ' */ :
			{
				mWHITESPACE(true);
				theRetToken=_returnToken;
				break;
			}
			case 0x2b /* '+' */ :
			case 0x2d /* '-' */ :
			case 0x30 /* '0' */ :
			case 0x31 /* '1' */ :
			case 0x32 /* '2' */ :
			case 0x33 /* '3' */ :
			case 0x34 /* '4' */ :
			case 0x35 /* '5' */ :
			case 0x36 /* '6' */ :
			case 0x37 /* '7' */ :
			case 0x38 /* '8' */ :
			case 0x39 /* '9' */ :
			{
				mNUMBER(true);
				theRetToken=_returnToken;
				break;
			}
			default:
				if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x4d /* 'M' */ ) && (LA(3) == 0x4f /* 'O' */ ) && (LA(4) == 0x41 /* 'A' */ )) {
					mCMOA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x4d /* 'M' */ ) && (LA(3) == 0x4f /* 'O' */ ) && (LA(4) == 0x49 /* 'I' */ )) {
					mCMOI(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x4d /* 'M' */ ) && (LA(3) == 0x6f /* 'o' */ )) {
					mCMoA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x44 /* 'D' */ ) && (LA(3) == 0x49 /* 'I' */ )) {
					mCDI(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x4d /* 'M' */ ) && (LA(3) == 0x49 /* 'I' */ )) {
					mCMI(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x53 /* 'S' */ ) && (LA(3) == 0x49 /* 'I' */ )) {
					mCSI(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x53 /* 'S' */ ) && (LA(3) == 0x46 /* 'F' */ )) {
					mCSF(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x44 /* 'D' */ ) && (LA(3) == 0x57 /* 'W' */ )) {
					mCDWA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x44 /* 'D' */ ) && (LA(3) == 0x77 /* 'w' */ )) {
					mCDwA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x41 /* 'A' */ ) && (LA(3) == 0x50 /* 'P' */ )) {
					mCAPA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x41 /* 'A' */ ) && (LA(3) == 0x70 /* 'p' */ )) {
					mCApA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x25 /* '%' */ ) && (LA(2) == 0x22 /* '\"' */  || LA(2) == 0x27 /* '\'' */ )) {
					mCSTRING(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x6d /* 'm' */ )) {
					mCmoA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x59 /* 'Y' */ )) {
					mCYI(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x48 /* 'H' */ )) {
					mCHI(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x68 /* 'h' */ )) {
					mChI(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x64 /* 'd' */ )) {
					mCdwA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */ ) && (LA(2) == 0x61 /* 'a' */ )) {
					mCapA(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x43 /* 'C' */  || LA(1) == 0x63 /* 'c' */ ) && (true)) {
					mC(true);
					theRetToken=_returnToken;
				}
				else if ((LA(1) == 0x25 /* '%' */ ) && (true)) {
					mPERCENT(true);
					theRetToken=_returnToken;
				}
			else {
				if (LA(1)==EOF_CHAR)
				{
					uponEOF();
					_returnToken = makeToken(antlr::Token::EOF_TYPE);
				}
				else {throw antlr::NoViableAltForCharException(LA(1), getFilename(), getLine(), getColumn());}
			}
			}
			if ( !_returnToken )
				goto tryAgain; // found SKIP token

			_ttype = _returnToken->getType();
			_ttype = testLiteralsTable(_ttype);
			_returnToken->setType(_ttype);
			return _returnToken;
		}
		catch (antlr::RecognitionException& e) {
				throw antlr::TokenStreamRecognitionException(e);
		}
		catch (antlr::CharStreamIOException& csie) {
			throw antlr::TokenStreamIOException(csie.io);
		}
		catch (antlr::CharStreamException& cse) {
			throw antlr::TokenStreamException(cse.getMessage());
		}
tryAgain:;
	}
}