예제 #1
0
a2de::Matrix3x3 Matrix3x3::Inverse(const Matrix3x3& mat) {

    //Minors, Cofactors, Adjugates method.
    //See http://www.mathsisfun.com/algebra/matrix-inverse-minors-cofactors-adjugate.html

    //Index layout of the flat _indicies array:
    //[00 01 02] [0 1 2]
    //[10 11 12] [3 4 5]
    //[20 21 22] [6 7 8]

    //Calculate minors: m(i,j) is the determinant of the 2x2 submatrix
    //obtained by deleting row i and column j of the 3x3 matrix.
    //BUGFIX: m01 and m11 previously ended with _indicies[7]; deleting
    //column 1 keeps columns 0 and 2, so the bottom-right entry is
    //_indicies[8] in both cases.
    double m00 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[4], mat._indicies[5], mat._indicies[7], mat._indicies[8]));
    double m01 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[3], mat._indicies[5], mat._indicies[6], mat._indicies[8]));
    double m02 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[3], mat._indicies[4], mat._indicies[6], mat._indicies[7]));

    double m10 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[1], mat._indicies[2], mat._indicies[7], mat._indicies[8]));
    double m11 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[0], mat._indicies[2], mat._indicies[6], mat._indicies[8]));
    double m12 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[0], mat._indicies[1], mat._indicies[6], mat._indicies[7]));

    double m20 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[1], mat._indicies[2], mat._indicies[4], mat._indicies[5]));
    double m21 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[0], mat._indicies[2], mat._indicies[3], mat._indicies[5]));
    double m22 = Matrix2x2::CalculateDeterminant(Matrix2x2(mat._indicies[0], mat._indicies[1], mat._indicies[3], mat._indicies[4]));

    //Apply the checkerboard (+ - + / - + - / + - +) of cofactor signs.
    Matrix3x3 cofactors(m00, -m01, m02,
                       -m10, m11, -m12,
                        m20, -m21, m22);

    //Adjugate = transpose of the cofactor matrix.
    Matrix3x3 adjugate(Matrix3x3::Transpose(cofactors));

    //NOTE(review): no singularity check — a zero determinant divides by
    //zero here and yields a non-finite matrix; confirm callers guard this.
    double det_mat = mat.CalculateDeterminant();
    double inv_det = 1.0 / det_mat;

    return inv_det * adjugate;
}
예제 #2
0
// Determinant via Laplace (cofactor) expansion along the given row.
// Recursive and O(n!) — intended for small matrices only.
Matrix::value_type Matrix::determinant(size_t row) const {
  if (m_size == 1) {
    return m_data[0][0];  // base case: 1x1 determinant is the single entry
  }
  Matrix::value_type result = 0;  // implicit cast
  // Track the alternating cofactor sign (+, -, +, ...) directly instead of
  // calling std::pow(-1.0, col), which is a costly transcendental function
  // per iteration for what is just a parity flip.
  Matrix::value_type sign = 1;
  for (size_t col = 0; col < m_size; ++col) {
    // Minor of (row, col): the (row, col)-deleted submatrix (this Matrix
    // constructor builds it). The original misnamed this 'adjugate'.
    Matrix minor(*this, row, col);
    result += sign * m_data[row][col] * minor.determinant();
    sign = -sign;
  }
  return result;
}
예제 #3
0
// Inverse via the adjugate formula: inv(A)[r][c] = cofactor(c, r) / det(A).
// Throws MatrixInversionException when the matrix is singular.
Matrix Matrix::getInversed() const {
  Matrix::value_type det = determinant();
  if (util::equals(det, 0.0)) {
    ERR("Determinant is zero!");
    throw MatrixInversionException();
  }
  Matrix inversed(m_size);
  for (size_t row = 0; row < m_size; ++row) {
    for (size_t col = 0; col < m_size; ++col) {
      // Minor with row 'col' and column 'row' removed — swapping the indices
      // applies the transpose that turns cofactors into the adjugate.
      Matrix minor(*this, col, row);
      Matrix::value_type minorDet = minor.determinant();
      // Checkerboard sign: + when row+col is even, - when odd. A parity test
      // replaces the original std::pow(-1.0, row + col) transcendental call.
      // The zero special case is kept so a zero minor stays +0.0 and never
      // becomes -0.0 in the output.
      double sign = util::equals(minorDet, 0.0) ? 1.0
                                                : ((row + col) % 2 == 0 ? 1.0 : -1.0);
      inversed[row][col] = sign * minorDet / det;
    }
  }
  return inversed;
}
//Set up lattice minimization: validate latt-move-scale against the symmetry
//group, then construct an orthonormal basis of strain matrices spanning the
//symmetry-allowed, non-fixed lattice deformations.
LatticeMinimizer::LatticeMinimizer(Everything& e) : e(e), Rorig(e.gInfo.R)
{
	logPrintf("\n--------- Lattice Minimization ---------\n");
	
	//Ensure that lattice-move-scale is commensurate with symmetries:
	//if any symmetry operation couples lattice vectors i and j (m(i,j) != 0),
	//the two must share the same move-scale factor or minimization would
	//break the symmetry.
	std::vector<matrix3<int>> sym = e.symm.getMatrices();
	for(const matrix3<int>& m: sym)
		for(int i=0; i<3; i++)
			for(int j=0; j<3; j++)
				if(m(i,j) && e.cntrl.lattMoveScale[i] != e.cntrl.lattMoveScale[j])
					die("latt-move-scale is not commensurate with symmetries:\n"
						"\t(Lattice vectors #%d and #%d are connected by symmetry,\n"
						"\tbut have different move scale factors %lg != %lg).\n",
						i, j, e.cntrl.lattMoveScale[i], e.cntrl.lattMoveScale[j]);
	
	//Check which lattice vectors can be altered:
	//a direction is fixed if its move scale is zero or it is Coulomb-truncated.
	vector3<bool> isFixed, isTruncated = e.coulombParams.isTruncated();
	for(int k=0; k<3; k++)
		isFixed[k] = (e.cntrl.lattMoveScale[k]==0.) || isTruncated[k];
	
	//Create a orthonormal basis for strain commensurate with symmetries:
	//iterate over the 6 independent entries of a symmetric 3x3 matrix
	//(k<3: diagonal entries; k>=3: the three off-diagonal pairs).
	for(int k=0; k<6; k++)
	{	//Initialize a basis element for arbitrary symmetric matrices:
		matrix3<int> s; //all zero:
		if(k<3) //diagonal strain
		{	s(k,k) = 1;
			if(isFixed[k]) continue; //strain alters fixed direction
		}
		else //off-diagonal strain
		{	int i=(k+1)%3;
			int j=(k+2)%3;
			s(i,j) = s(j,i) = 1;
			if(isFixed[i] || isFixed[j]) continue;  //strain alters fixed direction
		}
		//Symmetrize: accumulate the (unnormalized) group average m^-1 s m over
		//all symmetry operations, projecting s onto the symmetric subspace.
		matrix3<int> sSym;
		for(const matrix3<int>& m: sym)
		{	matrix3<int> mInv = det(m) * adjugate(m); //since |det(m)| = 1
			sSym += mInv * s * m;
		}
		//Orthonormalize w.r.t previous basis elements:
		//Gram-Schmidt under the dot() inner product on matrices.
		matrix3<> strain(sSym); //convert from integer to double matrix
		for(const matrix3<>& sPrev: strainBasis)
			strain -= sPrev * dot(sPrev, strain);
		double strainNorm = nrm2(strain);
		//NOTE(review): strainNorm is a norm but symmThresholdSq is named as a
		//squared threshold — confirm the comparison is intended as written.
		if(strainNorm < symmThresholdSq) continue; //linearly dependent
		strainBasis.push_back((1./strainNorm) * strain);
	}
	if(!strainBasis.size())
		die("All lattice-vectors are constrained by coulomb truncation and/or\n"
			"latt-move-scale: please disable lattice minimization.\n");
	
	//Print initialization status:
	e.latticeMinParams.nDim = strainBasis.size();
	logPrintf("Minimization of dimension %lu over strains spanned by:\n", strainBasis.size());
	for(const matrix3<>& s: strainBasis)
	{	s.print(globalLog, " %lg ");
		logPrintf("\n");
	}

	//Step size for finite differencing — presumably used when numerically
	//probing the energy along strain directions; confirm against usage.
	h = 1e-5;
}