template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
{
  typedef typename MatrixType::Index Index;
  /* this test covers the following files:
     EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h)
  */
  Index rows = m.rows();
  Index cols = m.cols();

  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  RealScalar largerEps = 10*test_precision<RealScalar>();

  MatrixType a = MatrixType::Random(rows,cols);
  MatrixType a1 = MatrixType::Random(rows,cols);
  MatrixType symmA =  a.adjoint() * a + a1.adjoint() * a1;
  MatrixType symmC = symmA;
  
  svd_fill_random(symmA,Symmetric);

  symmA.template triangularView<StrictlyUpper>().setZero();
  symmC.template triangularView<StrictlyUpper>().setZero();

  MatrixType b = MatrixType::Random(rows,cols);
  MatrixType b1 = MatrixType::Random(rows,cols);
  MatrixType symmB = b.adjoint() * b + b1.adjoint() * b1;
  symmB.template triangularView<StrictlyUpper>().setZero();
  
  CALL_SUBTEST( selfadjointeigensolver_essential_check(symmA) );

  SelfAdjointEigenSolver<MatrixType> eiSymm(symmA);
  // generalized eigenproblem
  GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmC, symmB);

  SelfAdjointEigenSolver<MatrixType> eiSymmNoEivecs(symmA, false);
  VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success);
  VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());
  
  // generalized eigen problem Ax = lBx
  eiSymmGen.compute(symmC, symmB,Ax_lBx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
          symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));

  // generalized eigen problem BAx = lx
  eiSymmGen.compute(symmC, symmB,BAx_lx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmB.template selfadjointView<Lower>() * (symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
         (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));

  // generalized eigen problem ABx = lx
  eiSymmGen.compute(symmC, symmB,ABx_lx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmC.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
         (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));


  eiSymm.compute(symmC);
  MatrixType sqrtSymmA = eiSymm.operatorSqrt();
  VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
  VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());

  MatrixType id = MatrixType::Identity(rows, cols);
  VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));

  SelfAdjointEigenSolver<MatrixType> eiSymmUninitialized;
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.info());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvalues());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());

  eiSymmUninitialized.compute(symmA, false);
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());

  // test Tridiagonalization's methods
  Tridiagonalization<MatrixType> tridiag(symmC);
  VERIFY_IS_APPROX(tridiag.diagonal(), tridiag.matrixT().diagonal());
  VERIFY_IS_APPROX(tridiag.subDiagonal(), tridiag.matrixT().template diagonal<-1>());
  Matrix<RealScalar,Dynamic,Dynamic> T = tridiag.matrixT();
  if(rows>1 && cols>1) {
    // FIXME check that upper and lower part are 0:
    //VERIFY(T.topRightCorner(rows-2, cols-2).template triangularView<Upper>().isZero());
  }
  VERIFY_IS_APPROX(tridiag.diagonal(), T.diagonal());
  VERIFY_IS_APPROX(tridiag.subDiagonal(), T.template diagonal<1>());
  VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
  VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT() * tridiag.matrixQ().adjoint());
  
  // Test computation of eigenvalues from tridiagonal matrix
  if(rows > 1)
  {
    SelfAdjointEigenSolver<MatrixType> eiSymmTridiag;
    eiSymmTridiag.computeFromTridiagonal(tridiag.matrixT().diagonal(), tridiag.matrixT().diagonal(-1), ComputeEigenvectors);
    VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmTridiag.eigenvalues());
    VERIFY_IS_APPROX(tridiag.matrixT(), eiSymmTridiag.eigenvectors().real() * eiSymmTridiag.eigenvalues().asDiagonal() * eiSymmTridiag.eigenvectors().real().transpose());
  }

  if (rows > 1 && rows < 20)
  {
    // Test matrix with NaN
    symmC(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
    SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmC);
    VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
  }

  // regression test for bug 1098
  {
    SelfAdjointEigenSolver<MatrixType> eig(a.adjoint() * a);
    eig.compute(a.adjoint() * a);
  }

  // regression test for bug 478
  {
    a.setZero();
    SelfAdjointEigenSolver<MatrixType> ei3(a);
    VERIFY_IS_EQUAL(ei3.info(), Success);
    VERIFY_IS_MUCH_SMALLER_THAN(ei3.eigenvalues().norm(),RealScalar(1));
    VERIFY((ei3.eigenvectors().transpose()*ei3.eigenvectors().transpose()).eval().isIdentity());
  }
}
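
// ---------------------------------------------------------------------------
// A minimal, stand-alone sketch (not part of the test above) of the API the
// test exercises; it assumes only <Eigen/Dense>. SelfAdjointEigenSolver returns
// the eigenvalues sorted in increasing order and orthonormal eigenvectors, so
// the input is recovered as V * D * V^T.
// ---------------------------------------------------------------------------
#include <Eigen/Dense>
#include <iostream>

void selfadjoint_eigensolver_example()
{
  Eigen::Matrix3d A;
  A << 2, 1, 0,
       1, 2, 1,
       0, 1, 2;                                       // symmetric input

  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);
  if (es.info() != Eigen::Success) return;            // iteration failed to converge

  Eigen::Vector3d evals = es.eigenvalues();           // ascending order
  Eigen::Matrix3d V     = es.eigenvectors();          // orthonormal columns

  // Reconstruction error should be at round-off level.
  std::cout << (V * evals.asDiagonal() * V.transpose() - A).norm() << "\n";
}
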
//Simulate the price process:
void
Sim_Price_Disc::simulate(int retries) {

	//If we are re-simulating, we need to reset:
	if (simulated) {
	
		//X.simulate(retries);			//Re-simulate X
		X.simulate2();
		
		//Clear Y
		Y.clear();
		Y.push_back(y_0);
		
		//Reset the SBM Z:
		Z.reset();						
	
	}
	else {
	
		simulated = true;
		
		//Simulate volatility if necessary:
		if (X.is_simulated() == false) {
	
			//X.simulate(retries);
			X.simulate2();
		
		}
		
	}

	//Get the timestep vector from the volatility:
	vector<double> timestep = X.get_timestep();
	
	double h_t, drift;
	matrix vola(n,n), tmp(n,n);
	
	//For each discretisation step:
	for (unsigned int i=1; i<timestep.size(); ++i) {
	
		//Compute stepsize of the Brownian motion (in case this has changed):
		h_t = timestep[i] - timestep[i-1];

		//Even though we are not interested in eigenvalues anymore, we need the decomposition
		// for the matrix square root.
		matrix V_x(n,n), D_x(n,n);
		eig(V_x, D_x, X.get_X_i(i-1));
		
		//Compute next simulation point using the discretisation formula (see Section 5)
		matrix V_r(n,n), D_r(n,n);
		eig(V_r, D_r, (eye(n) - R * flens::transpose(R)));

		drift = (r - 0.5*trace(X.get_X_i(i-1))) * h_t;

		mat4mult(vola, Z.increment(h_t), V_r, matsqrt_diag(D_r), flens::transpose(V_r));
		tmp = X.get_W_incr_i(i) * flens::transpose(R) + vola;

		mat4mult(vola, V_x, matsqrt_diag(D_x), flens::transpose(V_x), tmp);

		//Add simulation point to storage:
    	Y.push_back(Y[i-1] + drift + trace(vola));

	}
	
}	
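
//For illustration only (not part of Sim_Price_Disc): the decomposition-based
//matrix square root used above (eig followed by matsqrt_diag) written with
//Eigen instead of the project's flens matrix type and helpers. For a symmetric
//positive semi-definite X, sqrt(X) = V * sqrt(D) * V^T.
#include <Eigen/Dense>

Eigen::MatrixXd symmetric_matrix_sqrt(const Eigen::MatrixXd& X)
{
	Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(X);
	//Clamp tiny negative eigenvalues caused by round-off before taking the root:
	Eigen::VectorXd d = es.eigenvalues().cwiseMax(0.0).cwiseSqrt();
	return es.eigenvectors() * d.asDiagonal() * es.eigenvectors().transpose();
}
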
void bug_1204()
{
  SparseMatrix<double> A(2,2);
  A.setIdentity();
  SelfAdjointEigenSolver<Eigen::SparseMatrix<double> > eig(A);
}
Example #4
GURLS_EXPORT void eig(const gMat2D<float>& A, gMat2D<float>& V, gVec<float>& W){
    gVec<float> tmp(W.getSize());
    tmp = 0;
    eig(A, V, W, tmp);
}
Example #5
File: gmath.cpp  Project: elen4/GURLS
GURLS_EXPORT void eig(const gMat2D<float>& A, gVec<float>& W)
{
    gVec<float> tmp = W;
    eig(A, W, tmp);
}
// -----------------------------------------------------------------------------
//
// -----------------------------------------------------------------------------
void CalculateTriangleGroupCurvatures::operator()() const
{

  // Get the Triangles Array
//  DREAM3D::SurfaceMesh::FaceList_t::Pointer trianglesPtr = m_SurfaceMeshDataContainer->getFaces();
//  DREAM3D::SurfaceMesh::Face_t* triangles = trianglesPtr->GetPointer(0);

  IDataArray::Pointer flPtr = m_SurfaceMeshDataContainer->getFaceData(DREAM3D::FaceData::SurfaceMeshFaceLabels);
  DataArray<int32_t>* faceLabelsPtr = DataArray<int32_t>::SafePointerDownCast(flPtr.get());
  int32_t* faceLabels = faceLabelsPtr->GetPointer(0);

  // Instantiate a FindNRingNeighbors class to use during the loop
  FindNRingNeighbors::Pointer nRingNeighborAlg = FindNRingNeighbors::New();

  // Make Sure we have triangle centroids calculated
  IDataArray::Pointer centroidPtr = m_SurfaceMeshDataContainer->getFaceData(DREAM3D::FaceData::SurfaceMeshFaceCentroids);
  if (NULL == centroidPtr.get())
  {
    std::cout << "Triangle Centroids are required for this algorithm" << std::endl;
    return;
  }
  DataArray<double>* centroids = DataArray<double>::SafePointerDownCast(centroidPtr.get());

  // Make sure we have triangle normals calculated
  IDataArray::Pointer normalPtr = m_SurfaceMeshDataContainer->getFaceData(DREAM3D::FaceData::SurfaceMeshFaceNormals);
  if (NULL == normalPtr.get())
  {
    std::cout << "Triangle Normals are required for this algorithm" << std::endl;
    return;
  }
  DataArray<double>* normals = DataArray<double>::SafePointerDownCast(normalPtr.get());


  int32_t* fl = faceLabels + m_TriangleIds[0] * 2;
  int grain0 = 0;
  int grain1 = 0;
  if (fl[0] < fl[1])
  {
    grain0 = fl[0];
    grain1 = fl[1];
  }
  else
  {
    grain0 = fl[1];
    grain1 = fl[0];
  }

  bool computeGaussian = (m_GaussianCurvature.get() != NULL);
  bool computeMean = (m_MeanCurvature.get() != NULL);
  bool computeDirection = (m_PrincipleDirection1.get() != NULL);

  std::stringstream ss;
  std::vector<int>::size_type tCount = m_TriangleIds.size();
  // For each triangle in the group
  for(std::vector<int>::size_type i = 0; i < tCount; ++i)
  {
    if (m_ParentFilter->getCancel() == true) { return; }
    int triId = m_TriangleIds[i];
    nRingNeighborAlg->setTriangleId(triId);
    nRingNeighborAlg->setRegionId0(grain0);
    nRingNeighborAlg->setRegionId1(grain1);
    nRingNeighborAlg->setRing(m_NRing);
    nRingNeighborAlg->setSurfaceMeshDataContainer(m_SurfaceMeshDataContainer);
    nRingNeighborAlg->generate();

    DREAM3D::SurfaceMesh::UniqueFaceIds_t triPatch = nRingNeighborAlg->getNRingTriangles();
    BOOST_ASSERT(triPatch.size() > 1);

    DataArray<double>::Pointer patchCentroids = extractPatchData(triId, triPatch, centroids->GetPointer(0), std::string("Patch_Centroids"));
    DataArray<double>::Pointer patchNormals = extractPatchData(triId, triPatch, normals->GetPointer(0), std::string("Patch_Normals"));

    // Translate the patch to the 0,0,0 origin
    double sub[3] = {patchCentroids->GetComponent(0,0),patchCentroids->GetComponent(0,1), patchCentroids->GetComponent(0,2)};
    subtractVector3d(patchCentroids, sub);

    double np[3] = {patchNormals->GetComponent(0,0), patchNormals->GetComponent(0,1), patchNormals->GetComponent(0, 2) };

    double seedCentroid[3] = {patchCentroids->GetComponent(0,0), patchCentroids->GetComponent(0,1), patchCentroids->GetComponent(0,2) };
    double firstCentroid[3] = {patchCentroids->GetComponent(1,0), patchCentroids->GetComponent(1,1), patchCentroids->GetComponent(1,2) };

    double temp[3] = {firstCentroid[0] - seedCentroid[0], firstCentroid[1] - seedCentroid[1], firstCentroid[2] - seedCentroid[2]};
    double vp[3] = {0.0, 0.0, 0.0};

    // Cross Product of np and temp
    MatrixMath::NormalizeVector(np);
    MatrixMath::CrossProduct(np, temp, vp);
    MatrixMath::NormalizeVector(vp);

    // get the third orthogonal vector
    double up[3] = {0.0, 0.0, 0.0};
    MatrixMath::CrossProduct(vp, np, up);

    // this constitutes a rotation matrix to a local coordinate system
    double rot[3][3] = {{up[0], up[1], up[2]},
                        {vp[0], vp[1], vp[2]},
                        {np[0], np[1], np[2]} };
    double out[3];
    // Transform all centroids and normals to new coordinate system
    for(size_t m = 0; m < patchCentroids->GetNumberOfTuples(); ++m)
    {
      ::memcpy(out, patchCentroids->GetPointer(m*3), 3*sizeof(double));
      MatrixMath::Multiply3x3with3x1(rot, patchCentroids->GetPointer(m*3), out);
      ::memcpy(patchCentroids->GetPointer(m*3), out, 3*sizeof(double));

      ::memcpy(out, patchNormals->GetPointer(m*3), 3*sizeof(double));
      MatrixMath::Multiply3x3with3x1(rot, patchNormals->GetPointer(m*3), out);
      ::memcpy(patchNormals->GetPointer(m*3), out, 3*sizeof(double));

      // We rotate the normals now but we don't use them yet. If we start using part 3 of Goldfeather's
      // paper then we will need the normals.
    }

    {
      // Solve the Least Squares fit
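      // The patch is fitted in the local frame built above with the quadric
      //   z = 1/2*A*x^2 + B*x*y + 1/2*C*y^2   (plus cubic terms when normals are used),
      // so each row of the design matrix holds those monomials and b holds the z values.
      // The 2x2 matrix M = [A B; B C] assembled below has the principal curvatures as
      // its eigenvalues (up to the sign flip applied later) and the principal directions
      // as its eigenvectors.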
      static const unsigned int NO_NORMALS = 3;
      static const unsigned int USE_NORMALS = 7;
      int cols = NO_NORMALS;
      if (m_UseNormalsForCurveFitting == true)
      { cols = USE_NORMALS; }
      int rows = patchCentroids->GetNumberOfTuples();
      Eigen::MatrixXd A(rows, cols);
      Eigen::VectorXd b(rows);
      double x, y, z;
      for(int m = 0; m < rows; ++m)
      {
        x = patchCentroids->GetComponent(m, 0);
        y = patchCentroids->GetComponent(m, 1);
        z = patchCentroids->GetComponent(m, 2);

        A(m) = 0.5 * x * x;  // 1/2 x^2
        A(m + rows) = x * y; // x*y
        A(m + rows*2) = 0.5 * y * y;  // 1/2 y^2
        if (m_UseNormalsForCurveFitting == true)
        {
          A(m + rows*3) = x*x*x;
          A(m + rows*4) = x*x*y;
          A(m + rows*5) = x*y*y;
          A(m + rows*6) = y*y*y;
        }
        b[m] = z; // The Z Values
      }

      Eigen::Matrix2d M;

      if (false == m_UseNormalsForCurveFitting)
      {
        typedef Eigen::Matrix<double, NO_NORMALS, 1> Vector3d;
        Vector3d sln1 = A.colPivHouseholderQr().solve(b);
        // Now that we have the A, B, C constants we can solve the eigenvalue/eigenvector problem
        // to get the principal curvatures and principal directions.
        M << sln1(0), sln1(1), sln1(1), sln1(2);
      }
      else
      {
        typedef Eigen::Matrix<double, USE_NORMALS, 1> Vector7d;
        Vector7d sln1 = A.colPivHouseholderQr().solve(b);
        // Now that we have the A, B, C, D, E, F & G constants we can solve the eigenvalue/eigenvector problem
        // to get the principal curvatures and principal directions.
        M << sln1(0), sln1(1), sln1(1), sln1(2);
      }

      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> eig(M);
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d>::RealVectorType eValues = eig.eigenvalues();
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d>::MatrixType eVectors = eig.eigenvectors();

      // Kappa1 >= Kappa2
      double kappa1 = eValues(0) * -1;// Kappa 1
      double kappa2 = eValues(1) * -1; //kappa 2
      BOOST_ASSERT(kappa1 >= kappa2);
      m_PrincipleCurvature1->SetValue(triId, kappa1);
      m_PrincipleCurvature2->SetValue(triId, kappa2);

      if (computeGaussian == true)
      {
        m_GaussianCurvature->SetValue(triId, kappa1*kappa2);
      }
      if (computeMean == true)
      {
        m_MeanCurvature->SetValue(triId, (kappa1+kappa2)/2.0);
      }

      if (computeDirection == true)
      {
        Eigen::Matrix3d e_rot_T;
        e_rot_T.row(0) = Eigen::Vector3d(up[0], vp[0], np[0]);
        e_rot_T.row(1) = Eigen::Vector3d(up[1], vp[1], np[1]);
        e_rot_T.row(2) = Eigen::Vector3d(up[2], vp[2], np[2]);

        // Rotate our principal directions back into the original coordinate system
        Eigen::Vector3d dir1 ( eVectors.col(0)(0),  eVectors.col(0)(1), 0.0 );
        dir1 = e_rot_T * dir1;
        ::memcpy(m_PrincipleDirection1->GetPointer(triId * 3), dir1.data(), 3*sizeof(double) );

        Eigen::Vector3d dir2 ( eVectors.col(1)(0),  eVectors.col(1)(1), 0.0 );
        dir2 = e_rot_T * dir2;
        ::memcpy(m_PrincipleDirection2->GetPointer(triId * 3), dir2.data(), 3*sizeof(double) );
      }
    }

  } // End Loop over this triangle

  m_ParentFilter->tbbTaskProgress();
}
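
// -----------------------------------------------------------------------------
// Stand-alone sketch (not part of the filter above; assumes only <Eigen/Dense>):
// the curvature step in isolation. Given hypothetical quadric coefficients
// a, b, c from a fit z = 1/2*a*x^2 + b*x*y + 1/2*c*y^2, the principal curvatures
// are the eigenvalues of [a b; b c] and the principal directions its
// eigenvectors, exactly as in the loop body above.
// -----------------------------------------------------------------------------
#include <Eigen/Dense>

Eigen::Vector2d principal_curvatures_example(double a, double b, double c)
{
  Eigen::Matrix2d M;
  M << a, b,
       b, c;

  Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> eig(M);
  // eigenvalues() is sorted ascending; eigenvectors() columns follow the same order.
  return eig.eigenvalues();
}
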
Example #7
/**
 * Compute the scale factor and bias associated with a vector observer
 * according to the equation:
 * B_k = (I_{3x3} + D)^{-1} \times (O^T A_k H_k + b + \epsilon_k)
 * where k is the sample index,
 *       B_k is the kth measurement
 *       I_{3x3} is a 3x3 identity matrix
 *       D is a 3x3 symmetric matrix of scale factors
 *       O is the orthogonal alignment matrix
 *       A_k is the attitude at the kth sample
 *       b is the bias in the global reference frame
 *       \epsilon_k is the noise at the kth sample
 *
 * After computing the scale factor and bias matrices, the optimal estimate is
 * given by
 * \tilde{B}_k = (I_{3x3} + D)B_k - b
 *
 * This implementation assumes that \epsilon is a constant, white Gaussian noise
 * source common to all k.  The misalignment matrix O
 * is not computed.
 *
 * @param bias[out] The computed bias, in the global frame
 * @param scale[out] The computed scale factor matrix, in the sensor frame.
 * @param samples[in] An array of measurement samples.
 * @param n_samples The number of samples in the array.
 * @param referenceField The magnetic field vector at this location.
 * @param noise The variance (squared one-sigma standard deviation) of the noise
 * observed in the sensor.
 */
void twostep_bias_scale(Vector3f & bias,
                        Matrix3f & scale,
                        const Vector3f samples[],
                        const size_t n_samples,
                        const Vector3f & referenceField,
                        const float noise)
{
    // Define L_k by eq 51 for k = 1 .. n_samples
    Matrix<double, Dynamic, 9> fullSamples(n_samples, 9);
    // \hbar{L} by eq 52, simplified by observing that the common noise term
    // makes this a simple average.
    Matrix<double, 1, 9> centerSample = Matrix<double, 1, 9>::Zero();
    // Define the sample differences z_k by eq 23 a)
    double sampleDeltaMag[n_samples];
    // The center value \hbar{z}
    double sampleDeltaMagCenter = 0;
    // The squared norm of the reference vector
    double refSquaredNorm = referenceField.squaredNorm();

    for (size_t i = 0; i < n_samples; ++i) {
        fullSamples.row(i) << 2 * samples[i].transpose().cast<double>(),
            -(samples[i][0] * samples[i][0]),
            -(samples[i][1] * samples[i][1]),
            -(samples[i][2] * samples[i][2]),
            -2 * (samples[i][0] * samples[i][1]),
            -2 * (samples[i][0] * samples[i][2]),
            -2 * (samples[i][1] * samples[i][2]);

        centerSample += fullSamples.row(i);

        sampleDeltaMag[i]     = samples[i].squaredNorm() - refSquaredNorm;
        sampleDeltaMagCenter += sampleDeltaMag[i];
    }
    sampleDeltaMagCenter /= n_samples;
    centerSample /= n_samples;

    // Define \tilde{L}_k for k = 0 .. n_samples
    Matrix<double, Dynamic, 9> centeredSamples(n_samples, 9);
    // Define \tilde{z}_k for k = 0 .. n_samples
    double centeredMags[n_samples];
    // Compute the term under the summation of eq 57a
    Matrix<double, 9, 1> estimateSummation = Matrix<double, 9, 1>::Zero();
    for (size_t i = 0; i < n_samples; ++i) {
        centeredSamples.row(i) = fullSamples.row(i) - centerSample;
        centeredMags[i] = sampleDeltaMag[i] - sampleDeltaMagCenter;
        estimateSummation += centeredMags[i] * centeredSamples.row(i).transpose();
    }
    estimateSummation /= noise;

    // By eq 57b
    Matrix<double, 9, 9> P_theta_theta_inv = (1.0f / noise) *
                                             centeredSamples.transpose() * centeredSamples;

#ifdef PRINTF_DEBUGGING
    SelfAdjointEigenSolver<Matrix<double, 9, 9> > eig(P_theta_theta_inv);
    std::cout << "P_theta_theta_inverse: \n" << P_theta_theta_inv << "\n\n";
    std::cout << "P_\\tt^-1 eigenvalues: " << eig.eigenvalues().transpose()
              << "\n";
    std::cout << "P_\\tt^-1 eigenvectors:\n" << eig.eigenvectors() << "\n";
#endif

    // The current value of the estimate.  Initialized to \tilde{\theta}^*
    Matrix<double, 9, 1> estimate;
    // By eq 57a
    P_theta_theta_inv.ldlt().solve(estimateSummation, &estimate);

    // estimate i+1 = estimate_i - Fisher^{-1}(at estimate_i)*gradient(theta)
    // Fisher^{-1} = \tilde{Fisher}^-1 + \hbar{Fisher}^{-1}
    size_t count = 0;
    double eta   = 10000;
    while (count++ < 200 && eta > 1e-8) {
        static bool warned = false;
        if (hasNaN(estimate)) {
            std::cout << "WARNING: found NaN at time " << count << "\n";
            warned = true;
        }
#if 0
        SelfAdjointEigenSolver<Matrix3d> eig_E(E_theta(estimate));
        Vector3d S = eig_E.eigenvalues();
        Vector3d W; W << -1 + sqrt(1 + S.coeff(0)),
            -1 + sqrt(1 + S.coeff(1)),
            -1 + sqrt(1 + S.coeff(2));
        scale = (eig_E.eigenvectors() * W.asDiagonal() *
                 eig_E.eigenvectors().transpose()).cast<float>();

        (Matrix3f::Identity() + scale).ldlt().solve(
            estimate.start<3>().cast<float>(), &bias);
        std::cout << "\n\nestimated bias: " << bias.transpose()
                  << "\nestimated scale:\n" << scale;
#endif

        Matrix<double, 1, 9> db_dtheta = dnormb_dtheta(estimate);

        Matrix<double, 9, 1> dJ_dtheta = ::dJ_dtheta(centerSample,
                                                     sampleDeltaMagCenter,
                                                     estimate,
                                                     db_dtheta,
                                                     -3 * noise,
                                                     noise / n_samples);

        // Eq 59, with reused storage on db_dtheta
        db_dtheta = centerSample - db_dtheta;
        Matrix<double, 9, 9> scale = P_theta_theta_inv +
                                     (double(n_samples) / noise) * db_dtheta.transpose() * db_dtheta;

        // eq 14b, mutatis mutandis (grumble, grumble)
        Matrix<double, 9, 1> increment;
        scale.ldlt().solve(dJ_dtheta, &increment);
        estimate -= increment;
        eta = increment.dot(scale * increment);
        std::cout << "eta: " << eta << "\n";
    }
    std::cout << "terminated at eta = " << eta
              << " after " << count << " iterations\n";

    if (!isnan(eta) && !isinf(eta)) {
        // Transform the estimated parameters from [c | E] back into [b | D].
        // See eq 63-65
        SelfAdjointEigenSolver<Matrix3d> eig_E(E_theta(estimate));
        Vector3d S = eig_E.eigenvalues();
        Vector3d W; W << -1 + sqrt(1 + S.coeff(0)),
            -1 + sqrt(1 + S.coeff(1)),
            -1 + sqrt(1 + S.coeff(2));
        scale = (eig_E.eigenvectors() * W.asDiagonal() *
                 eig_E.eigenvectors().transpose()).cast<float>();

        (Matrix3f::Identity() + scale).ldlt().solve(
            estimate.start<3>().cast<float>(), &bias);
    } else {
        // Return nonsense data; the eigensolver can otherwise fall into
        // an infinite loop.
        // TODO: Add error code return
        scale = Matrix3f::Ones() * std::numeric_limits<float>::quiet_NaN();
        bias  = Vector3f::Zero();
    }
}
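
/**
 * Hedged usage sketch (not part of the routine above): applying the correction
 * stated in the header comment, \tilde{B}_k = (I_{3x3} + D) B_k - b, to a single
 * raw measurement once bias and scale have been estimated. Assumes the same
 * Eigen types (Vector3f, Matrix3f) already in scope in this file.
 */
static inline Vector3f twostep_apply(const Vector3f & raw,
                                     const Vector3f & bias,
                                     const Matrix3f & scale)
{
    // (I + D) * B_k - b, per the model documented above
    return (Matrix3f::Identity() + scale) * raw - bias;
}
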
Example #8
/**
 * Compute the scale factor and bias associated with a vector observer
 * according to the equation:
 * B_k = (I_{3x3} + D)^{-1} \times (O^T A_k H_k + b + \epsilon_k)
 * where k is the sample index,
 *       B_k is the kth measurement
 *       I_{3x3} is a 3x3 identity matrix
 *       D is a 3x3 diagonal matrix of scale factors
 *       O is the orthogonal alignment matrix
 *       A_k is the attitude at the kth sample
 *       b is the bias in the global reference frame
 *       \epsilon_k is the noise at the kth sample
 * This implementation assumes that \epsilon is a constant, white Gaussian noise
 * source common to all k.  The misalignment matrix O is not computed, and
 * neither are the off-diagonal elements of D, which correspond to the
 * misalignment scale factors.
 *
 * @param bias[out] The computed bias, in the global frame
 * @param scale[out] The computed scale factor, in the sensor frame
 * @param samples[in] An array of measurement samples.
 * @param n_samples The number of samples in the array.
 * @param referenceField The magnetic field vector at this location.
 * @param noise The variance (squared one-sigma standard deviation) of the noise
 * observed in the sensor.
 */
void twostep_bias_scale(Vector3f & bias,
                        Vector3f & scale,
                        const Vector3f samples[],
                        const size_t n_samples,
                        const Vector3f & referenceField,
                        const float noise)
{
    // Initial estimate for gradient descent starts at eq 37a of ref 2.

    // Define L_k by eq 30 and 28 for k = 1 .. n_samples
    Matrix<double, Dynamic, 6> fullSamples(n_samples, 6);
    // \hbar{L} by eq 33, simplified by observing that the common noise term
    // makes this a simple average.
    Matrix<double, 1, 6> centerSample = Matrix<double, 1, 6>::Zero();
    // Define the sample differences z_k by eq 23 a)
    double sampleDeltaMag[n_samples];
    // The center value \hbar{z}
    double sampleDeltaMagCenter = 0;
    double refSquaredNorm = referenceField.squaredNorm();

    for (size_t i = 0; i < n_samples; ++i) {
        fullSamples.row(i) << 2 * samples[i].transpose().cast<double>(),
            -(samples[i][0] * samples[i][0]),
            -(samples[i][1] * samples[i][1]),
            -(samples[i][2] * samples[i][2]);
        centerSample += fullSamples.row(i);

        sampleDeltaMag[i]     = samples[i].squaredNorm() - refSquaredNorm;
        sampleDeltaMagCenter += sampleDeltaMag[i];
    }
    sampleDeltaMagCenter /= n_samples;
    centerSample /= n_samples;

    // True for all k.
    // double mu = -3*noise;
    // The center value of mu, \bar{mu}
    // double centerMu = mu;
    // The center value of mu, \tilde{mu}
    // double centeredMu = 0;

    // Define \tilde{L}_k for k = 0 .. n_samples
    Matrix<double, Dynamic, 6> centeredSamples(n_samples, 6);
    // Define \tilde{z}_k for k = 0 .. n_samples
    double centeredMags[n_samples];
    // Compute the term under the summation of eq 37a
    Matrix<double, 6, 1> estimateSummation = Matrix<double, 6, 1>::Zero();
    for (size_t i = 0; i < n_samples; ++i) {
        centeredSamples.row(i) = fullSamples.row(i) - centerSample;
        centeredMags[i] = sampleDeltaMag[i] - sampleDeltaMagCenter;
        estimateSummation += centeredMags[i] * centeredSamples.row(i).transpose();
    }
    estimateSummation /= noise; // note: paper supplies 1/noise

    // By eq 37 b).  Note, paper supplies 1/noise here
    Matrix<double, 6, 6> P_theta_theta_inv = (1.0f / noise) *
                                             centeredSamples.transpose() * centeredSamples;
#ifdef PRINTF_DEBUGGING
    SelfAdjointEigenSolver<Matrix<double, 6, 6> > eig(P_theta_theta_inv);
    std::cout << "P_theta_theta_inverse: \n" << P_theta_theta_inv << "\n\n";
    std::cout << "P_\\tt^-1 eigenvalues: " << eig.eigenvalues().transpose()
              << "\n";
    std::cout << "P_\\tt^-1 eigenvectors:\n" << eig.eigenvectors() << "\n";
#endif

    // The Fisher information matrix has a small eigenvalue, with a
    // corresponding eigenvector of about [1e-2 1e-2 1e-2 0.55, 0.55,
    // 0.55].  This means that there is relatively little information
    // about the common gain on the scale factor, which has a
    // corresponding effect on the bias, too. The fixup is performed by
    // the first iteration of the second stage of TWOSTEP, as documented in
    // [1].
    Matrix<double, 6, 1> estimate;
    // By eq 37 a), \tilde{Fisher}^-1
    P_theta_theta_inv.ldlt().solve(estimateSummation, &estimate);

#ifdef PRINTF_DEBUGGING
    Matrix<double, 6, 6> P_theta_theta;
    P_theta_theta_inv.ldlt().solve(Matrix<double, 6, 6>::Identity(), &P_theta_theta);
    SelfAdjointEigenSolver<Matrix<double, 6, 6> > eig2(P_theta_theta);
    std::cout << "eigenvalues: " << eig2.eigenvalues().transpose() << "\n";
    std::cout << "eigenvectors: \n" << eig2.eigenvectors() << "\n";
    std::cout << "estimate summation: \n" << estimateSummation.normalized()
              << "\n";
#endif

    // estimate i+1 = estimate_i - Fisher^{-1}(at estimate_i)*gradient(theta)
    // Fisher^{-1} = \tilde{Fisher}^-1 + \hbar{Fisher}^{-1}
    size_t count = 3;
    while (count-- > 0) {
        // eq 40
        Matrix<double, 1, 6> db_dtheta = dnormb_dtheta(estimate);

        Matrix<double, 6, 1> dJ_dtheta = dJdb(centerSample,
                                              sampleDeltaMagCenter,
                                              estimate,
                                              db_dtheta,
                                              -3 * noise,
                                              noise / n_samples);


        // Eq 39 (with double-negation on term inside parens)
        db_dtheta = centerSample - db_dtheta;
        Matrix<double, 6, 6> scale = P_theta_theta_inv +
                                     (double(n_samples) / noise) * db_dtheta.transpose() * db_dtheta;

        // eq 14b, mutatis mutandis (grumble, grumble)
        Matrix<double, 6, 1> increment;
        scale.ldlt().solve(dJ_dtheta, &increment);
        estimate -= increment;
    }

    // Transform the estimated parameters from [c | e] back into [b | d].
    for (size_t i = 0; i < 3; ++i) {
        scale.coeffRef(i) = -1 + sqrt(1 + estimate.coeff(3 + i));
        bias.coeffRef(i)  = estimate.coeff(i) / sqrt(1 + estimate.coeff(3 + i));
    }
}
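
/**
 * Hedged usage sketch (not part of the routine above): with the diagonal
 * parameterisation returned here, the corrected measurement implied by the
 * model in the header comment is \tilde{B}_k = (I_{3x3} + diag(scale)) B_k - bias,
 * i.e. a component-wise operation. Assumes the same Eigen Vector3f type already
 * in scope in this file.
 */
static inline Vector3f twostep_apply_diagonal(const Vector3f & raw,
                                              const Vector3f & bias,
                                              const Vector3f & scale)
{
    // (1 + scale_i) * raw_i - bias_i for each axis
    return (Vector3f::Ones() + scale).cwiseProduct(raw) - bias;
}
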
Example #9
File: eigsym.cpp  Project: LijieTu/hfcxx
Eigsym::Eigsym(const MatDoub &A, MatDoub &V, VecDoub &lambda) {
	eig(A,V,lambda);
}
/* Function Definitions */
void magCali(const real_T data[6000], creal_T A_i[9], creal_T B[3])
{
  real_T D[20000];
  int32_T i;
  int32_T i0;
  real_T S[100];
  int32_T ind;
  real_T L_S22[16];
  real_T L_S22_i[16];
  real_T b_L_S22[16];
  real_T S22_i[16];
  real_T A[36];
  real_T b_S[24];
  real_T c_S[36];
  real_T norm_v1;
  static const real_T b_A[36] = { 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5,
    0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.25, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, -0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.25 };

  creal_T L[36];
  creal_T V[36];
  creal_T b_max;
  creal_T v1[6];
  real_T norm_VV2;
  creal_T b_S22_i[24];
  creal_T v2[4];
  creal_T v[100];
  creal_T Q[9];
  creal_T L_Q[9];
  creal_T L_Q_i[9];
  creal_T b_L_Q[9];
  creal_T b_v[3];
  creal_T QB[3];
  creal_T b_Q[9];
  real_T norm_VV3;
  creal_T b_L_Q_i[9];
  real_T ar;
  real_T ai;

  /*  magnetometer calibration */
  /*   */
  /*  Jungtaek Kim */
  /*  [email protected] */
  /*   */
  memset(&D[0], 0, 20000U * sizeof(real_T));
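  /* D is the column-major 2000-by-10 design matrix of the ellipsoid fit: row i
     holds [x^2, y^2, z^2, 2*y*z, 2*x*z, 2*x*y, 2*x, 2*y, 2*z, 1], with the raw
     x, y and z channels stored in data[0..1999], data[2000..3999] and
     data[4000..5999] respectively. */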
  for (i = 0; i < 2000; i++) {
    D[i] = data[i] * data[i];
    D[2000 + i] = data[2000 + i] * data[2000 + i];
    D[4000 + i] = data[4000 + i] * data[4000 + i];
    D[6000 + i] = 2.0 * data[2000 + i] * data[4000 + i];
    D[8000 + i] = 2.0 * data[i] * data[4000 + i];
    D[10000 + i] = 2.0 * data[i] * data[2000 + i];
    D[12000 + i] = 2.0 * data[i];
    D[14000 + i] = 2.0 * data[2000 + i];
    D[16000 + i] = 2.0 * data[4000 + i];
    D[18000 + i] = 1.0;
  }
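
  /* S = D' * D is the 10x10 scatter matrix of the design matrix. Its lower-right
     4x4 block S22 is extracted, Cholesky-factored and inverted below to obtain
     S22_i = inv(S22). */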

  for (i0 = 0; i0 < 10; i0++) {
    for (i = 0; i < 10; i++) {
      S[i0 + 10 * i] = 0.0;
      for (ind = 0; ind < 2000; ind++) {
        S[i0 + 10 * i] += D[ind + 2000 * i0] * D[ind + 2000 * i];
      }
    }
  }

  for (i0 = 0; i0 < 4; i0++) {
    for (i = 0; i < 4; i++) {
      L_S22[i + (i0 << 2)] = S[(i + 10 * (6 + i0)) + 6];
    }
  }

  chol(L_S22);
  inv(L_S22, L_S22_i);
  for (i0 = 0; i0 < 4; i0++) {
    for (i = 0; i < 4; i++) {
      b_L_S22[i + (i0 << 2)] = L_S22[i0 + (i << 2)];
    }
  }

  inv(b_L_S22, L_S22);
  for (i0 = 0; i0 < 4; i0++) {
    for (i = 0; i < 4; i++) {
      S22_i[i0 + (i << 2)] = 0.0;
      for (ind = 0; ind < 4; ind++) {
        S22_i[i0 + (i << 2)] += L_S22[i0 + (ind << 2)] * L_S22_i[ind + (i << 2)];
      }
    }
  }

  /*      S22_i = pinv(S22);%I4\S22; */
  for (i0 = 0; i0 < 6; i0++) {
    for (i = 0; i < 4; i++) {
      b_S[i0 + 6 * i] = 0.0;
      for (ind = 0; ind < 4; ind++) {
        b_S[i0 + 6 * i] += S[i0 + 10 * (6 + ind)] * S22_i[ind + (i << 2)];
      }
    }
  }

  for (i0 = 0; i0 < 6; i0++) {
    for (i = 0; i < 6; i++) {
      norm_v1 = 0.0;
      for (ind = 0; ind < 4; ind++) {
        norm_v1 += b_S[i0 + 6 * ind] * S[(ind + 10 * i) + 6];
      }

      c_S[i0 + 6 * i] = S[i0 + 10 * i] - norm_v1;
    }
  }

  for (i0 = 0; i0 < 6; i0++) {
    for (i = 0; i < 6; i++) {
      A[i0 + 6 * i] = 0.0;
      for (ind = 0; ind < 6; ind++) {
        A[i0 + 6 * i] += b_A[i0 + 6 * ind] * c_S[ind + 6 * i];
      }
    }
  }
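
  /* A = inv(C) * (S11 - S12 * inv(S22) * S21), where C is the constant constraint
     matrix b_A of the ellipsoid-specific least-squares fit. The eigenvector of A
     belonging to the largest eigenvalue (selected below) gives the quadratic
     coefficients v1 of the fitted quadric. */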

  eig(A, V, L);
  b_max = L[0];
  ind = 0;
  for (i = 0; i < 6; i++) {
    if (b_max.re < L[i + 6 * i].re) {
      b_max = L[i + 6 * i];
      ind = i;
    }
  }

  memcpy(&v1[0], &V[6 * ind], 6U * sizeof(creal_T));
  if (V[6 * ind].re < 0.0) {
    for (i0 = 0; i0 < 6; i0++) {
      v1[i0].re = -V[i0 + 6 * ind].re;
      v1[i0].im = -V[i0 + 6 * ind].im;
    }
  }

  norm_v1 = norm(v1);
  for (i0 = 0; i0 < 6; i0++) {
    norm_VV2 = v1[i0].im;
    if (v1[i0].im == 0.0) {
      v1[i0].re /= norm_v1;
      v1[i0].im = 0.0;
    } else if (v1[i0].re == 0.0) {
      v1[i0].re = 0.0;
      v1[i0].im = norm_VV2 / norm_v1;
    } else {
      v1[i0].re /= norm_v1;
      v1[i0].im = norm_VV2 / norm_v1;
    }
  }

  for (i0 = 0; i0 < 4; i0++) {
    for (i = 0; i < 6; i++) {
      norm_v1 = 0.0;
      for (ind = 0; ind < 4; ind++) {
        norm_v1 += S22_i[i0 + (ind << 2)] * S[(ind + 10 * i) + 6];
      }

      b_S22_i[i0 + (i << 2)].re = norm_v1;
      b_S22_i[i0 + (i << 2)].im = 0.0;
    }
  }

  for (i0 = 0; i0 < 4; i0++) {
    v2[i0].re = 0.0;
    v2[i0].im = 0.0;
    for (i = 0; i < 6; i++) {
      v2[i0].re += b_S22_i[i0 + (i << 2)].re * v1[i].re - 0.0 * v1[i].im;
      v2[i0].im += b_S22_i[i0 + (i << 2)].re * v1[i].im + 0.0 * v1[i].re;
    }
  }

  for (i0 = 0; i0 < 100; i0++) {
    v[i0].re = 0.0;
    v[i0].im = 0.0;
  }

  v[0] = v1[0];
  v[1] = v1[1];
  v[2] = v1[2];
  v[3] = v1[3];
  v[4] = v1[4];
  v[5] = v1[5];
  v[6].re = -v2[0].re;
  v[6].im = -v2[0].im;
  v[7].re = -v2[1].re;
  v[7].im = -v2[1].im;
  v[8].re = -v2[2].re;
  v[8].im = -v2[2].im;
  v[9].re = -v2[3].re;
  v[9].im = -v2[3].im;
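
  /* Assemble the symmetric 3x3 quadric matrix Q from the quadratic coefficients
     in v; the ellipsoid centre is recovered further below as
     B = -inv(Q) * [v[6]; v[7]; v[8]]. */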
  Q[0] = v[0];
  Q[3] = v[5];
  Q[6] = v[4];
  Q[1] = v[5];
  Q[4] = v[1];
  Q[7] = v[3];
  Q[2] = v[4];
  Q[5] = v[3];
  Q[8] = v[2];
  memcpy(&L_Q[0], &Q[0], 9U * sizeof(creal_T));
  b_chol(L_Q);
  b_inv(L_Q, L_Q_i);
  for (i0 = 0; i0 < 3; i0++) {
    for (i = 0; i < 3; i++) {
      b_L_Q[i + 3 * i0].re = L_Q[i0 + 3 * i].re;
      b_L_Q[i + 3 * i0].im = -L_Q[i0 + 3 * i].im;
    }
  }

  b_inv(b_L_Q, L_Q);

  /*      Q_i = pinv(Q);%I3\Q; */
  for (i0 = 0; i0 < 3; i0++) {
    for (i = 0; i < 3; i++) {
      b_L_Q[i0 + 3 * i].re = 0.0;
      b_L_Q[i0 + 3 * i].im = 0.0;
      for (ind = 0; ind < 3; ind++) {
        b_L_Q[i0 + 3 * i].re += L_Q[i0 + 3 * ind].re * L_Q_i[ind + 3 * i].re -
          L_Q[i0 + 3 * ind].im * L_Q_i[ind + 3 * i].im;
        b_L_Q[i0 + 3 * i].im += L_Q[i0 + 3 * ind].re * L_Q_i[ind + 3 * i].im +
          L_Q[i0 + 3 * ind].im * L_Q_i[ind + 3 * i].re;
      }
    }
  }

  b_v[0] = v[6];
  b_v[1] = v[7];
  b_v[2] = v[8];
  for (i0 = 0; i0 < 3; i0++) {
    norm_v1 = 0.0;
    norm_VV2 = 0.0;
    for (i = 0; i < 3; i++) {
      norm_v1 += b_L_Q[i0 + 3 * i].re * b_v[i].re - b_L_Q[i0 + 3 * i].im * b_v[i]
        .im;
      norm_VV2 += b_L_Q[i0 + 3 * i].re * b_v[i].im + b_L_Q[i0 + 3 * i].im *
        b_v[i].re;
    }

    B[i0].re = -norm_v1;
    B[i0].im = -norm_VV2;
  }

  for (i0 = 0; i0 < 3; i0++) {
    QB[i0].re = 0.0;
    QB[i0].im = 0.0;
    for (i = 0; i < 3; i++) {
      QB[i0].re += Q[i0 + 3 * i].re * B[i].re - Q[i0 + 3 * i].im * B[i].im;
      QB[i0].im += Q[i0 + 3 * i].re * B[i].im + Q[i0 + 3 * i].im * B[i].re;
    }

    b_v[i0].re = B[i0].re;
    b_v[i0].im = -B[i0].im;
  }

  b_max.re = 0.0;
  b_max.im = 0.0;
  for (i = 0; i < 3; i++) {
    b_max.re += b_v[i].re * QB[i].re - b_v[i].im * QB[i].im;
    b_max.im += b_v[i].re * QB[i].im + b_v[i].im * QB[i].re;
  }

  b_max.re -= v[9].re;
  b_max.im -= v[9].im;
  b_sqrt(&b_max);
  memcpy(&b_Q[0], &Q[0], 9U * sizeof(creal_T));
  b_eig(b_Q, L_Q, Q);
  norm_v1 = b_norm(*(creal_T (*)[3])&L_Q[0]);
  norm_VV2 = b_norm(*(creal_T (*)[3])&L_Q[3]);
  norm_VV3 = b_norm(*(creal_T (*)[3])&L_Q[6]);
  for (i0 = 0; i0 < 3; i0++) {
    if (L_Q[i0].im == 0.0) {
      L_Q_i[i0].re = L_Q[i0].re / norm_v1;
      L_Q_i[i0].im = 0.0;
    } else if (L_Q[i0].re == 0.0) {
      L_Q_i[i0].re = 0.0;
      L_Q_i[i0].im = L_Q[i0].im / norm_v1;
    } else {
      L_Q_i[i0].re = L_Q[i0].re / norm_v1;
      L_Q_i[i0].im = L_Q[i0].im / norm_v1;
    }

    if (L_Q[3 + i0].im == 0.0) {
      L_Q_i[3 + i0].re = L_Q[3 + i0].re / norm_VV2;
      L_Q_i[3 + i0].im = 0.0;
    } else if (L_Q[3 + i0].re == 0.0) {
      L_Q_i[3 + i0].re = 0.0;
      L_Q_i[3 + i0].im = L_Q[3 + i0].im / norm_VV2;
    } else {
      L_Q_i[3 + i0].re = L_Q[3 + i0].re / norm_VV2;
      L_Q_i[3 + i0].im = L_Q[3 + i0].im / norm_VV2;
    }

    if (L_Q[6 + i0].im == 0.0) {
      L_Q_i[6 + i0].re = L_Q[6 + i0].re / norm_VV3;
      L_Q_i[6 + i0].im = 0.0;
    } else if (L_Q[6 + i0].re == 0.0) {
      L_Q_i[6 + i0].re = 0.0;
      L_Q_i[6 + i0].im = L_Q[6 + i0].im / norm_VV3;
    } else {
      L_Q_i[6 + i0].re = L_Q[6 + i0].re / norm_VV3;
      L_Q_i[6 + i0].im = L_Q[6 + i0].im / norm_VV3;
    }

    for (i = 0; i < 3; i++) {
      b_L_Q[i0 + 3 * i].re = 0.0;
      b_L_Q[i0 + 3 * i].im = 0.0;
      for (ind = 0; ind < 3; ind++) {
        b_L_Q[i0 + 3 * i].re += L_Q_i[i0 + 3 * ind].re * Q[ind + 3 * i].re -
          L_Q_i[i0 + 3 * ind].im * Q[ind + 3 * i].im;
        b_L_Q[i0 + 3 * i].im += L_Q_i[i0 + 3 * ind].re * Q[ind + 3 * i].im +
          L_Q_i[i0 + 3 * ind].im * Q[ind + 3 * i].re;
      }
    }
  }

  for (i0 = 0; i0 < 3; i0++) {
    for (i = 0; i < 3; i++) {
      b_L_Q_i[i0 + 3 * i].re = 0.0;
      b_L_Q_i[i0 + 3 * i].im = 0.0;
      for (ind = 0; ind < 3; ind++) {
        b_L_Q_i[i0 + 3 * i].re += b_L_Q[i0 + 3 * ind].re * L_Q_i[i + 3 * ind].re
          - b_L_Q[i0 + 3 * ind].im * -L_Q_i[i + 3 * ind].im;
        b_L_Q_i[i0 + 3 * i].im += b_L_Q[i0 + 3 * ind].re * -L_Q_i[i + 3 * ind].
          im + b_L_Q[i0 + 3 * ind].im * L_Q_i[i + 3 * ind].re;
      }
    }
  }

  for (i0 = 0; i0 < 3; i0++) {
    for (i = 0; i < 3; i++) {
      ar = b_L_Q_i[i + 3 * i0].re * 0.569;
      ai = b_L_Q_i[i + 3 * i0].im * 0.569;
      if (b_max.im == 0.0) {
        if (ai == 0.0) {
          A_i[i + 3 * i0].re = ar / b_max.re;
          A_i[i + 3 * i0].im = 0.0;
        } else if (ar == 0.0) {
          A_i[i + 3 * i0].re = 0.0;
          A_i[i + 3 * i0].im = ai / b_max.re;
        } else {
          A_i[i + 3 * i0].re = ar / b_max.re;
          A_i[i + 3 * i0].im = ai / b_max.re;
        }
      } else if (b_max.re == 0.0) {
        if (ar == 0.0) {
          A_i[i + 3 * i0].re = ai / b_max.im;
          A_i[i + 3 * i0].im = 0.0;
        } else if (ai == 0.0) {
          A_i[i + 3 * i0].re = 0.0;
          A_i[i + 3 * i0].im = -(ar / b_max.im);
        } else {
          A_i[i + 3 * i0].re = ai / b_max.im;
          A_i[i + 3 * i0].im = -(ar / b_max.im);
        }
      } else {
        norm_VV3 = fabs(b_max.re);
        norm_v1 = fabs(b_max.im);
        if (norm_VV3 > norm_v1) {
          norm_v1 = b_max.im / b_max.re;
          norm_VV2 = b_max.re + norm_v1 * b_max.im;
          A_i[i + 3 * i0].re = (ar + norm_v1 * ai) / norm_VV2;
          A_i[i + 3 * i0].im = (ai - norm_v1 * ar) / norm_VV2;
        } else if (norm_v1 == norm_VV3) {
          norm_v1 = b_max.re > 0.0 ? 0.5 : -0.5;
          norm_VV2 = b_max.im > 0.0 ? 0.5 : -0.5;
          A_i[i + 3 * i0].re = (ar * norm_v1 + ai * norm_VV2) / norm_VV3;
          A_i[i + 3 * i0].im = (ai * norm_v1 - ar * norm_VV2) / norm_VV3;
        } else {
          norm_v1 = b_max.re / b_max.im;
          norm_VV2 = b_max.im + norm_v1 * b_max.re;
          A_i[i + 3 * i0].re = (norm_v1 * ar + ai) / norm_VV2;
          A_i[i + 3 * i0].im = (norm_v1 * ai - ar) / norm_VV2;
        }
      }
    }
  }
}