// ------------------------------------------------------------------------
// Maps a point from patient/world coordinates into the normalised voxel
// frame of `reference`, applying the same x/y axis swap used by the image
// normalisation code, the ROI offset and the per-series translation.
void transformPointToPatient(const ImageType::Pointer &reference,
		const SeriesTransform &series, const std::vector<int> &roiOffset,
		const ImageType::PointType &input, ImageType::PointType &output)
{
	// Build the reference rotation with the first two columns swapped,
	// mirroring the axis permutation applied elsewhere in this file.
	MatrixType dirMat = reference->GetDirection().GetVnlMatrix();
	MatrixType swappedRot;
	swappedRot.set_size(3,3);
	for(unsigned int row = 0; row < 3; row++)
	{
		swappedRot(row,0) = dirMat(row,1);
		swappedRot(row,1) = dirMat(row,0);
		swappedRot(row,2) = dirMat(row,2);
	}

	// Express the reference origin in the rotated frame; it becomes the
	// translational offset removed from every transformed point.
	const MatrixType invRot = vnl_inverse(swappedRot);
	VectorType originOffset = invRot * reference->GetOrigin().GetVnlVector();

	// Rotate the input point, subtract the origin offset and scale by the
	// (assumed isotropic) reference spacing.
	vnl_vector<double> result = invRot * input.GetVnlVector();
	result = result - originOffset;
	result /= reference->GetSpacing()[0];

	// Finally remove the ROI offset and the per-series translation.
	for(unsigned int axis = 0; axis < 3; axis++)
	{
		result[axis] -= roiOffset[axis];
		result[axis] -= series.translation[axis];
	}

	output[0] = result[0];
	output[1] = result[1];
	output[2] = result[2];
}
/**
 * Extracts the rotation component R of matrix A via the polar
 * decomposition A = (A A^T)^{1/2} R, i.e. R = (A A^T)^{-1/2} A.
 * The matrix square root is computed from the eigensystem of the
 * symmetric matrix B = A A^T.
 *
 * @param A  the (invertible) 3x3 transform matrix to decompose
 * @return   the orthogonal (rotation) factor of A
 */
mitk::DiffusionImageCorrectionFilter::TransformMatrixType
mitk::DiffusionImageCorrectionFilter
::GetRotationComponent(const TransformMatrixType &A)
{
  TransformMatrixType B;

  B = A * A.transpose();

  // get the eigenvalues and eigenvectors of the symmetric matrix B
  typedef double MType;
  vnl_vector< MType > eigvals;
  vnl_matrix< MType > eigvecs;
  vnl_symmetric_eigensystem_compute< MType > ( B, eigvecs, eigvals );

  vnl_matrix_fixed< MType, 3, 3 > eigvecs_fixed;
  eigvecs_fixed.set_columns(0, eigvecs );

  // C carries sqrt(eigenvalues) on the diagonal, so that
  // eigvecs * C * eigvecs^{-1} == B^{1/2}.
  // NOTE(review): assumes all eigenvalues are non-negative (true when A
  // is non-degenerate) — sqrt of a negative eigenvalue would yield NaN.
  TransformMatrixType C;
  C.set_identity();

  for(unsigned int i=0; i<3; i++)
  {
    C(i,i) = std::sqrt( eigvals[i] );
  }

  // R = (B^{1/2})^{-1} * A
  TransformMatrixType S = vnl_inverse( eigvecs_fixed * C * vnl_inverse( eigvecs_fixed )) * A;

  return S;
}
Beispiel #3
0
	// Projects every 3d input point onto a plane one unit in front of the
	// scanner (plane normal = average viewing direction), then expresses
	// the projections as 2d coordinates in that plane.
	//
	// @param Points          3d points seen by the scanner
	// @param ScannerLocation scanner position in world coordinates
	// @return one 2d plane-coordinate point per input point (empty if no input)
	std::vector<vgl_point_2d<double> > ScannerImage(const vector<vgl_point_3d<double> > &Points, const vgl_point_3d<double> &ScannerLocation)
	{
		// Guard: the basis vector below dereferences Intersections[0].
		if(Points.empty())
		{
			return std::vector<vgl_point_2d<double> >();
		}

		std::vector<vgl_point_3d<double> > Intersections(Points.size());
		
		//get all directions from the scanner to each point
		std::vector<vgl_vector_3d<double> > Directions(Points.size());
		for(unsigned int i = 0; i < Points.size(); i++)
		{
			Directions[i] = Points[i] - ScannerLocation;
		}
		
		vgl_vector_3d<double> ProjectionPlaneNormal = VXLHelpers::normalize(AverageDirection(Directions));
			
		vgl_point_3d<double> ProjectionPlanePoint = ScannerLocation + ProjectionPlaneNormal;
		
		// The projection plane is identical for every point, so construct it
		// once instead of rebuilding it on every loop iteration.
		vgl_plane_3d<double> Plane(ProjectionPlaneNormal, ProjectionPlanePoint);

		//find the 3d coords of the points projected on the plane
		for(unsigned int i = 0; i < Points.size(); i++)
		{
			vgl_line_3d_2_points<double> Line(ScannerLocation, Points[i]);
			Intersections[i] = vgl_intersection(Line, Plane);
		}
		
		// In-plane basis vector pointing from the plane origin towards the
		// first projected point.
		vgl_vector_3d<double> b = VXLHelpers::normalize(Intersections[0] - ProjectionPlanePoint);
		
		vnl_double_3x3 R = FindAligningRotation(cross_product(ProjectionPlaneNormal, b), b, ProjectionPlaneNormal);
		
		// R is loop-invariant: invert it once rather than once per point.
		const vnl_double_3x3 Rinv = vnl_inverse(R);

		std::vector<vgl_point_2d<double> > Points2d(Points.size());
		
		for(unsigned int i = 0; i < Points.size(); i++)
		{
			vnl_double_3 temp = Rinv * VXLHelpers::vgl_point_to_vnl_vector(Intersections[i]);
			Points2d[i] = vgl_point_2d<double> (temp(0), temp(1));
		}
		
		return Points2d;
	}
Beispiel #4
0
// Iterative Closest Point registration (Besl & McKay style) between the
// point cloud (m_Pi/m_Pk) and the model (m_Yk). Iterates until the change
// in RMS error drops below Tau or m_maxIterations is reached, then stores
// the model-to-cloud transform in m_solution.
// Returns true on success; false when ANN support is compiled out, the
// mode is not point-to-point, or initialization fails.
bool arlCore::ICP::solve( void )
{
    const double Tau = 10e-8;   // convergence threshold on the RMS change
    const bool Verbose = false;
    m_startError = -1.0;
    m_nbIterations = 0;
#ifdef ANN
    if(!m_point2PointMode) return false;
    // Seed the two error values so the first loop test always passes.
    m_endError = FLT_MAX/2.0;
    double previousRMS = FLT_MAX;
    if(!m_initialization)
        if(!initialization())
            return false;
//  http://csdl2.computer.org/persagen/DLAbsToc.jsp?resourcePath=/dl/trans/tp/&toc=comp/trans/tp/1992/02/i2toc.xml&DOI=10.1109/34.121791
//  http://ieeexplore.ieee.org/iel1/34/3469/00121791.pdf?tp=&arnumber=121791&isnumber=3469
    unsigned int i, j, k;
    vnl_quaternion<double> qr(0,0,0,1);
    // NOTE(review): Ricp is only assigned inside the loop; if
    // m_maxIterations == 0 it is read uninitialized after the loop —
    // confirm callers guarantee at least one iteration.
    vnl_matrix_fixed<double,3,3> Id3, Ricp, Spy;
    Id3.set_identity();
    vnl_matrix_fixed<double,4,4> QSpy;
    vnl_vector_fixed<double,3> Delta;
    while( fabs(m_endError-previousRMS)>Tau && m_nbIterations<m_maxIterations ) //&& RMS>RMSMax )
    {
        ++m_nbIterations;
        std::cout<<m_nbIterations<<" ";
        previousRMS = m_endError;
        computeError(); // step 1
        if(m_startError<0) m_startError = m_endError;
        // step 2
        // Calculate the cross-variance matrix between m_Pk and Yo
        // Cov( P0, m_Yk ) = 1/n*somme(P0*m_Yk') - ComP0*Comm_Yk'
        // NOTE(review): the accumulator starts from identity rather than
        // zero, leaving an extra 1/m_cloudSize on the diagonal after the
        // normalisation below — verify this is intended (the Besl-McKay
        // formulation accumulates from zero).
        Spy.set_identity();
        for( i=0 ; i<m_cloudSize ; ++i )
          for( j=0 ; j<3 ; ++j )
            for( k=0 ; k<3 ; ++k )
            Spy[j][k]+= m_Pi[i][j]*m_Yk[i][k];
        // Normalise and subtract the outer product of the centroids.
        for( i=0 ; i<3 ; ++i )
          for( j=0 ; j<3 ; ++j )
            Spy[i][j] = Spy[i][j]/(double)m_cloudSize - m_cloudGravity[i]*m_modelGravity[j];
        // Delta = [A23 A31 A12] with Amn = Spy[m][n]-Spy[n][m]
        Delta[0] = Spy[1][2]-Spy[2][1];
        Delta[1] = Spy[2][0]-Spy[0][2];
        Delta[2] = Spy[0][1]-Spy[1][0];
        // calculate the symmetric 4x4 matrix needed to determine
        // the max eigenvalue
        QSpy[0][0] = vnl_trace( Spy );
        for( i=1 ; i<4 ; ++i )
        {
            QSpy[i][0] = Delta[i-1];
            for( j = 1; j < 4; ++j )
            {
                QSpy[0][j] = Delta[j-1];
                QSpy[i][j] = Spy[i-1][j-1]+Spy[j-1][i-1] - QSpy[0][0]*Id3[i-1][j-1];
            }
        }
        vnl_symmetric_eigensystem<double> eigQSpy(QSpy);
        // Optimal rotation matrix calculate from the quaternion
        // vector qr=[q0 q1 q2 q3] obtained by obtained the max eigen value
        // http://paine.wiau.man.ac.uk/pub/doc_vxl/core/vnl/html/vnl__symmetric__eigensystem_8h.html
        // Eigenvalues are sorted ascending, so index 3 is the largest.
        qr.update(eigQSpy.get_eigenvector(3));
        // Reorder the components from [q0 qx qy qz] to vnl's [qx qy qz q0].
        qr = vnl_quaternion<double>(qr[1], qr[2], qr[3], qr[0]);
        Ricp = vnl_transpose(qr.rotation_matrix_transpose()).asMatrix();
        // Optimal translation vector Ticp T = ComY - Ricp.ComP
        vnl_vector<double> TraTemp = m_modelGravity - Ricp*m_cloudGravity;
        // step 3 : Application of the transformation
        for( i=0 ; i<m_cloudSize ; ++i )
          for( j=0 ; j<3 ; ++j )
            m_Pk[i][j] = Ricp[j][0]*m_Pi[i][0] + Ricp[j][1]*m_Pi[i][1] + Ricp[j][2]*m_Pi[i][2] + TraTemp[j];
    }
    // Give the transformation from model ==> cloud
    // First we add the first transformation to Rend
    vnl_matrix_fixed<double,3,3> Rend = Ricp.transpose();
    vnl_vector_fixed<double,3> Tend;
    for( i=0 ; i<3 ; ++i )
        Tend[i] = m_cloudGravity[i]-(Rend[i][0]*m_modelGravity[0]+Rend[i][1]*m_modelGravity[1]+Rend[i][2]*m_modelGravity[2]);
    // Initial matrix [m_rotInit ; m_traInit ; 0 0 0 1]
    // Found matrix [Rend ; Tend ; 0 0 0 1]
    // Compose with the inverse of the initial transform to express the
    // result relative to the original (pre-initialization) frame.
    m_solution.vnl_matrix_fixed<double,4,4>::update(vnl_inverse(m_rotInit)*Rend);
    m_solution.set_column(3,vnl_inverse(m_rotInit)*(Tend-m_traInit));
    m_solution[3][3] = 1.0;
    m_solution.setRMS( m_endError );
    if(Verbose)
    {
        std::cout<<"ICP registration\n";
        std::cout<<"First RMS="<<m_startError<<"\nLast RMS="<<m_endError<<"\nIterations="<<m_nbIterations<<"\nPtsModel="<<m_modelSize<<"\nPtsCloud="<<m_cloudSize<<"\n";
        std::cout<<"Tau ="<<fabs(m_endError-previousRMS)<<"\n";
        std::cout<<"Matrix ="<<m_solution<<"\n";
    }
    return true;
#else // ANN
    m_endError = -1.0;
    return false;
#endif // ANN
}
Beispiel #5
0
// Applies the wiimote's incremental rotation (m_xAngle/m_yAngle/m_zAngle)
// and translation (m_xVelocity/m_yVelocity/m_zVelocity) to the geometry.
// Mode 1 applies the delta directly in object space; mode 2 transforms
// relative to the active 3D camera. Always returns true.
bool mitk::WiiMoteInteractor::DynamicRotationAndTranslation(Geometry3D* geometry)
{
   // computation of the delta transformation
  if(m_SurfaceInteractionMode == 1)
  {
    // necessary because the wiimote has
    // a different orientation when loaded
    // as an object file
    ScalarType temp = m_yAngle;
    m_yAngle = m_zAngle;
    m_zAngle = temp;
  }

  //vnl_quaternion<double> Rx(m_OrientationX
  //  ,m_OrientationY
  //  ,m_OrientationZ
  //  , m_xAngle);

  //vnl_quaternion<double> Ry(Rx.axis()[0]
  //  , Rx.axis()[1]
  //  , Rx.axis()[2]
  //  , m_yAngle);

  //vnl_quaternion<double> Rz(Ry.axis()[0]
  //  , Ry.axis()[1]
  //  , Ry.axis()[2]
  //  , m_zAngle);


  // Build the delta rotation from Euler angles (converted to radians).
  vnl_quaternion<double> q(
    vtkMath::RadiansFromDegrees( m_xAngle ),
    vtkMath::RadiansFromDegrees( m_yAngle ),
    vtkMath::RadiansFromDegrees( m_zAngle ) );

  //q = Rz * Ry * Rx;
  //q.normalize();

  vnl_matrix_fixed<double, 4, 4> deltaTransformMat = q.rotation_matrix_transpose_4();

  // fill translation column
  deltaTransformMat(0,3) = m_xVelocity;
  deltaTransformMat(1,3) = m_yVelocity;
  deltaTransformMat(2,3) = m_zVelocity;

  // invert matrix to apply
  // correct order for the transformation
  deltaTransformMat = vnl_inverse(deltaTransformMat);

  vtkMatrix4x4* deltaTransform = vtkMatrix4x4::New();

  // copy into matrix
  for(size_t i=0; i<4; ++i)
    for(size_t j=0; j<4; ++j)
      deltaTransform->SetElement(i,j, deltaTransformMat(i,j));

  vtkMatrix4x4* objectTransform = vtkMatrix4x4::New();

  if(m_SurfaceInteractionMode == 2)
  {
    // additional computation for transformation
    // relative to the camera view

    // get renderer
    // NOTE(review): if no Standard3D render window is registered,
    // m_BaseRenderer keeps its previous value (possibly null/stale) and
    // the camera access below may dereference it — confirm callers
    // guarantee a 3D window exists.
    const RenderingManager::RenderWindowVector& renderWindows
      = RenderingManager::GetInstance()->GetAllRegisteredRenderWindows();
    for ( RenderingManager::RenderWindowVector::const_iterator iter = renderWindows.begin();
      iter != renderWindows.end();
      ++iter )
    {
      if ( mitk::BaseRenderer::GetInstance((*iter))->GetMapperID() == BaseRenderer::Standard3D )
      {
        m_BaseRenderer = mitk::BaseRenderer::GetInstance((*iter));
      }
    }

    vtkCamera* camera
      = m_BaseRenderer->GetVtkRenderer()->GetActiveCamera();

    //vtkMatrix4x4* cameraMat = vtkMatrix4x4::New();
    vnl_matrix_fixed<double, 4, 4> cameraMat;
    vnl_matrix_fixed<double, 4, 4> objectMat;

    // copy object matrix
    for(size_t i=0; i<4; ++i)
      for(size_t j=0; j<4; ++j)
        objectMat.put(i,j, geometry->GetVtkTransform()->GetMatrix()->GetElement(i,j));

    cameraMat = this->ComputeCurrentCameraPosition(camera);

    vnl_matrix_fixed<double, 4, 4> newObjectMat;
    vnl_matrix_fixed<double, 4, 4> objectToCameraMat;

    // Conjugate the delta with the object-to-camera transform so the
    // rotation happens relative to the camera view.
    objectToCameraMat = vnl_inverse(cameraMat) * objectMat;

    newObjectMat = vnl_inverse(objectToCameraMat)
      * deltaTransformMat
      * objectToCameraMat
      * vnl_inverse(objectMat);

    newObjectMat = vnl_inverse(newObjectMat);
    // Re-apply the translation additively (rotation-conjugation above
    // would otherwise distort it).
    newObjectMat.put(0,3,objectMat(0,3)+deltaTransformMat(0,3));
    newObjectMat.put(1,3,objectMat(1,3)+deltaTransformMat(1,3));
    newObjectMat.put(2,3,objectMat(2,3)+deltaTransformMat(2,3));


    // copy result
    for(size_t i=0; i<4; ++i)
      for(size_t j=0; j<4; ++j)
        objectTransform->SetElement(i,j, newObjectMat(i,j));
  }

  //copy m_vtkMatrix to m_VtkIndexToWorldTransform
  geometry->TransferItkToVtkTransform();

  vtkTransform* vtkTransform = vtkTransform::New();

  if(m_SurfaceInteractionMode == 1)
  {
    //m_VtkIndexToWorldTransform as vtkLinearTransform*
    vtkTransform->SetMatrix( geometry->GetVtkTransform()->GetMatrix() );

    vtkTransform->Concatenate( deltaTransform );

    geometry->SetIndexToWorldTransformByVtkMatrix( vtkTransform->GetMatrix() );

  }
  else
  {
    geometry->SetIndexToWorldTransformByVtkMatrix( objectTransform );
  }

  geometry->Modified();
  m_DataNode->Modified();

  // Release the manually created VTK objects (VTK uses manual ref-counting).
  vtkTransform->Delete();
  objectTransform->Delete();

  deltaTransform->Delete();

  //update rendering
  mitk::RenderingManager::GetInstance()->RequestUpdateAll();

  return true;
}
// Applies a rigid (Euler 3D) transformation to an image by adjusting its
// origin and direction (header transform, no resampling of voxel data),
// optionally resampling onto a reference grid. Diffusion images get the
// same header update plus an in-place gradient-direction correction.
//
// @param img            image to transform (header is modified in place)
// @param transformation 6 rigid parameters (3 rotations, 3 translations)
// @param offset         translation subtracted from the origin beforehand
// @param resampleReference if non-null, resample img onto this image's grid
// @param binary         use nearest-neighbour (true) vs windowed-sinc (false)
void mitk::RegistrationWrapper::ApplyTransformationToImage(mitk::Image::Pointer img, const mitk::RegistrationWrapper::RidgidTransformType &transformation,double* offset, mitk::Image* resampleReference,  bool binary)
{
  typedef mitk::DiffusionImage<short> DiffusionImageType;

  if (dynamic_cast<DiffusionImageType*> (img.GetPointer()) == NULL)
  {

    ItkImageType::Pointer itkImage = ItkImageType::New();


    MITK_ERROR << "imgCopy  0 " <<  "/" << img->GetReferenceCount();
    MITK_ERROR << "pixel type  " << img->GetPixelType().GetComponentTypeAsString();

    CastToItkImage(img, itkImage);



    typedef itk::Euler3DTransform< double > RigidTransformType;
    RigidTransformType::Pointer rtransform = RigidTransformType::New();
    RigidTransformType::ParametersType parameters(RigidTransformType::ParametersDimension);

    for (int i = 0; i<6;++i)
      parameters[i] = transformation[i];

    rtransform->SetParameters( parameters );

    // Shift the origin by the supplied offset, then map it through the
    // inverse transform.
    mitk::Point3D origin = itkImage->GetOrigin();
    origin[0]-=offset[0];
    origin[1]-=offset[1];
    origin[2]-=offset[2];

    mitk::Point3D newOrigin = rtransform->GetInverseTransform()->TransformPoint(origin);

    // Rotate the direction cosines by the inverse rotation.
    itk::Matrix<double,3,3> dir = itkImage->GetDirection();
    itk::Matrix<double,3,3> transM  ( vnl_inverse(rtransform->GetMatrix().GetVnlMatrix()));
    itk::Matrix<double,3,3> newDirection = transM * dir;

    itkImage->SetOrigin(newOrigin);
    itkImage->SetDirection(newDirection);

    // Perform Resampling if reference image is provided
    if (resampleReference != NULL)
    {
      typedef itk::ResampleImageFilter<ItkImageType, ItkImageType>  ResampleFilterType;

      ItkImageType::Pointer itkReference = ItkImageType::New();
      CastToItkImage(resampleReference,itkReference);

      typedef itk::WindowedSincInterpolateImageFunction< ItkImageType, 3> WindowedSincInterpolatorType;
      WindowedSincInterpolatorType::Pointer sinc_interpolator = WindowedSincInterpolatorType::New();

      typedef itk::NearestNeighborInterpolateImageFunction< ItkImageType, double > NearestNeighborInterpolatorType;
      NearestNeighborInterpolatorType::Pointer nn_interpolator = NearestNeighborInterpolatorType::New();


      ResampleFilterType::Pointer resampler = ResampleFilterType::New();
      resampler->SetInput(itkImage);
      resampler->SetReferenceImage( itkReference );
      resampler->UseReferenceImageOn();
      // Nearest-neighbour keeps binary masks binary; sinc preserves detail.
      if (binary)
        resampler->SetInterpolator(nn_interpolator);
      else
        resampler->SetInterpolator(sinc_interpolator);

      resampler->Update();

      GrabItkImageMemory(resampler->GetOutput(), img);
    }
    else
    {
      // !! CastToItk behaves very differently depending on the original data type
      // if the target type is the same as the original, only a pointer to the data is set
      // and an additional GrabItkImageMemory will cause a segfault when the image is destroyed
      // GrabItkImageMemory - is not necessary in this case since we worked on the original data
      // See Bug 17538.
      // NOTE(review): img is a by-value smart pointer, so this reassignment
      // only rebinds the local copy — confirm callers see the change via
      // the shared pixel data rather than this pointer.
      if (img->GetPixelType().GetComponentTypeAsString() != "double")
        img = GrabItkImageMemory(itkImage);
    }
  }
  else
  {
    // Diffusion image: apply the same header transform to the vector image
    // and additionally rotate the gradient directions.
    DiffusionImageType::Pointer diffImages = dynamic_cast<DiffusionImageType*>(img.GetPointer());

    typedef itk::Euler3DTransform< double > RigidTransformType;
    RigidTransformType::Pointer rtransform = RigidTransformType::New();
    RigidTransformType::ParametersType parameters(RigidTransformType::ParametersDimension);

    for (int i = 0; i<6;++i)
    {
      parameters[i] = transformation[i];
    }

    rtransform->SetParameters( parameters );

    mitk::Point3D b0origin = diffImages->GetVectorImage()->GetOrigin();
    b0origin[0]-=offset[0];
    b0origin[1]-=offset[1];
    b0origin[2]-=offset[2];

    mitk::Point3D newOrigin = rtransform->GetInverseTransform()->TransformPoint(b0origin);

    itk::Matrix<double,3,3> dir = diffImages->GetVectorImage()->GetDirection();
    itk::Matrix<double,3,3> transM  ( vnl_inverse(rtransform->GetMatrix().GetVnlMatrix()));
    itk::Matrix<double,3,3> newDirection = transM * dir;

    diffImages->GetVectorImage()->SetOrigin(newOrigin);
    diffImages->GetVectorImage()->SetDirection(newDirection);
    diffImages->Modified();

    mitk::DiffusionImageCorrectionFilter<short>::Pointer correctionFilter = mitk::DiffusionImageCorrectionFilter<short>::New();

    // For Diff. Images: Need to rotate the gradients (works in-place)
    correctionFilter->SetImage(diffImages);
    correctionFilter->CorrectDirections(transM.GetVnlMatrix());
    img = diffImages;
  }
}
// Estimates a similarity transform (rotation + translation + optional
// scale) aligning image2 to image1 by matching image moments / principal
// axes (solving Wahba's problem), then refines it with a multi-start
// optimizer seeded by the rotation angles in r_thetas. The metric is
// Mattes mutual information when r_WM == "MI", otherwise correlation.
// Writes the best transform to the file named by r_txfn (when its length
// exceeds 3) and returns one metric value per start as an R vector.
SEXP invariantSimilarityHelper(
  typename itk::Image< float , ImageDimension >::Pointer image1,
  typename itk::Image< float , ImageDimension >::Pointer image2,
  SEXP r_thetas, SEXP r_lsits, SEXP r_WM, SEXP r_scale,
  SEXP r_doreflection, SEXP r_txfn  )
{
  unsigned int mibins = 20;   // histogram bins for the MI metric
  unsigned int localSearchIterations =
    Rcpp::as< unsigned int >( r_lsits ) ;
  std::string whichMetric = Rcpp::as< std::string >( r_WM );
  std::string txfn = Rcpp::as< std::string >( r_txfn );
  bool useprincaxis = true;
  typedef typename itk::ImageMaskSpatialObject<ImageDimension>::ImageType
    maskimagetype;
  // NOTE(review): mask is never assigned after this, so the
  // SetFixedImageMask branch below is currently dead code — confirm.
  typename maskimagetype::Pointer mask = ITK_NULLPTR;
  Rcpp::NumericVector thetas( r_thetas );
  Rcpp::NumericVector vector_r( r_thetas ) ;
  Rcpp::IntegerVector dims( 1 );
  Rcpp::IntegerVector doReflection( r_doreflection );
  unsigned int vecsize = thetas.size();
  dims[0]=0;
  typedef float  PixelType;
  typedef double RealType;
  RealType bestscale = Rcpp::as< RealType >( r_scale ) ;
  typedef itk::Image< PixelType , ImageDimension > ImageType;
  if( image1.IsNotNull() & image2.IsNotNull() )
    {
    // Compute image moments (centers of gravity, principal moments/axes)
    // for both images; failures leave the centers at zero.
    typedef typename itk::ImageMomentsCalculator<ImageType> ImageCalculatorType;
    typedef itk::AffineTransform<RealType, ImageDimension> AffineType0;
    typedef itk::AffineTransform<RealType, ImageDimension> AffineType;
    typedef typename ImageCalculatorType::MatrixType       MatrixType;
    typedef itk::Vector<float, ImageDimension>  VectorType;
    VectorType ccg1;
    VectorType cpm1;
    MatrixType cpa1;
    VectorType ccg2;
    VectorType cpm2;
    MatrixType cpa2;
    typename ImageCalculatorType::Pointer calculator1 =
      ImageCalculatorType::New();
    typename ImageCalculatorType::Pointer calculator2 =
      ImageCalculatorType::New();
    calculator1->SetImage(  image1 );
    calculator2->SetImage(  image2 );
    typename ImageCalculatorType::VectorType fixed_center;
    fixed_center.Fill(0);
    typename ImageCalculatorType::VectorType moving_center;
    moving_center.Fill(0);
    try
      {
      calculator1->Compute();
      fixed_center = calculator1->GetCenterOfGravity();
      ccg1 = calculator1->GetCenterOfGravity();
      cpm1 = calculator1->GetPrincipalMoments();
      cpa1 = calculator1->GetPrincipalAxes();
      try
        {
        calculator2->Compute();
        moving_center = calculator2->GetCenterOfGravity();
        ccg2 = calculator2->GetCenterOfGravity();
        cpm2 = calculator2->GetPrincipalMoments();
        cpa2 = calculator2->GetPrincipalAxes();
        }
      catch( ... )
        {
        fixed_center.Fill(0);
        }
      }
    catch( ... )
      {
      // Rcpp::Rcerr << " zero image1 error ";
      }
    // If no explicit scale was requested (scale == 1), estimate it from
    // the ratio of total image masses, normalised per dimension.
    if ( vnl_math_abs( bestscale - 1.0 ) < 1.e-6 )
      {
      RealType volelt1 = 1;
      RealType volelt2 = 1;
      for ( unsigned int d=0; d<ImageDimension; d++)
        {
        volelt1 *= image1->GetSpacing()[d];
        volelt2 *= image2->GetSpacing()[d];
        }
      bestscale =
        ( calculator2->GetTotalMass() * volelt2 )/
        ( calculator1->GetTotalMass() * volelt1 );
      RealType powlev = 1.0 / static_cast<RealType>(ImageDimension);
      bestscale = vcl_pow( bestscale , powlev );
    }
    // NOTE(review): eigind2 stays 1 in both 2D and 3D; only eigind1 is
    // bumped for 3D — confirm this indexing is intended.
    unsigned int eigind1 = 1;
    unsigned int eigind2 = 1;
    if( ImageDimension == 3 )
      {
      eigind1 = 2;
      }
    typedef vnl_vector<RealType> EVectorType;
    typedef vnl_matrix<RealType> EMatrixType;
    EVectorType evec1_2ndary = cpa1.GetVnlMatrix().get_row( eigind2 );
    EVectorType evec1_primary = cpa1.GetVnlMatrix().get_row( eigind1 );
    EVectorType evec2_2ndary  = cpa2.GetVnlMatrix().get_row( eigind2 );
    EVectorType evec2_primary = cpa2.GetVnlMatrix().get_row( eigind1 );
    /** Solve Wahba's problem http://en.wikipedia.org/wiki/Wahba%27s_problem */
    EMatrixType B = outer_product( evec2_primary, evec1_primary );
    if( ImageDimension == 3 )
      {
      B = outer_product( evec2_2ndary, evec1_2ndary )
        + outer_product( evec2_primary, evec1_primary );
      }
    vnl_svd<RealType>    wahba( B );
    vnl_matrix<RealType> A_solution = wahba.V() * wahba.U().transpose();
    A_solution = vnl_inverse( A_solution );
    RealType det = vnl_determinant( A_solution  );
    // Repair an improper rotation (negative determinant) by flipping the
    // axes whose diagonal entries are negative.
    if( ( det < 0 ) )
      {
      vnl_matrix<RealType> id( A_solution );
      id.set_identity();
      for( unsigned int i = 0; i < ImageDimension; i++ )
        {
        if( A_solution( i, i ) < 0 )
          {
          id( i, i ) = -1.0;
          }
        }
      A_solution =  A_solution * id.transpose();
      }
    // Optional reflections across the primary axes (Householder matrices).
    if ( doReflection[0] == 1 ||  doReflection[0] == 3 )
      {
        vnl_matrix<RealType> id( A_solution );
        id.set_identity();
        id = id - 2.0 * outer_product( evec2_primary , evec2_primary  );
        A_solution = A_solution * id;
      }
    if ( doReflection[0] > 1 )
      {
        vnl_matrix<RealType> id( A_solution );
        id.set_identity();
        id = id - 2.0 * outer_product( evec1_primary , evec1_primary  );
        A_solution = A_solution * id;
      }
    // Initial affine: translate moving COG onto fixed COG, rotate about
    // the fixed center by the Wahba solution.
    typename AffineType::Pointer affine1 = AffineType::New();
    typename AffineType::OffsetType trans = affine1->GetOffset();
    itk::Point<RealType, ImageDimension> trans2;
    for( unsigned int i = 0; i < ImageDimension; i++ )
      {
      trans[i] = moving_center[i] - fixed_center[i];
      trans2[i] =  fixed_center[i] * ( 1 );
      }
    affine1->SetIdentity();
    affine1->SetOffset( trans );
    if( useprincaxis )
      {
      affine1->SetMatrix( A_solution );
      }
    affine1->SetCenter( trans2 );
    if( ImageDimension > 3  )
      {
      return EXIT_SUCCESS;
      }
    vnl_vector<RealType> evec_tert;
    if( ImageDimension == 3 )
      { // try to rotate around tertiary and secondary axis
      evec_tert = vnl_cross_3d( evec1_primary, evec1_2ndary );
      }
    if( ImageDimension == 2 )
      { // try to rotate around tertiary and secondary axis
      evec_tert = evec1_2ndary;
      evec1_2ndary = evec1_primary;
      }
    itk::Vector<RealType, ImageDimension> axis2;
    itk::Vector<RealType, ImageDimension> axis1;
    for( unsigned int d = 0; d < ImageDimension; d++ )
      {
      axis1[d] = evec_tert[d];
      axis2[d] = evec1_2ndary[d];
      }
    typename AffineType::Pointer simmer = AffineType::New();
    simmer->SetIdentity();
    simmer->SetCenter( trans2 );
    simmer->SetOffset( trans );
    typename AffineType0::Pointer affinesearch = AffineType0::New();
    affinesearch->SetIdentity();
    affinesearch->SetCenter( trans2 );
    // Set up both candidate metrics; only one is wired up below.
    typedef  itk::MultiStartOptimizerv4         OptimizerType;
    typename OptimizerType::MetricValuesListType metricvalues;
    typename OptimizerType::Pointer  mstartOptimizer = OptimizerType::New();
    typedef itk::CorrelationImageToImageMetricv4
      <ImageType, ImageType, ImageType> GCMetricType;
    typedef itk::MattesMutualInformationImageToImageMetricv4
      <ImageType, ImageType, ImageType> MetricType;
    typename MetricType::ParametersType newparams(  affine1->GetParameters() );
    typename GCMetricType::Pointer gcmetric = GCMetricType::New();
    gcmetric->SetFixedImage( image1 );
    gcmetric->SetVirtualDomainFromImage( image1 );
    gcmetric->SetMovingImage( image2 );
    gcmetric->SetMovingTransform( simmer );
    gcmetric->SetParameters( newparams );
    typename MetricType::Pointer mimetric = MetricType::New();
    mimetric->SetNumberOfHistogramBins( mibins );
    mimetric->SetFixedImage( image1 );
    mimetric->SetMovingImage( image2 );
    mimetric->SetMovingTransform( simmer );
    mimetric->SetParameters( newparams );
    if( mask.IsNotNull() )
      {
      typename itk::ImageMaskSpatialObject<ImageDimension>::Pointer so =
        itk::ImageMaskSpatialObject<ImageDimension>::New();
      so->SetImage( const_cast<maskimagetype *>( mask.GetPointer() ) );
      mimetric->SetFixedImageMask( so );
      gcmetric->SetFixedImageMask( so );
      }
    typedef  itk::ConjugateGradientLineSearchOptimizerv4 LocalOptimizerType;
    typename LocalOptimizerType::Pointer  localoptimizer =
      LocalOptimizerType::New();
    RealType     localoptimizerlearningrate = 0.1;
    localoptimizer->SetLearningRate( localoptimizerlearningrate );
    localoptimizer->SetMaximumStepSizeInPhysicalUnits(
      localoptimizerlearningrate );
    localoptimizer->SetNumberOfIterations( localSearchIterations );
    localoptimizer->SetLowerLimit( 0 );
    localoptimizer->SetUpperLimit( 2 );
    localoptimizer->SetEpsilon( 0.1 );
    localoptimizer->SetMaximumLineSearchIterations( 50 );
    localoptimizer->SetDoEstimateLearningRateOnce( true );
    localoptimizer->SetMinimumConvergenceValue( 1.e-6 );
    localoptimizer->SetConvergenceWindowSize( 5 );
    // Subsample the fixed image (every 10th voxel) to speed up the metric.
    if( true )
      {
      typedef typename MetricType::FixedSampledPointSetType PointSetType;
      typedef typename PointSetType::PointType              PointType;
      typename PointSetType::Pointer      pset(PointSetType::New());
      unsigned int ind=0;
      unsigned int ct=0;
      itk::ImageRegionIteratorWithIndex<ImageType> It(image1,
        image1->GetLargestPossibleRegion() );
      for( It.GoToBegin(); !It.IsAtEnd(); ++It )
        {
        // take every N^th point
        if ( ct % 10 == 0  )
          {
          PointType pt;
          image1->TransformIndexToPhysicalPoint( It.GetIndex(), pt);
          pset->SetPoint(ind, pt);
          ind++;
          }
          ct++;
        }
      mimetric->SetFixedSampledPointSet( pset );
      mimetric->SetUseFixedSampledPointSet( true );
      gcmetric->SetFixedSampledPointSet( pset );
      gcmetric->SetUseFixedSampledPointSet( true );
    }
    // Wire the chosen metric into both optimizers and estimate parameter
    // scales from physical shifts.
    if ( whichMetric.compare("MI") == 0  ) {
      mimetric->Initialize();
      typedef itk::RegistrationParameterScalesFromPhysicalShift<MetricType>
      RegistrationParameterScalesFromPhysicalShiftType;
      typename RegistrationParameterScalesFromPhysicalShiftType::Pointer
      shiftScaleEstimator =
      RegistrationParameterScalesFromPhysicalShiftType::New();
      shiftScaleEstimator->SetMetric( mimetric );
      shiftScaleEstimator->SetTransformForward( true );
      typename RegistrationParameterScalesFromPhysicalShiftType::ScalesType
      movingScales( simmer->GetNumberOfParameters() );
      shiftScaleEstimator->EstimateScales( movingScales );
      mstartOptimizer->SetScales( movingScales );
      mstartOptimizer->SetMetric( mimetric );
      localoptimizer->SetMetric( mimetric );
      localoptimizer->SetScales( movingScales );
    }
    if ( whichMetric.compare("MI") != 0  ) {
      gcmetric->Initialize();
      typedef itk::RegistrationParameterScalesFromPhysicalShift<GCMetricType>
        RegistrationParameterScalesFromPhysicalShiftType;
      typename RegistrationParameterScalesFromPhysicalShiftType::Pointer
        shiftScaleEstimator =
        RegistrationParameterScalesFromPhysicalShiftType::New();
      shiftScaleEstimator->SetMetric( gcmetric );
      shiftScaleEstimator->SetTransformForward( true );
      typename RegistrationParameterScalesFromPhysicalShiftType::ScalesType
      movingScales( simmer->GetNumberOfParameters() );
      shiftScaleEstimator->EstimateScales( movingScales );
      mstartOptimizer->SetScales( movingScales );
      mstartOptimizer->SetMetric( gcmetric );
      localoptimizer->SetMetric( gcmetric );
      localoptimizer->SetScales( movingScales );
    }
    // Build the multi-start parameter list: one start per theta (2D) or
    // per theta pair (3D), each rotated about the moment axes.
    typename OptimizerType::ParametersListType parametersList =
      mstartOptimizer->GetParametersList();
    affinesearch->SetIdentity();
    affinesearch->SetCenter( trans2 );
    affinesearch->SetOffset( trans );
    for ( unsigned int i = 0; i < vecsize; i++ )
      {
      RealType ang1 = thetas[i];
      RealType ang2 = 0; // FIXME should be psi
      vector_r[ i ]=0;
      if( ImageDimension == 3 )
        {
        for ( unsigned int jj = 0; jj < vecsize; jj++ )
        {
        ang2=thetas[jj];
        affinesearch->SetIdentity();
        affinesearch->SetCenter( trans2 );
        affinesearch->SetOffset( trans );
        if( useprincaxis )
          {
          affinesearch->SetMatrix( A_solution );
          }
        affinesearch->Rotate3D(axis1, ang1, 1);
        affinesearch->Rotate3D(axis2, ang2, 1);
        affinesearch->Scale( bestscale );
        simmer->SetMatrix(  affinesearch->GetMatrix() );
        parametersList.push_back( simmer->GetParameters() );
        }
        }
      if( ImageDimension == 2 )
        {
        affinesearch->SetIdentity();
        affinesearch->SetCenter( trans2 );
        affinesearch->SetOffset( trans );
        if( useprincaxis )
          {
          affinesearch->SetMatrix( A_solution );
          }
        affinesearch->Rotate2D( ang1, 1);
        affinesearch->Scale( bestscale );
        simmer->SetMatrix(  affinesearch->GetMatrix() );
        typename AffineType::ParametersType pp =
          simmer->GetParameters();
        //pp[1]=ang1;
        //pp[0]=bestscale;
        parametersList.push_back( simmer->GetParameters() );
        }
      }
    mstartOptimizer->SetParametersList( parametersList );
    if( localSearchIterations > 0 )
      {
      mstartOptimizer->SetLocalOptimizer( localoptimizer );
      }
    mstartOptimizer->StartOptimization();
    // NOTE(review): bestaffine is constructed twice (here and inside the
    // txfn branch); this outer instance is never used after being
    // configured — confirm whether it can be removed.
    typename AffineType::Pointer bestaffine = AffineType::New();
    bestaffine->SetCenter( trans2 );
    bestaffine->SetParameters( mstartOptimizer->GetBestParameters() );
    if ( txfn.length() > 3 )
      {
      typename AffineType::Pointer bestaffine = AffineType::New();
      bestaffine->SetCenter( trans2 );
      bestaffine->SetParameters( mstartOptimizer->GetBestParameters() );
      typedef itk::TransformFileWriter TransformWriterType;
      typename TransformWriterType::Pointer transformWriter =
        TransformWriterType::New();
      transformWriter->SetInput( bestaffine );
      transformWriter->SetFileName( txfn.c_str() );
      transformWriter->Update();
      }
    // Copy the per-start metric values into the R result vector.
    metricvalues = mstartOptimizer->GetMetricValuesList();
    for ( unsigned int k = 0; k < metricvalues.size(); k++ )
      {
      vector_r[k] = metricvalues[k];
      }
    // NOTE(review): dims[0] is set but dims itself is never used — confirm.
    dims[0] = vecsize;
    vector_r.attr( "dim" ) = vecsize;
    return Rcpp::wrap( vector_r );
    }
  else
    {
    return Rcpp::wrap( vector_r );
    }
}
Beispiel #8
0
 void Transform::Invert()
 {
   vnl_matrix_fixed<mitk::ScalarType, 4, 4> tmp(this->GetMatrix());
   this->SetMatrix( vnl_inverse( tmp ) );
 }
Beispiel #9
0
// Converts an FSL (FLIRT-style) 4x4 registration matrix into an ITK/ANTsR
// affine transform expressed in RAS physical space, using the reference
// and moving images' voxel-to-physical matrices. Returns the wrapped ITK
// transform to R.
SEXP fsl2antsrTransform( SEXP r_matrix, SEXP r_reference, SEXP r_moving, short flag )
{
  typedef vnl_matrix_fixed<double, 4, 4>              MatrixType;
  typedef itk::Image<PixelType, Dimension>            ImageType;
  typedef itk::Matrix<double, 4,4>                    TransformMatrixType;

  typedef itk::AffineTransform<double, 3> AffTran;

  typedef typename ImageType::Pointer     ImagePointerType;

  typedef itk::Transform<double,3,3>                    TransformBaseType;
  typedef typename TransformBaseType::Pointer           TransformBasePointerType;

  ImagePointerType ref = Rcpp::as<ImagePointerType>( r_reference );
  ImagePointerType mov = Rcpp::as<ImagePointerType>( r_moving );

  MatrixType m_fsl, m_spcref, m_spcmov, m_swpref, m_swpmov, mat, m_ref, m_mov;

  // NOTE(review): vnl_matrix_fixed default-constructs uninitialized; if
  // r_matrix is smaller than 4x4 the remaining m_fsl entries hold garbage
  // — confirm callers always pass a full 4x4 matrix. Also note the
  // signed/unsigned comparison against nrow()/ncol().
  Rcpp::NumericMatrix matrix(r_matrix);
  for ( unsigned int i=0; i<matrix.nrow(); i++)
    for ( unsigned int j=0; j<matrix.ncol(); j++)
      m_fsl(i,j) = matrix(i,j);

  // Set the ref/mov matrices
  m_ref = GetVoxelSpaceToRASPhysicalSpaceMatrix<ImageType, TransformMatrixType>( ref ).GetVnlMatrix();
  m_mov = GetVoxelSpaceToRASPhysicalSpaceMatrix<ImageType, TransformMatrixType>( mov ).GetVnlMatrix();

  // Set the swap matrices: FSL flips the x axis for images with positive
  // determinant (neurological orientation).
  m_swpref.set_identity();
  if(vnl_det(m_ref) > 0)
    {
    m_swpref(0,0) = -1.0;
    m_swpref(0,3) = (ref->GetBufferedRegion().GetSize(0) - 1) * ref->GetSpacing()[0];
    }

  m_swpmov.set_identity();
  if(vnl_det(m_mov) > 0)
    {
    m_swpmov(0,0) = -1.0;
    m_swpmov(0,3) = (mov->GetBufferedRegion().GetSize(0) - 1) * mov->GetSpacing()[0];
    }

  // Set the spacing matrices
  m_spcref.set_identity();
  m_spcmov.set_identity();
  for(size_t i = 0; i < 3; i++)
    {
    m_spcref(i,i) = ref->GetSpacing()[i];
    m_spcmov(i,i) = mov->GetSpacing()[i];
    }

  // Compute the output matrix
  //if (flag == FSL_TO_RAS)
    mat =
    m_mov * vnl_inverse(m_spcmov) * m_swpmov *
    vnl_inverse(m_fsl) *
    m_swpref * m_spcref * vnl_inverse(m_ref);

  // Add access to this
  // NOTE: m_fsl is really m_ras here
  //if (flag == RAS_TO_FSL)
  //  mat =
  //   vnl_inverse(vnl_inverse(m_swpmov) * m_spcmov* vnl_inverse(m_mov) *
  //  m_fsl *
  //  m_ref*vnl_inverse(m_spcref)*vnl_inverse(m_swpref));

  ///////////////

  // Flip the entries that must be flipped (RAS vs LPS sign convention on
  // the third axis rows/columns and the translation).
  mat(2,0) *= -1; mat(2,1) *= -1;
  mat(0,2) *= -1; mat(1,2) *= -1;
  mat(0,3) *= -1; mat(1,3) *= -1;

  // Create an ITK affine transform
  AffTran::Pointer atran = AffTran::New();

  // Populate its matrix
  AffTran::MatrixType amat = atran->GetMatrix();
  AffTran::OffsetType aoff = atran->GetOffset();

  // Split the homogeneous 4x4 into a 3x3 matrix and an offset vector.
  for(size_t r = 0; r < 3; r++)
    {
    for(size_t c = 0; c < 3; c++)
      {
      amat(r,c) = mat(r,c);
      }
    aoff[r] = mat(r,3);
    }

  atran->SetMatrix(amat);
  atran->SetOffset(aoff);

  TransformBasePointerType itkTransform = dynamic_cast<TransformBaseType*>( atran.GetPointer() );
  return Rcpp::wrap( itkTransform );
}
Beispiel #10
0
// ------------------------------------------------------------------------
// Normalises `input` into the voxel frame of `reference`: swaps the x and
// y axes, then rewrites the output image's origin, spacing and direction
// so that they are expressed relative to the reference image (origin in
// reference voxel units, spacing as a ratio, direction relative to the
// reference direction).
void NormaliseImage(const ImageType::Pointer &input, 
		const ImageType::Pointer &reference,	
		ImageType::Pointer &output,
		SeriesTransform &trans)
{
	// flip the x and y axis 
	typedef itk::PermuteAxesImageFilter<ImageType> FlipperType;
	FlipperType::Pointer flipper = FlipperType::New();
	itk::FixedArray<unsigned int, 3> order;
	order[0] = 1;
	order[1] = 0;
	order[2] = 2;

	flipper->SetOrder(order);
	flipper->SetInput(input);
	flipper->Update();

	output = flipper->GetOutput();

	// get the reference offset
	vnl_vector<double> refOrigin(3);
	refOrigin[0] = reference->GetOrigin()[0];// + trans.translation[0];
	refOrigin[1] = reference->GetOrigin()[1];// + trans.translation[1];
	refOrigin[2] = reference->GetOrigin()[2];// + trans.translation[2];

	// Reference origin expressed in the reference's rotated frame.
	vnl_matrix<double> refRot = reference->GetDirection().GetVnlMatrix();
	vnl_matrix<double> refRotInv = vnl_inverse(refRot);
	vnl_vector<double> refOffset = refRotInv * refOrigin;

	// get the image origin
	vnl_vector<double> origin(3);
	origin[0] = output->GetOrigin()[0];
	origin[1] = output->GetOrigin()[1];
	origin[2] = output->GetOrigin()[2];
	
	// apply the rotation to the origin
	origin = refRotInv * origin;

	// apply the offset to the origin
	origin = origin - refOffset;

	// apply the scaling 
	// NOTE(review): only the first spacing component is used — assumes the
	// reference image has isotropic spacing; confirm.
	origin /= reference->GetSpacing()[0];

	// put the values into the output image
	ImageType::PointType itkOrigin;
	itkOrigin[0] = origin[0];
	itkOrigin[1] = origin[1];
	itkOrigin[2] = origin[2];

	ImageType::SpacingType spacing;
	spacing.Fill(output->GetSpacing()[0] / reference->GetSpacing()[0]);

	// get the new direction (relative to the reference direction)
	ImageType::DirectionType dirOut = output->GetDirection();
	dirOut = reference->GetDirection().GetInverse() * dirOut.GetVnlMatrix();
	
	output->SetSpacing(spacing);
	output->SetDirection(dirOut);
	output->SetOrigin(itkOrigin);
}