int Invert( int argc , char* argv[] )
{
  //check arguments
  if( argc != 4 )
  {
    std::cout<< argv[ 0 ] << " " << argv[ 1 ] << " inputTransform outputTransform" << std::endl ;
    return 1 ;
  }
  typedef itk::MatrixOffsetTransformBase< double , 3 , 3 > TransformType ;
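  // MatrixOffsetTransformBase is not registered with the transform factory by
  // default, so register it here so the reader can instantiate it.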
  itk::TransformFactory< TransformType >::RegisterTransform();
  itk::TransformFileReader::Pointer transformFile = itk::TransformFileReader::New() ;
  transformFile->SetFileName( argv[ 2 ] ) ;
  transformFile->Update() ;
  if( transformFile->GetTransformList()->size() != 1 )
  {
    std::cerr << "Please give a transform file containing only one transformation" << std::endl ;
    return 1 ;
  }
  TransformType::Pointer transform ;
  transform = dynamic_cast< TransformType* >
        ( transformFile->GetTransformList()->front().GetPointer() ) ;
  if( !transform )
  {
    std::cerr << "Transform type is not handled. Please convert your transform first" << std::endl ;
    return 1 ;
  }
  TransformType::Pointer inverse = TransformType::New() ;
  if( !transform->GetInverse( inverse ) )
  {
    std::cerr << "Transform is not invertible" << std::endl ;
    return 1 ;
  }
  itk::TransformFileWriter::Pointer transformWriter = itk::TransformFileWriter::New() ;
  transformWriter->SetFileName( argv[ 3 ] ) ;
  transformWriter->AddTransform( inverse ) ;
  transformWriter->Update() ;
  return 0 ;
}
mitk::Vector3D
  mitk::SlicedGeometry3D::AdjustNormal( const mitk::Vector3D &normal ) const
{
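  // Map the world-space normal into the index space of the reference geometry
  // using the inverse of its index-to-world transform, then renormalize.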
  TransformType::Pointer inverse = TransformType::New();
  m_ReferenceGeometry->GetIndexToWorldTransform()->GetInverse( inverse );

  Vector3D transformedNormal = inverse->TransformVector( normal );

  transformedNormal.Normalize();
  return transformedNormal;
}
Example #3
	void initialiseFilters() {
		// resamplers
		transform = TransformType::New();
		transform->SetIdentity();
		volumeInterpolator = VolumeInterpolatorType::New();
		maskVolumeInterpolator = MaskVolumeInterpolatorType::New();
		resampler = ResamplerType::New();
		resampler->SetInput( originalImage );
		resampler->SetInterpolator( volumeInterpolator );
		resampler->SetOutputSpacing( resamplerSpacing );
		resampler->SetSize( resamplerSize );
		resampler->SetTransform( transform );
		resampler->SetDefaultPixelValue( 127 );
		maskResampler = MaskResamplerType::New();
		maskResampler->SetInput( originalMask );
		maskResampler->SetInterpolator( maskVolumeInterpolator );
		maskResampler->SetOutputSpacing( resamplerSpacing );
		maskResampler->SetSize( resamplerSize );
		maskResampler->SetTransform( transform );

		// extract image filters
		sliceExtractor = SliceExtractorType::New();
		sliceExtractor->SetInput( resampler->GetOutput() );
		maskSliceExtractor = MaskSliceExtractorType::New();
		maskSliceExtractor->SetInput( maskResampler->GetOutput() );

		// masks
		for(unsigned int i=0; i<resamplerSize[2]; i++) {
			masks2D.push_back( MaskType2D::New() );
		}
	}
int main(int argc, char ** argv)
{
	// load the image and the bounding box
	BoundingBox::Pointer boundingBox = BoundingBox::New();
	boundingBox->SetInfation(atof(argv[3]));
	boundingBox->Load(argv[1]);
	
	// load the images and compute the reference coordinates
	CMRFileExtractor::Pointer extractor = CMRFileExtractor::New();
	extractor->SetFolderName(argv[2]);
	extractor->Extract();

	ValveOriginFinder::Pointer originFinder = ValveOriginFinder::New();
	originFinder->Set2CImage(extractor->Get2CImage(0));
	originFinder->Set3CImage(extractor->Get3CImage(0));
	originFinder->SetImageStack(extractor->GetStackImage(0));
	originFinder->Compute();


	// apply the transform to the bounding box
	typedef itk::Similarity3DTransform<double> TransformType;
	TransformType::Pointer transform = TransformType::New();
	transform->SetMatrix(originFinder->GetRotation());
	transform->SetTranslation(originFinder->GetTranslation());
	boundingBox->TransformBoundingBox(transform);


	BoundingBox::MaskType::Pointer mask = BoundingBox::MaskType::New();
	boundingBox->ComputeImageMask(extractor->Get2CImage(0), 1, mask);
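	// The mask now covers the bounding box region on the 2C image grid
	// (foreground value 1, presumably).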


	utils::LabelVolumeIO::Write("mask.nrrd", mask);
	utils::ImageVolumeIO::Write("image.nrrd", extractor->Get2CImage(0));


	SimpleMRFSegmenter::Pointer segmenter = SimpleMRFSegmenter::New();
	segmenter->SetImage(extractor->Get2CImage(0));
	segmenter->SetSmoothnessCost(atof(argv[4]));
	segmenter->SetMask(mask);
	segmenter->Segment();


	utils::LabelVolumeIO::Write("seg.nrrd", segmenter->GetOutput());


	return 0;
}
void ImageScaleTransform::process()
    {
    foreach (const ElementBase *source, mSourceElementsReadySet)
        for (int i = 0; i < source->getFramesNo(); ++i)
            {
            const FrameBase *frame = source->getFrame(i);
            if (frame->getMaxDimension() == ColorImageFrame::Dimensions)
                {
                mImageFrame.setSourceName(frame->getSourceName());
                mSrcFrame.resizeAndCopyFrame(*frame);
                ColorImageFrame::ImageType::Pointer srcImg = mSrcFrame;

                typedef ScaleTransform<double, 2> TransformType;
                TransformType::Pointer scaleTransform = TransformType::New();
                FixedArray<float, 2> scale;
                scale[0] = property("widthScale").toDouble();
                scale[1] = property("heightScale").toDouble();
                scaleTransform->SetScale(scale);
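                // Note: ResampleImageFilter maps output points back through the
                // transform to the input, so these factors scale the visible
                // content by their reciprocal.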
                Point<float, 2> center;
                center[0] = srcImg->GetLargestPossibleRegion().GetSize()[0]/2;
                center[1] = srcImg->GetLargestPossibleRegion().GetSize()[1]/2;
                scaleTransform->SetCenter(center);

                typedef ResampleImageFilter<ColorImageFrame::ImageType, ColorImageFrame::ImageType> ResampleImageFilterType;
                ResampleImageFilterType::Pointer resampleFilter = ResampleImageFilterType::New();
                resampleFilter->SetTransform(scaleTransform);
                resampleFilter->SetInput(srcImg);
                resampleFilter->SetSize(srcImg->GetLargestPossibleRegion().GetSize());
                resampleFilter->Update();
                mImageFrame = resampleFilter->GetOutput();

                emit framesReady();
                break;
                }
            }
    }
void QAngioSubstractionExtension::computeAutomateSingleImage()
{
    QApplication::setOverrideCursor(Qt::WaitCursor);
    const    unsigned int          Dimension = 2;
    typedef  Volume::ItkPixelType  PixelType;

    typedef itk::Image< PixelType, Dimension >  FixedImageType;
    typedef itk::Image< PixelType, Dimension >  MovingImageType;
    typedef   float     InternalPixelType;
    typedef itk::Image< InternalPixelType, Dimension > InternalImageType;

    typedef itk::TranslationTransform< double, Dimension > TransformType;
    typedef itk::GradientDescentOptimizer                  OptimizerType;
    typedef itk::LinearInterpolateImageFunction< 
                                    InternalImageType,
                                    double             > InterpolatorType;
    typedef itk::ImageRegistrationMethod< 
                                    InternalImageType, 
                                    InternalImageType >  RegistrationType;
    typedef itk::MutualInformationImageToImageMetric< 
                                          InternalImageType, 
                                          InternalImageType >    MetricType;

    TransformType::Pointer      transform     = TransformType::New();
    OptimizerType::Pointer      optimizer     = OptimizerType::New();
    InterpolatorType::Pointer   interpolator  = InterpolatorType::New();
    RegistrationType::Pointer   registration  = RegistrationType::New();

    registration->SetOptimizer(optimizer);
    registration->SetTransform(transform);
    registration->SetInterpolator(interpolator);

    MetricType::Pointer         metric        = MetricType::New();
    registration->SetMetric(metric);
    metric->SetFixedImageStandardDeviation(0.4);
    metric->SetMovingImageStandardDeviation(0.4);
    metric->SetNumberOfSpatialSamples(50);
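    // 0.4 is the standard deviation commonly recommended for the Viola-Wells
    // metric when the inputs are normalized to zero mean and unit variance
    // (see the normalization filters below).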

    typedef itk::ExtractImageFilter< Volume::ItkImageType, FixedImageType > FilterType;
    
    FilterType::Pointer extractFixedImageFilter = FilterType::New();
    Volume::ItkImageType::RegionType inputRegion = m_mainVolume->getItkData()->GetLargestPossibleRegion();
    Volume::ItkImageType::SizeType size = inputRegion.GetSize();
    // Halve the size so that we only keep the central part,
    // otherwise the background gets registered
    size[0] = size[0] / 2;
    size[1] = size[1] / 2;
    size[2] = 0;
    Volume::ItkImageType::IndexType start = inputRegion.GetIndex();
    const unsigned int sliceReference = m_imageSelectorSpinBox->value();
    // start at one quarter of the image
    start[0] = size[0] / 2;
    start[1] = size[1] / 2;
    start[2] = sliceReference;
    Volume::ItkImageType::RegionType desiredRegion;
    desiredRegion.SetSize(size);
    desiredRegion.SetIndex(start);
    extractFixedImageFilter->SetExtractionRegion(desiredRegion);
    extractFixedImageFilter->SetInput(m_mainVolume->getItkData());
    extractFixedImageFilter->Update();

    FilterType::Pointer extractMovingImageFilter = FilterType::New();
    Volume::ItkImageType::IndexType startMoving = inputRegion.GetIndex();
    const unsigned int sliceNumber = m_2DView_1->getViewer()->getCurrentSlice();
    startMoving[0] = size[0] / 2;
    startMoving[1] = size[1] / 2;
    startMoving[2] = sliceNumber;
    Volume::ItkImageType::RegionType desiredMovingRegion;
    desiredMovingRegion.SetSize(size);
    desiredMovingRegion.SetIndex(startMoving);
    extractMovingImageFilter->SetExtractionRegion(desiredMovingRegion);
    extractMovingImageFilter->SetInput(m_mainVolume->getItkData());
    extractMovingImageFilter->Update();

    typedef itk::NormalizeImageFilter< 
                                FixedImageType, 
                                InternalImageType 
                                        > FixedNormalizeFilterType;

    typedef itk::NormalizeImageFilter< 
                                MovingImageType, 
                                InternalImageType 
                                              > MovingNormalizeFilterType;

    FixedNormalizeFilterType::Pointer fixedNormalizer = 
                                            FixedNormalizeFilterType::New();

    MovingNormalizeFilterType::Pointer movingNormalizer =
                                            MovingNormalizeFilterType::New();
    typedef itk::DiscreteGaussianImageFilter<
                                      InternalImageType, 
                                      InternalImageType
                                                    > GaussianFilterType;

    GaussianFilterType::Pointer fixedSmoother  = GaussianFilterType::New();
    GaussianFilterType::Pointer movingSmoother = GaussianFilterType::New();

    fixedSmoother->SetVariance(2.0);
    movingSmoother->SetVariance(2.0);
    fixedNormalizer->SetInput(extractFixedImageFilter->GetOutput());
    movingNormalizer->SetInput(extractMovingImageFilter->GetOutput());

    fixedSmoother->SetInput(fixedNormalizer->GetOutput());
    movingSmoother->SetInput(movingNormalizer->GetOutput());

    registration->SetFixedImage(fixedSmoother->GetOutput());
    registration->SetMovingImage(movingSmoother->GetOutput());

    fixedNormalizer->Update();
    registration->SetFixedImageRegion(
       fixedNormalizer->GetOutput()->GetBufferedRegion());

    typedef RegistrationType::ParametersType ParametersType;
    ParametersType initialParameters(transform->GetNumberOfParameters());

    initialParameters[0] = 0.0;  // Initial offset in mm along X
    initialParameters[1] = 0.0;  // Initial offset in mm along Y

    registration->SetInitialTransformParameters(initialParameters);

    optimizer->SetLearningRate(20.0);
    optimizer->SetNumberOfIterations(200);
    optimizer->MaximizeOn();
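    // Mutual information increases as the images come into alignment, so the
    // optimizer has to maximize the metric.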

    try 
    { 
        registration->Update();
    } 
    catch(itk::ExceptionObject & err) 
    { 
        std::cout << "ExceptionObject caught !" << std::endl; 
        std::cout << err << std::endl; 
        return;
    } 

    ParametersType finalParameters = registration->GetLastTransformParameters();

    double TranslationAlongX = finalParameters[0];
    double TranslationAlongY = finalParameters[1];

    // Print out results
    //
    DEBUG_LOG(QString("Result = "));
    DEBUG_LOG(QString(" Translation X = %1").arg(TranslationAlongX));
    DEBUG_LOG(QString(" Translation Y = %1").arg(TranslationAlongY));
    DEBUG_LOG(QString(" Iterations    = %1").arg(optimizer->GetCurrentIteration()));
    DEBUG_LOG(QString(" Metric value  = %1").arg(optimizer->GetValue()));
    double spacing[3];
    m_mainVolume->getSpacing(spacing);
    DEBUG_LOG(QString(" Translation X (in px) = %1").arg(TranslationAlongX / spacing[0]));
    DEBUG_LOG(QString(" Translation Y (in px) = %1").arg(TranslationAlongY / spacing[1]));

    // Update the transdifference tool data
    m_toolManager->triggerTool("TransDifferenceTool");
    TransDifferenceTool* tdTool = static_cast<TransDifferenceTool*> (m_2DView_2->getViewer()->getToolProxy()->getTool("TransDifferenceTool"));
    if(m_tdToolData == 0){
        m_tdToolData = static_cast<TransDifferenceToolData*> (tdTool->getToolData());
    }
    if(m_tdToolData->getInputVolume() != m_mainVolume){
        m_tdToolData->setInputVolume(m_mainVolume);
    }
    tdTool->setSingleDifferenceImage(TranslationAlongX / spacing[0],TranslationAlongY / spacing[1]);
    m_toolManager->triggerTool("SlicingTool");
    

/*    typedef itk::Image< PixelType, Dimension >  FixedImageType;
    typedef itk::Image< PixelType, Dimension >  MovingImageType;
    typedef itk::TranslationTransform< double, Dimension > TransformType;
    typedef itk::RegularStepGradientDescentOptimizer       OptimizerType;
    typedef itk::MattesMutualInformationImageToImageMetric< 
                                          FixedImageType, 
                                          MovingImageType >    MetricType;
    typedef itk:: LinearInterpolateImageFunction< 
                                    MovingImageType,
                                    double          >    InterpolatorType;
    typedef itk::ImageRegistrationMethod< 
                                    FixedImageType, 
                                    MovingImageType >    RegistrationType;

    MetricType::Pointer         metric        = MetricType::New();
    TransformType::Pointer      transform     = TransformType::New();
    OptimizerType::Pointer      optimizer     = OptimizerType::New();
    InterpolatorType::Pointer   interpolator  = InterpolatorType::New();
    RegistrationType::Pointer   registration  = RegistrationType::New();

    registration->SetMetric(metric);
    registration->SetOptimizer(optimizer);
    registration->SetTransform(transform);
    registration->SetInterpolator(interpolator);

    metric->SetNumberOfHistogramBins(50);
    metric->SetNumberOfSpatialSamples(10000);

    typedef itk::ExtractImageFilter< Volume::ItkImageType, FixedImageType > FilterType;
    
    FilterType::Pointer extractFixedImageFilter = FilterType::New();
    Volume::ItkImageType::RegionType inputRegion = m_mainVolume->getItkData()->GetLargestPossibleRegion();
    Volume::ItkImageType::SizeType size = inputRegion.GetSize();
    // Halve the size so that we only keep the central part,
    // otherwise the background gets registered
    size[0] = size[0] / 2;
    size[1] = size[1] / 2;
    size[2] = 0;
    Volume::ItkImageType::IndexType start = inputRegion.GetIndex();
    const unsigned int sliceReference = m_imageSelectorSpinBox->value();
    // start at one quarter of the image
    start[0] = size[0] / 2;
    start[1] = size[1] / 2;
    start[2] = sliceReference;
    Volume::ItkImageType::RegionType desiredRegion;
    desiredRegion.SetSize(size);
    desiredRegion.SetIndex(start);
    extractFixedImageFilter->SetExtractionRegion(desiredRegion);
    extractFixedImageFilter->SetInput(m_mainVolume->getItkData());
    extractFixedImageFilter->Update();

    FilterType::Pointer extractMovingImageFilter = FilterType::New();
    Volume::ItkImageType::IndexType startMoving = inputRegion.GetIndex();
    const unsigned int sliceNumber = m_2DView_1->getViewer()->getCurrentSlice();
    startMoving[0] = size[0] / 2;
    startMoving[1] = size[1] / 2;
    startMoving[2] = sliceNumber;
    Volume::ItkImageType::RegionType desiredMovingRegion;
    desiredMovingRegion.SetSize(size);
    desiredMovingRegion.SetIndex(startMoving);
    extractMovingImageFilter->SetExtractionRegion(desiredMovingRegion);
    extractMovingImageFilter->SetInput(m_mainVolume->getItkData());
    extractMovingImageFilter->Update();

    registration->SetFixedImage(extractFixedImageFilter->GetOutput());
    registration->SetMovingImage(extractMovingImageFilter->GetOutput());

    typedef RegistrationType::ParametersType ParametersType;
    ParametersType initialParameters(transform->GetNumberOfParameters());

    // It might be better to use the transform it currently has
    initialParameters[0] = 0.0;  // Initial offset in mm along X
    initialParameters[1] = 0.0;  // Initial offset in mm along Y

    registration->SetInitialTransformParameters(initialParameters);

    optimizer->SetMaximumStepLength(4.00);  
    optimizer->SetMinimumStepLength(0.005);

    optimizer->SetNumberOfIterations(200);

    try 
    { 
        registration->StartRegistration(); 
    } 
    catch(itk::ExceptionObject & err) 
    { 
        DEBUG_LOG(QString("ExceptionObject caught !"));
        std::cout<<err<<std::endl;
        return;
    } 
    ParametersType finalParameters = registration->GetLastTransformParameters();

    const double TranslationAlongX = finalParameters[0];
    const double TranslationAlongY = finalParameters[1];

    const unsigned int numberOfIterations = optimizer->GetCurrentIteration();

    const double bestValue = optimizer->GetValue();

    DEBUG_LOG(QString("Result = "));
    DEBUG_LOG(QString(" Translation X = %1").arg(TranslationAlongX));
    DEBUG_LOG(QString(" Translation Y = %1").arg(TranslationAlongY));
    DEBUG_LOG(QString(" Iterations    = %1").arg(numberOfIterations));
    DEBUG_LOG(QString(" Metric value  = %1").arg(bestValue));

    typedef  unsigned char  OutputPixelType;
    typedef itk::Image< OutputPixelType, Dimension > OutputImageType;
    typedef itk::RescaleIntensityImageFilter< FixedImageType, FixedImageType > RescaleFilterType;
    typedef itk::ResampleImageFilter< 
                            FixedImageType, 
                            FixedImageType >    ResampleFilterType;
    typedef itk::CastImageFilter< 
                        FixedImageType,
                        OutputImageType > CastFilterType;
    typedef itk::ImageFileWriter< OutputImageType >  WriterType;

    WriterType::Pointer      writer =  WriterType::New();
    CastFilterType::Pointer  caster =  CastFilterType::New();
    ResampleFilterType::Pointer resample = ResampleFilterType::New();
    RescaleFilterType::Pointer rescaler = RescaleFilterType::New();

    rescaler->SetOutputMinimum(0);
    rescaler->SetOutputMaximum(255);

    TransformType::Pointer finalTransform = TransformType::New();
    finalTransform->SetParameters(finalParameters);
    resample->SetTransform(finalTransform);
    resample->SetSize(extractMovingImageFilter->GetOutput()->GetLargestPossibleRegion().GetSize());
    resample->SetOutputOrigin(extractMovingImageFilter->GetOutput()->GetOrigin());
    resample->SetOutputSpacing(extractMovingImageFilter->GetOutput()->GetSpacing());
    resample->SetDefaultPixelValue(100);

    writer->SetFileName("prova.jpg");

    rescaler->SetInput(extractMovingImageFilter->GetOutput());
    resample->SetInput(rescaler->GetOutput());
    caster->SetInput(resample->GetOutput());
    writer->SetInput(caster->GetOutput());
    writer->Update();
*/

    QApplication::restoreOverrideCursor();

}
bool ShowSegmentationAsSmoothedSurface::ThreadedUpdateFunction()
{
  Image::Pointer image;
  GetPointerParameter("Input", image);

  float smoothing;
  GetParameter("Smoothing", smoothing);

  float decimation;
  GetParameter("Decimation", decimation);

  float closing;
  GetParameter("Closing", closing);

  int timeNr = 0;
  GetParameter("TimeNr", timeNr);

  if (image->GetDimension() == 4)
    MITK_INFO << "CREATING SMOOTHED POLYGON MODEL (t = " << timeNr << ')';
  else
    MITK_INFO << "CREATING SMOOTHED POLYGON MODEL";

  MITK_INFO << "  Smoothing  = " << smoothing;
  MITK_INFO << "  Decimation = " << decimation;
  MITK_INFO << "  Closing    = " << closing;

  Geometry3D::Pointer geometry = dynamic_cast<Geometry3D *>(image->GetGeometry()->Clone().GetPointer());

  // Make ITK image out of MITK image

  typedef itk::Image<unsigned char, 3> CharImageType;
  typedef itk::Image<unsigned short, 3> ShortImageType;
  typedef itk::Image<float, 3> FloatImageType;

  if (image->GetDimension() == 4)
  {
    ImageTimeSelector::Pointer imageTimeSelector = ImageTimeSelector::New();
    imageTimeSelector->SetInput(image);
    imageTimeSelector->SetTimeNr(timeNr);
    imageTimeSelector->UpdateLargestPossibleRegion();
    image = imageTimeSelector->GetOutput(0);
  }

  ImageToItk<CharImageType>::Pointer imageToItkFilter = ImageToItk<CharImageType>::New();

  try
  {
    imageToItkFilter->SetInput(image);
  }
  catch (const itk::ExceptionObject &e)
  {
    // Most probably the input image type is wrong. Binary images are expected to be
    // >unsigned< char images.
    MITK_ERROR << e.GetDescription() << endl;
    return false;
  }

  imageToItkFilter->Update();

  CharImageType::Pointer itkImage = imageToItkFilter->GetOutput();

  // Get bounding box and relabel

  MITK_INFO << "Extracting VOI...";

  int imageLabel = 1;
  bool roiFound = false;

  CharImageType::IndexType minIndex;
  minIndex.Fill(numeric_limits<CharImageType::IndexValueType>::max());

  CharImageType::IndexType maxIndex;
  maxIndex.Fill(numeric_limits<CharImageType::IndexValueType>::min());

  itk::ImageRegionIteratorWithIndex<CharImageType> iter(itkImage, itkImage->GetLargestPossibleRegion());

  for (iter.GoToBegin(); !iter.IsAtEnd(); ++iter)
  {
    if (iter.Get() == imageLabel)
    {
      roiFound = true;
      iter.Set(1);

      CharImageType::IndexType currentIndex = iter.GetIndex();

      for (unsigned int dim = 0; dim < 3; ++dim)
      {
        minIndex[dim] = min(currentIndex[dim], minIndex[dim]);
        maxIndex[dim] = max(currentIndex[dim], maxIndex[dim]);
      }
    }
    else
    {
      iter.Set(0);
    }
  }

  if (!roiFound)
  {
    ProgressBar::GetInstance()->Progress(8);
    MITK_ERROR << "Didn't found segmentation labeled with " << imageLabel << "!" << endl;
    return false;
  }

  ProgressBar::GetInstance()->Progress(1);

  // Extract and pad bounding box

  typedef itk::RegionOfInterestImageFilter<CharImageType, CharImageType> ROIFilterType;

  ROIFilterType::Pointer roiFilter = ROIFilterType::New();
  CharImageType::RegionType region;
  CharImageType::SizeType size;

  for (unsigned int dim = 0; dim < 3; ++dim)
  {
    size[dim] = maxIndex[dim] - minIndex[dim] + 1;
  }

  region.SetIndex(minIndex);
  region.SetSize(size);

  roiFilter->SetInput(itkImage);
  roiFilter->SetRegionOfInterest(region);
  roiFilter->ReleaseDataFlagOn();
  roiFilter->ReleaseDataBeforeUpdateFlagOn();

  typedef itk::ConstantPadImageFilter<CharImageType, CharImageType> PadFilterType;

  PadFilterType::Pointer padFilter = PadFilterType::New();
  const PadFilterType::SizeValueType pad[3] = { 10, 10, 10 };
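  // Pad the cropped ROI by 10 voxels on each side so the closing and smoothing
  // steps below have room to work at the borders.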

  padFilter->SetInput(roiFilter->GetOutput());
  padFilter->SetConstant(0);
  padFilter->SetPadLowerBound(pad);
  padFilter->SetPadUpperBound(pad);
  padFilter->ReleaseDataFlagOn();
  padFilter->ReleaseDataBeforeUpdateFlagOn();
  padFilter->Update();

  CharImageType::Pointer roiImage = padFilter->GetOutput();

  roiImage->DisconnectPipeline();
  roiFilter = nullptr;
  padFilter = nullptr;

  // Correct origin of real geometry (changed by cropping and padding)

  typedef Geometry3D::TransformType TransformType;

  TransformType::Pointer transform = TransformType::New();
  TransformType::OutputVectorType translation;

  for (unsigned int dim = 0; dim < 3; ++dim)
    translation[dim] = (int)minIndex[dim] - (int)pad[dim];

  transform->SetIdentity();
  transform->Translate(translation);
  geometry->Compose(transform, true);

  ProgressBar::GetInstance()->Progress(1);

  // Median

  MITK_INFO << "Median...";

  typedef itk::BinaryMedianImageFilter<CharImageType, CharImageType> MedianFilterType;

  MedianFilterType::Pointer medianFilter = MedianFilterType::New();
  CharImageType::SizeType radius = { 0 };

  medianFilter->SetRadius(radius);
  medianFilter->SetBackgroundValue(0);
  medianFilter->SetForegroundValue(1);
  medianFilter->SetInput(roiImage);
  medianFilter->ReleaseDataFlagOn();
  medianFilter->ReleaseDataBeforeUpdateFlagOn();
  medianFilter->Update();

  ProgressBar::GetInstance()->Progress(1);

  // Intelligent closing

  MITK_INFO << "Intelligent closing...";

  unsigned int surfaceRatio = (unsigned int)((1.0f - closing) * 100.0f);
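  // The closing parameter in [0, 1] is mapped to the percentage expected by the
  // closing filter (closing = 0 -> 100, closing = 1 -> 0).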

  typedef itk::IntelligentBinaryClosingFilter<CharImageType, ShortImageType> ClosingFilterType;

  ClosingFilterType::Pointer closingFilter = ClosingFilterType::New();

  closingFilter->SetInput(medianFilter->GetOutput());
  closingFilter->ReleaseDataFlagOn();
  closingFilter->ReleaseDataBeforeUpdateFlagOn();
  closingFilter->SetSurfaceRatio(surfaceRatio);
  closingFilter->Update();

  ShortImageType::Pointer closedImage = closingFilter->GetOutput();

  closedImage->DisconnectPipeline();
  roiImage = nullptr;
  medianFilter = nullptr;
  closingFilter = nullptr;

  ProgressBar::GetInstance()->Progress(1);

  // Gaussian blur

  MITK_INFO << "Gauss...";

  typedef itk::BinaryThresholdImageFilter<ShortImageType, FloatImageType> BinaryThresholdToFloatFilterType;

  BinaryThresholdToFloatFilterType::Pointer binThresToFloatFilter = BinaryThresholdToFloatFilterType::New();

  binThresToFloatFilter->SetInput(closedImage);
  binThresToFloatFilter->SetLowerThreshold(1);
  binThresToFloatFilter->SetUpperThreshold(1);
  binThresToFloatFilter->SetInsideValue(100);
  binThresToFloatFilter->SetOutsideValue(0);
  binThresToFloatFilter->ReleaseDataFlagOn();
  binThresToFloatFilter->ReleaseDataBeforeUpdateFlagOn();
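  // The mask becomes a 0/100 float image; after Gaussian blurring it is
  // re-thresholded at 50 (the half-maximum iso-level), which smooths the
  // binary boundary.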

  typedef itk::DiscreteGaussianImageFilter<FloatImageType, FloatImageType> GaussianFilterType;

  // From the following line on, IntelliSense (VS 2008) is broken. Any idea how to fix it?
  GaussianFilterType::Pointer gaussFilter = GaussianFilterType::New();

  gaussFilter->SetInput(binThresToFloatFilter->GetOutput());
  gaussFilter->SetUseImageSpacing(true);
  gaussFilter->SetVariance(smoothing);
  gaussFilter->ReleaseDataFlagOn();
  gaussFilter->ReleaseDataBeforeUpdateFlagOn();

  typedef itk::BinaryThresholdImageFilter<FloatImageType, CharImageType> BinaryThresholdFromFloatFilterType;

  BinaryThresholdFromFloatFilterType::Pointer binThresFromFloatFilter = BinaryThresholdFromFloatFilterType::New();

  binThresFromFloatFilter->SetInput(gaussFilter->GetOutput());
  binThresFromFloatFilter->SetLowerThreshold(50);
  binThresFromFloatFilter->SetUpperThreshold(255);
  binThresFromFloatFilter->SetInsideValue(1);
  binThresFromFloatFilter->SetOutsideValue(0);
  binThresFromFloatFilter->ReleaseDataFlagOn();
  binThresFromFloatFilter->ReleaseDataBeforeUpdateFlagOn();
  binThresFromFloatFilter->Update();

  CharImageType::Pointer blurredImage = binThresFromFloatFilter->GetOutput();

  blurredImage->DisconnectPipeline();
  closedImage = nullptr;
  binThresToFloatFilter = nullptr;
  gaussFilter = nullptr;

  ProgressBar::GetInstance()->Progress(1);

  // Fill holes

  MITK_INFO << "Filling cavities...";

  typedef itk::ConnectedThresholdImageFilter<CharImageType, CharImageType> ConnectedThresholdFilterType;

  ConnectedThresholdFilterType::Pointer connectedThresFilter = ConnectedThresholdFilterType::New();

  CharImageType::IndexType corner;

  corner[0] = 0;
  corner[1] = 0;
  corner[2] = 0;

  connectedThresFilter->SetInput(blurredImage);
  connectedThresFilter->SetSeed(corner);
  connectedThresFilter->SetLower(0);
  connectedThresFilter->SetUpper(0);
  connectedThresFilter->SetReplaceValue(2);
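  // Flood-fill the background (value 0) from the corner seed; voxels that are
  // still 0 afterwards are enclosed cavities, which the threshold and add
  // filters below turn back into foreground.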
  connectedThresFilter->ReleaseDataFlagOn();
  connectedThresFilter->ReleaseDataBeforeUpdateFlagOn();

  typedef itk::BinaryThresholdImageFilter<CharImageType, CharImageType> BinaryThresholdFilterType;

  BinaryThresholdFilterType::Pointer binThresFilter = BinaryThresholdFilterType::New();

  binThresFilter->SetInput(connectedThresFilter->GetOutput());
  binThresFilter->SetLowerThreshold(0);
  binThresFilter->SetUpperThreshold(0);
  binThresFilter->SetInsideValue(50);
  binThresFilter->SetOutsideValue(0);
  binThresFilter->ReleaseDataFlagOn();
  binThresFilter->ReleaseDataBeforeUpdateFlagOn();

  typedef itk::AddImageFilter<CharImageType, CharImageType, CharImageType> AddFilterType;

  AddFilterType::Pointer addFilter = AddFilterType::New();

  addFilter->SetInput1(blurredImage);
  addFilter->SetInput2(binThresFilter->GetOutput());
  addFilter->ReleaseDataFlagOn();
  addFilter->ReleaseDataBeforeUpdateFlagOn();
  addFilter->Update();

  ProgressBar::GetInstance()->Progress(1);

  // Surface extraction

  MITK_INFO << "Surface extraction...";

  Image::Pointer filteredImage = Image::New();
  CastToMitkImage(addFilter->GetOutput(), filteredImage);

  filteredImage->SetGeometry(geometry);

  ImageToSurfaceFilter::Pointer imageToSurfaceFilter = ImageToSurfaceFilter::New();

  imageToSurfaceFilter->SetInput(filteredImage);
  imageToSurfaceFilter->SetThreshold(50);
  imageToSurfaceFilter->SmoothOn();
  imageToSurfaceFilter->SetDecimate(ImageToSurfaceFilter::NoDecimation);

  m_Surface = imageToSurfaceFilter->GetOutput(0);

  ProgressBar::GetInstance()->Progress(1);

  // Mesh decimation

  if (decimation > 0.0f && decimation < 1.0f)
  {
    MITK_INFO << "Quadric mesh decimation...";

    vtkQuadricDecimation *quadricDecimation = vtkQuadricDecimation::New();
    quadricDecimation->SetInputData(m_Surface->GetVtkPolyData());
    quadricDecimation->SetTargetReduction(decimation);
    quadricDecimation->AttributeErrorMetricOn();
    quadricDecimation->GlobalWarningDisplayOff();
    quadricDecimation->Update();

    vtkCleanPolyData* cleaner = vtkCleanPolyData::New();
    cleaner->SetInputConnection(quadricDecimation->GetOutputPort());
    cleaner->PieceInvariantOn();
    cleaner->ConvertLinesToPointsOn();
    cleaner->ConvertStripsToPolysOn();
    cleaner->PointMergingOn();
    cleaner->Update();

    m_Surface->SetVtkPolyData(cleaner->GetOutput());
  }

  ProgressBar::GetInstance()->Progress(1);

  // Compute Normals

  vtkPolyDataNormals* computeNormals = vtkPolyDataNormals::New();
  computeNormals->SetInputData(m_Surface->GetVtkPolyData());
  computeNormals->SetFeatureAngle(360.0f);
  computeNormals->FlipNormalsOff();
  computeNormals->Update();

  m_Surface->SetVtkPolyData(computeNormals->GetOutput());

  return true;
}
Example #8
// perform B-spline registration for 2D image
void runBspline2D(StringVector& args) {
    typedef itk::BSplineTransform<double, 2, 3> TransformType;
    typedef itk::LBFGSOptimizer OptimizerType;
    typedef itk::MeanSquaresImageToImageMetric<RealImage2, RealImage2> MetricType;
    typedef itk::LinearInterpolateImageFunction<RealImage2, double> InterpolatorType;
    typedef itk::ImageRegistrationMethod<RealImage2, RealImage2> RegistrationType;

    MetricType::Pointer         metric        = MetricType::New();
    OptimizerType::Pointer      optimizer     = OptimizerType::New();
    InterpolatorType::Pointer   interpolator  = InterpolatorType::New();
    RegistrationType::Pointer   registration  = RegistrationType::New();

    // The old registration framework has problems with multi-threading
    // For now, we set the number of threads to 1
    registration->SetNumberOfThreads(1);

    registration->SetMetric(        metric        );
    registration->SetOptimizer(     optimizer     );
    registration->SetInterpolator(  interpolator  );

    TransformType::Pointer  transform = TransformType::New();
    registration->SetTransform( transform );


    ImageIO<RealImage2> io;

    // Create the synthetic images
    RealImage2::Pointer  fixedImage  = io.ReadImage(args[0]);
    RealImage2::Pointer  movingImage  = io.ReadImage(args[1]);

    // Setup the registration
    registration->SetFixedImage(  fixedImage   );
    registration->SetMovingImage(   movingImage);

    RealImage2::RegionType fixedRegion = fixedImage->GetBufferedRegion();
    registration->SetFixedImageRegion( fixedRegion );

    TransformType::PhysicalDimensionsType   fixedPhysicalDimensions;
    TransformType::MeshSizeType             meshSize;
    for( unsigned int i=0; i < 2; i++ )
    {
        fixedPhysicalDimensions[i] = fixedImage->GetSpacing()[i] *
        static_cast<double>(
                            fixedImage->GetLargestPossibleRegion().GetSize()[i] - 1 );
    }
    unsigned int numberOfGridNodesInOneDimension = 18;
    meshSize.Fill( numberOfGridNodesInOneDimension - 3 );
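    // For a cubic B-spline (order 3) the mesh size is the number of grid nodes
    // per dimension minus the spline order.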
    transform->SetTransformDomainOrigin( fixedImage->GetOrigin() );
    transform->SetTransformDomainPhysicalDimensions( fixedPhysicalDimensions );
    transform->SetTransformDomainMeshSize( meshSize );
    transform->SetTransformDomainDirection( fixedImage->GetDirection() );

    typedef TransformType::ParametersType     ParametersType;

    const unsigned int numberOfParameters =
    transform->GetNumberOfParameters();

    ParametersType parameters( numberOfParameters );

    parameters.Fill( 0.0 );

    transform->SetParameters( parameters );

    //  We now pass the parameters of the current transform as the initial
    //  parameters to be used when the registration process starts.

    registration->SetInitialTransformParameters( transform->GetParameters() );

    std::cout << "Intial Parameters = " << std::endl;
    std::cout << transform->GetParameters() << std::endl;

    //  Next we set the parameters of the LBFGS Optimizer.

    optimizer->SetGradientConvergenceTolerance( 0.005 );
    optimizer->SetLineSearchAccuracy( 0.9 );
    optimizer->SetDefaultStepLength( .1 );
    optimizer->TraceOn();
    optimizer->SetMaximumNumberOfFunctionEvaluations( 1000 );

    std::cout << std::endl << "Starting Registration" << std::endl;

    try
    {
        registration->Update();
        std::cout << "Optimizer stop condition = "
        << registration->GetOptimizer()->GetStopConditionDescription()
        << std::endl;
    }
    catch( itk::ExceptionObject & err )
    {
        std::cerr << "ExceptionObject caught !" << std::endl;
        std::cerr << err << std::endl;
        return;
    }

    OptimizerType::ParametersType finalParameters =
    registration->GetLastTransformParameters();

    std::cout << "Last Transform Parameters" << std::endl;
    std::cout << finalParameters << std::endl;

    transform->SetParameters( finalParameters );

    typedef itk::ResampleImageFilter<RealImage2, RealImage2> ResampleFilterType;

    ResampleFilterType::Pointer resample = ResampleFilterType::New();

    resample->SetTransform( transform );
    resample->SetInput( movingImage );

    resample->SetSize(    fixedImage->GetLargestPossibleRegion().GetSize() );
    resample->SetOutputOrigin(  fixedImage->GetOrigin() );
    resample->SetOutputSpacing( fixedImage->GetSpacing() );
    resample->SetOutputDirection( fixedImage->GetDirection() );
    resample->SetDefaultPixelValue( 100 );
    resample->Update();

    io.WriteImage(args[2], resample->GetOutput());
}
Example #9
/**
 * @brief      3D resample data to new grid size
 *
 * @param  M   Incoming data
 * @param  f   Resampling factor in all 3 dimensions
 * @param  im  Interpolation method (LINEAR|BSPLINE)
 *
 * @return     Resampled data
 */
template<class T> static Matrix<T> 
resample (const Matrix<T>& M, const Matrix<double>& f, const InterpMethod& im) {
	

	Matrix <T> res = M;
	
#ifdef HAVE_INSIGHT
	
	typedef typename itk::OrientedImage< T, 3 > InputImageType;
	typedef typename itk::OrientedImage< T, 3 > OutputImageType;
	typedef typename itk::IdentityTransform< double, 3 > TransformType;
	typedef typename itk::LinearInterpolateImageFunction< InputImageType, double > InterpolatorType;
	typedef typename itk::ResampleImageFilter< InputImageType, InputImageType > ResampleFilterType;
	
	typename InterpolatorType::Pointer linterp = InterpolatorType::New();
	
	TransformType::Pointer trafo = TransformType::New();
	trafo->SetIdentity();
	
	typename InputImageType::SpacingType space;
	space[0] = 1.0/f[0];
	space[1] = 1.0/f[1];
	space[2] = 1.0/f[2];
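	// f > 1 requests a finer grid: the output spacing is 1/f of the (unit) input spacing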
	
	typedef typename InputImageType::SizeType::SizeValueType SizeValueType;
	typename InputImageType::SizeType size; 
	size[0] = static_cast<SizeValueType>(res.Dim(0));
	size[1] = static_cast<SizeValueType>(res.Dim(1));
	size[2] = static_cast<SizeValueType>(res.Dim(2));
	
	typename itk::OrientedImage< T, 3 >::Pointer input = itk::OrientedImage< T, 3 >::New();
	typename itk::OrientedImage< T, 3 >::Pointer output = itk::OrientedImage< T, 3 >::New();
	
	typename itk::Image< T, 3 >::IndexType ipos;
	ipos[0] = 0; ipos[1] = 0; ipos[2] = 0;
	typename itk::Image< T, 3 >::IndexType opos;
	opos[0] = 0; opos[1] = 0; opos[2] = 0;
	
	typename itk::Image< T, 3 >::RegionType ireg;
	ireg.SetSize(size);
	ireg.SetIndex(ipos);
	input->SetRegions(ireg);
	input->Allocate();
	
	typename itk::Image< T, 3 >::RegionType oreg;
	oreg.SetSize(size);
	oreg.SetIndex(opos);
	output->SetRegions(oreg);
	output->Allocate();
	
	for (size_t z = 0; z < res.Dim(2); z++)
		for (size_t y = 0; y < res.Dim(1); y++)
			for (size_t x = 0; x < res.Dim(0); x++) {
				ipos[0] = x; ipos[1] = y; ipos[2] = z;
				input->SetPixel (ipos, res.At(x,y,z));
			}
	
	// the output grid needs f times as many samples per dimension as the input
	typename InputImageType::SizeType osize;
	osize[0] = static_cast<SizeValueType>(res.Dim(0)*f[0]);
	osize[1] = static_cast<SizeValueType>(res.Dim(1)*f[1]);
	osize[2] = static_cast<SizeValueType>(res.Dim(2)*f[2]);
	
	typename ResampleFilterType::Pointer rs = ResampleFilterType::New();
	rs->SetInput( input );
	rs->SetTransform( trafo );
	rs->SetInterpolator( linterp );
	rs->SetOutputOrigin ( input->GetOrigin());
	rs->SetOutputSpacing ( space );
	rs->SetOutputDirection ( input->GetDirection());
	rs->SetSize ( osize );
	rs->Update ();
	
	output = rs->GetOutput();
	
	res = Matrix<T> (res.Dim(0)*f[0], res.Dim(1)*f[1], res.Dim(2)*f[2]);
	res.Res(0) = res.Res(0)/f[0];
	res.Res(1) = res.Res(1)/f[1];
	res.Res(2) = res.Res(2)/f[2];
	
	for (size_t z = 0; z < res.Dim(2); z++)
		for (size_t y = 0; y < res.Dim(1); y++)
			for (size_t x = 0; x < res.Dim(0); x++) {
				opos[0] = x; opos[1] = y; opos[2] = z;
				res.At(x,y,z) = output->GetPixel (opos);
			}
	
#else 
	
	printf ("ITK ERROR - Resampling not performed without ITK!\n");
	
#endif
	
	return res;
	
}
Example #10
int main (int argc, char **argv)
{
  int verbose=0, clobber=0,skip_grid=0;
  int order=2;
  std::string like_f,xfm_f,output_f,input_f;
  double uniformize=0.0;
  int invert=0;
  char *history = time_stamp(argc, argv); 
  
  static struct option long_options[] = {
		{"verbose", no_argument,       &verbose, 1},
		{"quiet",   no_argument,       &verbose, 0},
		{"clobber", no_argument,       &clobber, 1},
		{"like",    required_argument, 0, 'l'},
		{"transform",    required_argument, 0, 't'},
    {"order",    required_argument, 0, 'o'},
    {"uniformize",    required_argument, 0, 'u'},
    {"invert_transform", no_argument, &invert, 1},
		{0, 0, 0, 0}
		};
  
  for (;;) {
      /* getopt_long stores the option index here. */
      int option_index = 0;

      int c = getopt_long (argc, argv, "vqcl:t:o:u:", long_options, &option_index);

      /* Detect the end of the options. */
      if (c == -1) break;

      switch (c)
			{
			case 0:
				break;
			case 'v':
				cout << "Version: 0.1" << endl;
				return 0;
      case 'l':
        like_f=optarg; break;
      case 't':
        xfm_f=optarg; break;
      case 'o':
        order=atoi(optarg);break;
      case 'u':
        uniformize=atof(optarg);break;
			case '?':
				/* getopt_long already printed an error message. */
			default:
				show_usage (argv[0]);
				return 1;
			}
    }

	if ((argc - optind) < 2) {
		show_usage(argv[0]);
		return 1;
	}
  input_f=argv[optind];
  output_f=argv[optind+1];
  
  if (!clobber && !access (output_f.c_str (), F_OK))
  {
    std::cerr << output_f.c_str () << " Exists!" << std::endl;
    return 1;
  }
  
	try
  {
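    // register the MINC image IO factory so ImageFileReader/Writer can handle .mnc files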
    itk::ObjectFactoryBase::RegisterFactory(itk::MincImageIOFactory::New());
    itk::ImageFileReader<minc::image3d >::Pointer reader = itk::ImageFileReader<minc::image3d >::New();
    
    //initializing the reader
    reader->SetFileName(input_f.c_str());
    reader->Update();
    
		minc::image3d::Pointer in=reader->GetOutput();

		FilterType::Pointer filter  = FilterType::New();
    
    //creating coordinate transformation objects
		TransformType::Pointer transform = TransformType::New();
    if(!xfm_f.empty())
    {
      //reading a minc style xfm file
      transform->OpenXfm(xfm_f.c_str());
      if(!invert) transform->Invert(); //should be inverted by default to walk through target space
      filter->SetTransform( transform );
    }

    //creating the interpolator
		InterpolatorType::Pointer interpolator = InterpolatorType::New();
		interpolator->SetSplineOrder(order);
		filter->SetInterpolator( interpolator );
		filter->SetDefaultPixelValue( 0 );
    
    //this is for processing using batch system
    filter->SetNumberOfThreads(1);
    
    if(!like_f.empty())
    {
      itk::ImageFileReader<minc::image3d >::Pointer reader = itk::ImageFileReader<minc::image3d >::New();
      reader->SetFileName(like_f.c_str());
      reader->Update();
      if(uniformize!=0.0)
      {
        generate_uniform_sampling(filter,reader->GetOutput(),uniformize);
      } else {
        filter->SetOutputParametersFromImage(reader->GetOutput());
        filter->SetOutputDirection(reader->GetOutput()->GetDirection());
      }
    }
    else
    {
      if(uniformize!=0.0)
      {
        generate_uniform_sampling(filter,in,uniformize);
      } else {
        //we are using original sampling
        filter->SetOutputParametersFromImage(in);
        filter->SetOutputDirection(in->GetDirection());
      }
    }
    
		filter->SetInput(in);
    filter->Update();
    //copy the metadata information, for some reason it is not preserved
    //filter->GetOutput()->SetMetaDataDictionary(reader->GetOutput()->GetMetaDataDictionary());
    minc::image3d::Pointer out=filter->GetOutput();
    minc::copy_metadata(out,in);
    minc::append_history(out,history);
    free(history);
    
    //generic file writer
    itk::ImageFileWriter< minc::image3d >::Pointer writer = itk::ImageFileWriter<minc::image3d >::New();
    writer->SetFileName(output_f.c_str());
     
    writer->SetInput( out );
    //writer->UseInputMetaDataDictionaryOn();
    
    writer->Update();
    
		return 0;
	} catch (const minc::generic_error & err) {
    cerr << "Got an error at:" << err.file () << ":" << err.line () << endl;
    return 1;
  }
  catch( itk::ExceptionObject & err )
  {
    std::cerr << "ExceptionObject caught !" << std::endl;
    std::cerr << err << std::endl;
    return 2;
  }
	return 0;
};
Example #11
int main( int argc, char *argv[] )
{
  string input_name;
  string output_dir;
  if (argc == 3) {
    input_name = argv[1];
    output_dir = argv[2];
  }

  const     unsigned int   Dimension = 3;
  const     unsigned int   OutDimension = 2;
  typedef short InputPixelType;
  typedef int FilterPixelType;
  typedef itk::Image< InputPixelType,  Dimension >   InputImageType;
  typedef itk::Image< FilterPixelType, Dimension >   FilterImageType;
  typedef itk::Image< FilterPixelType, OutDimension >   OutFilterImageType;

  InputImageType::Pointer image;
  itk::MetaDataDictionary dict;


  if (input_name.size() && output_dir.size()) 
    {
      if (boost::filesystem::is_regular_file( input_name )) {
	typedef itk::ImageFileReader< InputImageType >  ReaderType;
	ReaderType::Pointer reader = ReaderType::New();
	reader->SetFileName( input_name );
	try 
	  { 
	  reader->Update();
	  } 
	catch( itk::ExceptionObject & err ) 
	  { 
	  std::cerr << "ERROR: ExceptionObject caught !" << std::endl; 
	  std::cerr << err << std::endl; 
	  return EXIT_FAILURE;
	  } 
	image = reader->GetOutput();
	dict = reader->GetMetaDataDictionary();
      } else if (boost::filesystem::is_directory( input_name )) {
        itkBasic::SeriesReader sreader( input_name );
	sreader.readSeriesData( 2 );
	try 
	{
	    itkBasic::ReaderType::Pointer imageReader = itkBasic::ReaderType::New();
	    itkBasic::FileNamesContainer fc;
	    sreader.getSeriesFileNames(0, fc);
	    image = itkBasic::getDicomSerie( fc, imageReader, 1 ); 
	    dict = *((*imageReader->GetMetaDataDictionaryArray())[0]);
	}
	catch( itk::ExceptionObject & err ) 
	  { 
	  std::cerr << "ERROR: ExceptionObject caught !" << std::endl; 
	  std::cerr << err << std::endl; 
	  return EXIT_FAILURE;
	  } 
      }
    }
    
    if (!image) {
	std::cerr << argv[0] << ": input output" << std::endl;
	exit(1);
    }
  

  typedef itk::SigmoidImageFilter< InputImageType, FilterImageType > SigmoidCasterType;
  SigmoidCasterType::Pointer sigmoidcaster = SigmoidCasterType::New();
  
  sigmoidcaster->SetInput( image );
  sigmoidcaster->SetOutputMaximum( 4000 );
  sigmoidcaster->SetOutputMinimum( 1000 );

  
  typedef itk::AccumulateImageFilter< FilterImageType, FilterImageType > AccumulateFilter;
  AccumulateFilter::Pointer accumulator = AccumulateFilter::New();
  accumulator->SetAccumulateDimension(1);
  accumulator->SetInput( sigmoidcaster->GetOutput() );
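  // accumulate (sum) the volume along dimension 1 to form a projection image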

  typedef itk::ExtractImageFilter< FilterImageType, OutFilterImageType > ExtractFilter;
  ExtractFilter::Pointer extractor = ExtractFilter::New();
  extractor->SetInput( accumulator->GetOutput() );
  FilterImageType::Pointer accuOut = accumulator->GetOutput();
  accuOut->UpdateOutputInformation();
  FilterImageType::RegionType extractRegion = accuOut->GetLargestPossibleRegion();
  
  extractRegion.SetSize(1,0);
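  // a size of 0 along dimension 1 tells ExtractImageFilter to collapse that
  // dimension, producing a 2D image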
  
  extractor->SetExtractionRegion( extractRegion );

  typedef itk::ResampleImageFilter<OutFilterImageType, OutFilterImageType > ResampleFilter;
  ResampleFilter::Pointer resampler = ResampleFilter::New();
  resampler->SetInput( extractor->GetOutput() );
  
  typedef itk::BSplineInterpolateImageFunction< OutFilterImageType > InterpolatorType;
  InterpolatorType::Pointer interpolator = InterpolatorType::New();
  interpolator->SetSplineOrder(3);
  
  resampler->SetInterpolator( interpolator );
  OutFilterImageType::Pointer exOut = extractor->GetOutput();
  exOut->UpdateOutputInformation();
  
  typedef itk::CenteredRigid2DTransform< double > TransformType;
  TransformType::Pointer transform = TransformType::New();
  transform->SetIdentity();
  OutFilterImageType::PointType exOutCenter = exOut->GetOrigin();
  exOutCenter[0] += (exOut->GetLargestPossibleRegion().GetSize()[0]-1) * exOut->GetSpacing()[0] *.5;
  exOutCenter[1] += (exOut->GetLargestPossibleRegion().GetSize()[1]-1) * exOut->GetSpacing()[1] *.5;
  transform->SetCenter( exOutCenter );
  transform->SetAngleInDegrees( 180 );
  resampler->SetTransform( transform );
  resampler->SetOutputParametersFromImage( exOut );

  OutFilterImageType::SpacingType resampleSpacing = exOut->GetSpacing();
  resampleSpacing.Fill( std::min( resampleSpacing[0], resampleSpacing[1] ) );
  OutFilterImageType::SizeType resampleSize;
  resampleSize[0] = exOut->GetLargestPossibleRegion().GetSize()[0] * exOut->GetSpacing()[0] / resampleSpacing[0];
  resampleSize[1] = exOut->GetLargestPossibleRegion().GetSize()[1] * exOut->GetSpacing()[1] / resampleSpacing[1];
  resampler->SetSize( resampleSize );
  resampler->SetOutputSpacing( resampleSpacing );
  
  OutFilterImageType::Pointer result = resampler->GetOutput();
  
  sigmoidcaster->SetBeta( -500 );
  sigmoidcaster->SetAlpha( 5 );
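  // Beta is the centre and Alpha the width of the sigmoid intensity window;
  // this pair selects low (lung-like) intensities, cf. the commented-out "lung" output below.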
  result->Update();

  int outDicomIndex = 0;
  itk::EncapsulateMetaData( dict, "0008|0008", string("DERIVED\\SECONDARY\\AXIAL"));
  
  boost::filesystem::path outpath = output_dir;
  outpath = outpath / "IM%06d";
  
  std::vector< itk::MetaDataDictionary* > dictArray;
  dictArray.push_back(&dict);
  
  itkBasic::writeDicomSeries( itkBasic::ImageRescale(itkBasic::ImageSharp(result, 0.5), -1000, 4000), outpath.string(), &dictArray, outDicomIndex);
//  itkBasic::ImageSave( itkBasic::ImageSharp(result, 0.5), boost::str( boost::format("%s.%s.png") % output_name % "lung" ), 1, 0); // Auto Level

  sigmoidcaster->SetBeta( 1000 );
  sigmoidcaster->SetAlpha( 300 );
  result->Update();
  itkBasic::writeDicomSeries( itkBasic::ImageRescale(itkBasic::ImageSharp(result, 0.5), -1000, 4000), outpath.string(), &dictArray, outDicomIndex);
//  itkBasic::ImageSave( itkBasic::ImageSharp(result, 0.5), boost::str( boost::format("%s.%s.png") % output_name % "bone" ), 1, 0); // Auto Level
  
  sigmoidcaster->SetBeta( 0 );
  sigmoidcaster->SetAlpha( 2000 );
  result->Update();
  itkBasic::writeDicomSeries( itkBasic::ImageRescale(itkBasic::ImageSharp(result, 0.5), -1000, 4000), outpath.string(), &dictArray, outDicomIndex);
//  itkBasic::ImageSave( itkBasic::ImageSharp(result, 0.5), boost::str( boost::format("%s.%s.png") % output_name % "normal" ), 1, 0); // Auto Level
}
int main(int argc, char *argv[])
{
	std::string inputFilenamesFilename = argv[1];
	double keyPointIntensityThreshold = atof(argv[2]);
	double dogSplitsPerOctave = atof(argv[3]);
	double startingScale = atof(argv[4]);
	double eLocation = atof(argv[5]);
	double eScale = std::log(atof(argv[6]));
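	// the scale tolerance is passed in log space, hence the logarithm of the command-line value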
	double eOrientation = atof(argv[7]);
	double gammaValue = atof(argv[8]);

	std::string distanceMapFilenamesFilename = argv[9];
	double extractionDistanceThreshold = atof(argv[10]);




	// load up the set of aligned images
	FilenamesReader::FilenamesType inputFilenames = FilenamesReader::Read(inputFilenamesFilename);
	FilenamesReader::FilenamesType distanceMapFilenames   = FilenamesReader::Read(distanceMapFilenamesFilename);
	ImageVolumeList images;
	RealVolumeList distanceMaps;
	for(unsigned int i = 0; i < inputFilenames.size(); i++)
	{
		ImageVolume::Pointer image = ImageVolumeIO::Read(inputFilenames[i]);
		images.push_back(image);
		RealVolume::Pointer distMap = RealVolumeIO::Read(distanceMapFilenames[i]);
		distanceMaps.push_back(distMap);
	}


	unsigned int sliceToTest = 7;

	// for each slice we want to learn the features
	const unsigned int sliceNum = images.front()->GetLargestPossibleRegion().GetSize()[2];
	for(unsigned int slice = sliceToTest; slice < sliceNum; slice++)
	{

		// get the set of slices that have some image data in them
		ImageSliceList validImages;
		RealSliceList validDistanceMaps;
		
		for(unsigned int im = 0; im < images.size(); im++)
		{
			ImageSlice::Pointer extractedSlice = ImageSlice::New();
			RealSlice::Pointer distanceSlice = RealSlice::New();
			ExtractSlice<ImageVolume, ImageSlice>(images[im], slice, extractedSlice);
			ExtractSlice<RealVolume, RealSlice>(distanceMaps[im], slice, distanceSlice);

			if(ImageContainsData(extractedSlice))
			{
				validDistanceMaps.push_back(distanceSlice);
				validImages.push_back(extractedSlice);
			}
		}

		/*
		if(validImages.size() < 3)
			continue;
		*/

		std::cout << "Slice Num: " << slice << " Image Number: " << validImages.size() << std::endl;



		typedef itk::Vector<double, 2> VectorType;
		typedef itk::Image<VectorType, 2> GradientType;
		typedef filter::HistogramOfGradeintsFeatureExtractor<GradientType> FeatureBuilderType;
		typedef FeatureBuilderType::FeatureType HoGFeatureType;
		std::vector<HoGFeatureType> allFeatures;
		std::vector<HoGFeatureType> allFeatures1;
		std::vector<HoGFeatureType> allFeatures2;

		

		unsigned int featureCount = 0;
		for(unsigned int im = 0; im < validImages.size(); im++)
		{
			ImageSlice::Pointer extractedSlice = validImages[im];

			// first we extract all of the keypoints points
			typedef filter::DoGKeyPointExtractor<utils::ImageSlice> ExtractorType;
			ExtractorType::Pointer extractor = ExtractorType::New();
			extractor->SetInput(extractedSlice);
			extractor->SetKeypointThreshold(keyPointIntensityThreshold);
			extractor->SetSplitsPerOctave(dogSplitsPerOctave);
			extractor->SetStartingSigma(startingScale);
			extractor->SetDistanceMap(validDistanceMaps[im]);
			extractor->SetDistanceThreshold(extractionDistanceThreshold);
			extractor->Update();

			// orientate the feature points
			typedef filter::KeyPointOrientator<utils::ImageSlice> Orientator;
			Orientator::Pointer orientator  = Orientator::New();
			orientator->SetInput(extractedSlice);
			orientator->SetKeyPoints(extractor->GetOutput());
			orientator->SetHistogramBins(32);
			orientator->SetSigmaScale(2);
			orientator->SetSampleRadius(5);
			orientator->Update();

			Orientator::OrientatedKeyPointMap orientateKeyPoints = orientator->GetOutput();


			

			// now we go through the features and compute the HOG descriptors
			Orientator::OrientatedKeyPointMap::iterator keyPointIt = orientateKeyPoints.begin();
			std::cout << orientateKeyPoints.size() << std::endl;
			while(keyPointIt != orientateKeyPoints.end())
			{
				double sigma = keyPointIt->first;
				Orientator::OrientatedKeyPointList keyPoints = keyPointIt->second;

				// smooth the image to the sigma level
				typedef itk::DiscreteGaussianImageFilter<utils::ImageSlice, utils::RealSlice> Smoother;
				Smoother::Pointer smoother = Smoother::New();
				smoother->SetInput(extractedSlice);
				smoother->SetVariance(sigma*sigma);
				smoother->SetUseImageSpacingOn();

				typedef itk::GradientRecursiveGaussianImageFilter<RealSlice, GradientType> GradientFilterType;
				GradientFilterType::Pointer gradientFilter = GradientFilterType::New();
				gradientFilter->SetInput(smoother->GetOutput());
				//gradientFilter->SetSigma(sigma);
				gradientFilter->Update();



		
				


				std::cout << "Doing Sigma " << sigma << " Key Point Number: " << keyPoints.size() << std::endl;



				for(unsigned int fnum = 0; fnum < keyPoints.size(); fnum++)
				{
					Orientator::OrientatedKeyPoint keyPoint = keyPoints[fnum];

					// build the tranform
					typedef itk::CenteredRigid2DTransform<double> TransformType;
					TransformType::Pointer transform = TransformType::New();
					transform->SetCenter(keyPoint.location);
					transform->SetAngleInDegrees(360-keyPoint.degrees);

					// extract the patch from the gradient image
					typedef filter::ImagePatchExtractor<GradientType> PatchExtractorType;
					PatchExtractorType::Pointer patchExtractor = PatchExtractorType::New();
					patchExtractor->SetInput(gradientFilter->GetOutput());
					patchExtractor->SetTransform(transform);
					patchExtractor->SetScale(keyPoint.scale*2);

					PatchExtractorType::SizeType patchSize;
					patchSize.Fill(10);
					patchExtractor->SetPatchSize(patchSize);
					patchExtractor->SetInputPoint(keyPoint.location);
					patchExtractor->Update();


					/*
					// validate the keypoint
					typedef filter::StructureTensorKeyPointValidator<utils::ImageSlice> ValidatorType;
					ValidatorType::Pointer validator = ValidatorType::New();
					validator->SetInput(extractedSlice);	
					validator->SetKeyPoint(keyPoint);
					validator->SetRatio(validatorBeta);
					validator->Update();


					bool valid = validator->IsValid();
					*/


					// create the descriptor
					FeatureBuilderType::Pointer builder = FeatureBuilderType::New();
					builder->SetInput(patchExtractor->GetOutput());
					builder->SetOrientationBins(8);
					builder->SetKeyPoint(keyPoint);
					builder->Update();


					// add the feature to the list
					FeatureBuilderType::FeatureType feature = builder->GetOutput();
					feature.featureId = featureCount;
					allFeatures.push_back(feature);

					featureCount++;

				}



				
				keyPointIt++;
			}
		}

		




		std::cout << "Computing Distance Matrix" << std::endl;
		// compute the distance matrix
		typedef utils::DoubleMatrixType MatrixType;
		MatrixType distanceMatrix = MatrixType::Zero(allFeatures.size(), allFeatures.size());
		ComputeDifferenceMatrix(allFeatures, distanceMatrix);


		std::cout << "Grouping Features" << std::endl;
		// now we group the features by their geometry
		typedef filter::FeaturePointGrouper<2> GrouperType;
		GrouperType::Pointer grouper = GrouperType::New();
		grouper->SetInput(allFeatures);
		grouper->SetAngleThreshold(eOrientation);
		grouper->SetLocationThreshold(eLocation);
		grouper->SetScaleThreshold(eScale);
		grouper->Update();





		std::cout << "Creating Clusters" << std::endl;
		GrouperType::FeatureGroupList clusters = grouper->GetOutput();
		std::sort(clusters.begin(), clusters.end());

		GrouperType::FeatureGroupList newClusters;

		for(unsigned int i = 0; i < clusters.size(); i++)
		{
			typedef filter::FeatureClusterLearner<2> ClusterLearnerType;
			ClusterLearnerType::Pointer learner = ClusterLearnerType::New();
			learner->SetInput(clusters[i]);
			learner->SetFeatures(allFeatures);
			learner->SetDistanceMatrix(distanceMatrix);
			learner->SetGamma(gammaValue);
			learner->Update();
			
			ClusterLearnerType::ClusterType newCluster = learner->GetOutput();
			newClusters.push_back(newCluster);



		}

		std::cout << "Culling Clusters" << std::endl;
		
		typedef filter::FeatureClusterCuller<2> Culler;
		Culler::Pointer culler = Culler::New();
		culler->SetInput(newClusters);
		culler->Update();


		Culler::ClusterList culledClusters = culler->GetOutput();
		std::sort(culledClusters.begin(), culledClusters.end());
		for(unsigned int i = 0; i < culledClusters.size(); i++)
		{
			typedef filter::ClusterDistributionGenerator<2> DistributionGenerator;
			DistributionGenerator::Pointer generator = DistributionGenerator::New();
			generator->SetInput(culledClusters[i]);
			generator->Update();

			exit(1);
		}



		/*


		ImageSlice::Pointer extractedSlice = ImageSlice::New();
		ExtractSlice<ImageVolume, ImageSlice>(images[0], sliceToTest, extractedSlice);

		std::vector<std::pair<int, ImageSlice::PointType> > testOut;
		for(unsigned int i = 0; i < culledClusters.size(); i++)
		{
			for(unsigned int j = 0; j < culledClusters[i].clusterMembers.size(); j++)
			{
			
				std::pair<int, ImageSlice::PointType> p(i, culledClusters[i].clusterMembers[j].keyPoint.location);
				testOut.push_back(p);				
			}			
		}




		utils::DoubleMatrixType pOut = utils::DoubleMatrixType::Zero(testOut.size(), 2);
		utils::IntMatrixType iOut = utils::IntMatrixType::Zero(testOut.size(),1);
		for(unsigned int i = 0; i < testOut.size(); i++)
		{
			itk::ContinuousIndex<double, 2> contIndex;
			extractedSlice->TransformPhysicalPointToContinuousIndex(testOut[i].second, contIndex);

			pOut(i,0) = contIndex[0];			
			pOut(i,1) = contIndex[1];

			iOut(i,0) = testOut[i].first;			
		}


	


		utils::MatrixDataSet::Pointer dout = utils::MatrixDataSet::New();
		dout->AddData("locations", pOut);
		dout->AddData("index", iOut);
		utils::MatrixWriter::Pointer writer = utils::MatrixWriter::New();
		writer->SetInput(dout);
		writer->SetFilename("data.hdf5");
		writer->Update();



		exit(1);
		*/

		


		/*
		// compute the affinity matrix between all of the features
		unsigned int numFeatures = allFeatures.size();
		double sum = 0.0;
		int count = 0;
		int max = 0;
		int maxId = 0;
		std::vector<int> counts;

		std::vector<Cluster> allGroups;

		// group all the features that have a similar location / scale / orientation
		for(unsigned int i = 0; i < numFeatures; i++)
		{
			Cluster cluster;
			cluster.featureIndex = i;
			cluster.feature = allFeatures[i];
			cluster.e = 0.0;
			cluster.members.push_back(allFeatures[i]);
			
			for(unsigned int j = 0; j < numFeatures; j++)
			{
				if(i == j) continue;
				if(AreSimilar(allFeatures[i], allFeatures[j],
							eLocation, eOrientation, eScale))
				{
					cluster.members.push_back(allFeatures[j]);
				}
			}
			
			counts.push_back(cluster.members.size());
			if(cluster.members.size() > max)
			{
				max = cluster.members.size();
				maxId = i;
			}

			allGroups.push_back(cluster);
			sum += cluster.members.size();
			std::sort(cluster.members.begin(), cluster.members.end(), member_sort);
			count++;
		}



		std::sort(counts.begin(), counts.end());
		for(unsigned int i = 0; i < counts.size(); i++)
		{
			std::cout << counts[i] << std::endl;
		}


		// compute the difference matrix
		utils::DoubleMatrixType diffMat;
		ComputeDifferenceMatrix(allFeatures, diffMat);

		// loop through the groups to form the clusters
		std::vector<Cluster> allClusters;
		for(unsigned int i = 0; i < allGroups.size(); i++)
		{
			Cluster cluster;
			CreateCluster(allGroups[i], allFeatures, diffMat, cluster);
			allClusters.push_back(cluster);
		}

		// remove duplicates
		std::vector<int> toRemove;
		for(unsigned int i = 0; i < allClusters.size(); i++)
		{
			bool duplicate = false;
			for(unsigned int j = i; j < allClusters.size(); j++)
			{
				if(i == j) continue;
				if(allClusters[i].members.size() != allClusters[j].members.size())
					continue;

				bool sameMembers = true;
				for(unsigned int k = 0; k < allClusters[i].members.size(); k++)
				{
					if(allClusters[i].members[k].index != allClusters[j].members[k].index)
					{
						sameMembers = false;				
						break;
					}
				}

				if(sameMembers)
				{
					duplicate = true;
				}

				if(duplicate) break;
			}
			if(duplicate) toRemove.push_back(i);

		}

		
		std::cout << allClusters.size() << std::endl;
		for(unsigned int i = 0; i < toRemove.size(); i++)
		{
	//		allClusters.erase(allGroups.begin()+(toRemove[i]-i));
		}
		std::cout << allClusters.size() << std::endl;

		// trim the clusters
		std::vector<Cluster> trimmedClusters;
		TrimClusters(allClusters, trimmedClusters);
		std::cout << trimmedClusters.size() << std::endl;

		std::vector<std::pair<int, ImageSlice::PointType> > testOut;


		for(unsigned int i = 0; i < trimmedClusters.size(); i++)
		{
			for(unsigned int j = 0; j < trimmedClusters[i].members.size(); j++)
			{
				std::pair<int, ImageSlice::PointType> p(i, trimmedClusters[i].members[j].location);
				testOut.push_back(p);				
			}			
			std::cout << trimmedClusters[i].members.size() << std::endl;
		}

		ImageSlice::Pointer extractedSlice = ImageSlice::New();
		ExtractSlice<ImageVolume, ImageSlice>(images[0], sliceToTest, extractedSlice);


		utils::DoubleMatrixType pOut = utils::DoubleMatrixType::Zero(testOut.size(), 2);
		utils::IntMatrixType iOut = utils::IntMatrixType::Zero(testOut.size(),1);
		for(unsigned int i = 0; i < testOut.size(); i++)
		{
			itk::ContinuousIndex<double, 2> contIndex;
			extractedSlice->TransformPhysicalPointToContinuousIndex(testOut[i].second, contIndex);

			pOut(i,0) = contIndex[0];			
			pOut(i,1) = contIndex[1];
			
			// compute the image indexes


			
	
			iOut(i,0) = testOut[i].first;			
		}


	


		utils::MatrixDataSet::Pointer dout = utils::MatrixDataSet::New();
		dout->AddData("locations", pOut);
		dout->AddData("index", iOut);
		utils::MatrixWriter::Pointer writer = utils::MatrixWriter::New();
		writer->SetInput(dout);
		writer->SetFilename("data.hdf5");
		writer->Update();
		exit(1);
		*/
	}

	return 0;
}
int main( int argc, char *argv[] )
{
if( argc < 4 )
{
std::cerr << "Missing Parameters " << std::endl;
std::cerr << "Usage: " << argv[0];
std::cerr << " fixedImageFile movingImageFile ";
std::cerr << " outputImagefile [differenceBeforeRegistration] ";
std::cerr << " [differenceAfterRegistration] ";
std::cerr << " [sliceBeforeRegistration] ";
std::cerr << " [sliceDifferenceBeforeRegistration] ";
std::cerr << " [sliceDifferenceAfterRegistration] ";
std::cerr << " [sliceAfterRegistration] " << std::endl;
return EXIT_FAILURE;
}
const unsigned int Dimension = 3;
typedef float PixelType;
typedef itk::Image< PixelType, Dimension > FixedImageType;
typedef itk::Image< PixelType, Dimension > MovingImageType;
// Software Guide : BeginLatex
//
// The Transform class is instantiated using the code below. The only
// template parameter to this class is the representation type of the
// space coordinates.
//
// \index{itk::Versor\-Rigid3D\-Transform!Instantiation}
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef itk::VersorRigid3DTransform< double > TransformType;
// Software Guide : EndCodeSnippet

// Metric and optimizer types as in the standard VersorRigid3D registration
// example (assumed; the original typedefs were not shown in this excerpt).
typedef itk::VersorRigid3DTransformOptimizer OptimizerType;
typedef itk::MeanSquaresImageToImageMetric< FixedImageType, MovingImageType > MetricType;
typedef itk::LinearInterpolateImageFunction< MovingImageType, double > InterpolatorType;
typedef itk::ImageRegistrationMethod< FixedImageType, MovingImageType > RegistrationType;

MetricType::Pointer metric = MetricType::New();
OptimizerType::Pointer optimizer = OptimizerType::New();
InterpolatorType::Pointer interpolator = InterpolatorType::New();
RegistrationType::Pointer registration = RegistrationType::New();
registration->SetMetric( metric );
registration->SetOptimizer( optimizer );
registration->SetInterpolator( interpolator );
// Software Guide : BeginLatex
//
// The transform object is constructed below and passed to the registration
// method.
//
// \index{itk::Versor\-Rigid3D\-Transform!New()}
// \index{itk::Versor\-Rigid3D\-Transform!Pointer}
// \index{itk::Registration\-Method!SetTransform()}
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
TransformType::Pointer transform = TransformType::New();
registration->SetTransform( transform );
// Software Guide : EndCodeSnippet
typedef itk::ImageFileReader< FixedImageType > FixedImageReaderType;
typedef itk::ImageFileReader< MovingImageType > MovingImageReaderType;
FixedImageReaderType::Pointer fixedImageReader = FixedImageReaderType::New();
MovingImageReaderType::Pointer movingImageReader = MovingImageReaderType::New();
fixedImageReader->SetFileName( argv[1] );
movingImageReader->SetFileName( argv[2] );
registration->SetFixedImage( fixedImageReader->GetOutput() );
registration->SetMovingImage( movingImageReader->GetOutput() );
fixedImageReader->Update();
registration->SetFixedImageRegion(
fixedImageReader->GetOutput()->GetBufferedRegion() );
// Software Guide : BeginLatex
//
// The input images are taken from readers. It is not necessary here to
// explicitly call \code{Update()} on the readers since the
// \doxygen{CenteredTransformInitializer} will do it as part of its
// computations. The following code instantiates the type of the
// initializer. This class is templated over the fixed and moving image type
// as well as the transform type. An initializer is then constructed by
// calling the \code{New()} method and assigning the result to a smart
// pointer.
//
// \index{itk::Centered\-Transform\-Initializer!Instantiation}
// \index{itk::Centered\-Transform\-Initializer!New()}
// \index{itk::Centered\-Transform\-Initializer!SmartPointer}
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
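// A minimal sketch of the snippet described in the preceding paragraph
// (the concrete code was elided in this excerpt); it assumes the usual
// CenteredTransformInitializer instantiation.
typedef itk::CenteredTransformInitializer< TransformType,
                                           FixedImageType,
                                           MovingImageType > TransformInitializerType;
TransformInitializerType::Pointer initializer = TransformInitializerType::New();
// Software Guide : EndCodeSnippet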
// Software Guide : BeginLatex
//
// Let's execute this example over some of the images available in the ftp
// site
//
// \url{ftp://public.kitware.com/pub/itk/Data/BrainWeb}
//
// Note that the images in the ftp site are compressed in \code{.tgz} files.
// You should download these files and uncompress them on your local system.
// After decompressing and extracting the files you could take a pair of
// volumes, for example the pair:
//
// \begin{itemize}
// \item \code{brainweb1e1a10f20.mha}
// \item \code{brainweb1e1a10f20Rot10Tx15.mha}
// \end{itemize}
//
// The second image is the result of intentionally rotating the first image
// by $10$ degrees around the origin and shifting it $15mm$ in $X$. The
// registration takes $24$ iterations and produces:
//
// \begin{center}
// \begin{verbatim}
// [-6.03744e-05, 5.91487e-06, -0.0871932, 2.64659, -17.4637, -0.00232496]
// \end{verbatim}
// \end{center}
//
// These values are interpreted as
//
// \begin{itemize}
// \item Versor = $(-6.03744e-05, 5.91487e-06, -0.0871932)$
// \item Translation = $(2.64659, -17.4637, -0.00232496)$ millimeters
// \end{itemize}
//
// This Versor is equivalent to a rotation of $9.98$ degrees around the $Z$
// axis.
//
// Note that the reported translation is not the translation of $(15.0,0.0,0.0)$
// that we might naively expect. The reason is that the
// \code{VersorRigid3DTransform} is applying the rotation around the center
// found by the \code{CenteredTransformInitializer} and then adding the
// translation vector shown above.
//
// It is more illustrative in this case to take a look at the actual
// rotation matrix and offset resulting from the $6$ parameters.
//
// Software Guide : EndLatex
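// Note: finalParameters below is assumed to hold the optimizer's result,
// e.g. obtained (in the part of the example elided from this excerpt) via
//   OptimizerType::ParametersType finalParameters =
//                          registration->GetLastTransformParameters();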
// Software Guide : BeginCodeSnippet
transform->SetParameters( finalParameters );
TransformType::MatrixType matrix = transform->GetMatrix();
TransformType::OffsetType offset = transform->GetOffset();
std::cout << "Matrix = " << std::endl << matrix << std::endl;
std::cout << "Offset = " << std::endl << offset << std::endl;
// Software Guide : EndCodeSnippet
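// Illustrative check, not part of the original example (assumes <cmath> is
// available): the rotation angle implied by the versor is twice the arcsine
// of its Z component, and the reported offset satisfies
// offset = center + translation - R * center, which is why it differs from
// the raw 15 mm shift applied along X.
{
  const double versorZ = 0.0871932; // magnitude of the Z component quoted above
  const double pi = 3.14159265358979;
  const double angleInDegrees = 2.0 * std::asin( versorZ ) * 180.0 / pi; // ~10 degrees
  std::cout << "Rotation about Z implied by the versor: "
            << angleInDegrees << " degrees" << std::endl;
}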
// Software Guide : BeginLatex
//
// The output of these print statements is
//
// \begin{center}
// \begin{verbatim}
// Matrix =
// 0.984795 0.173722 2.23132e-05
// -0.173722 0.984795 0.000119257
// -1.25621e-06 -0.00012132 1
//
// Offset =
// [-15.0105, -0.00672343, 0.0110854]
// \end{verbatim}
// \end{center}
//
// From the rotation matrix it is possible to deduce that the rotation is
// happening in the X,Y plane and that the angle is on the order of
// $\arcsin{(0.173722)}$ which is very close to 10 degrees, as we expected.
//
// Software Guide : EndLatex
// Software Guide : BeginLatex
//
// \begin{figure}
// \center
// \includegraphics[width=0.44\textwidth]{BrainProtonDensitySliceBorder20}
// \includegraphics[width=0.44\textwidth]{BrainProtonDensitySliceR10X13Y17}
// \itkcaption[CenteredTransformInitializer input images]{Fixed and moving image
// provided as input to the registration method using
// CenteredTransformInitializer.}
// \label{fig:FixedMovingImageRegistration8}
// \end{figure}
//
//
// \begin{figure}
// \center
// \includegraphics[width=0.32\textwidth]{ImageRegistration8Output}
// \includegraphics[width=0.32\textwidth]{ImageRegistration8DifferenceBefore}
// \includegraphics[width=0.32\textwidth]{ImageRegistration8DifferenceAfter}
// \itkcaption[CenteredTransformInitializer output images]{Resampled moving
// image (left). Differences between fixed and moving images, before (center)
// and after (right) registration with the
// CenteredTransformInitializer.}
// \label{fig:ImageRegistration8Outputs}
// \end{figure}
//
// Figure \ref{fig:ImageRegistration8Outputs} shows the output of the
// registration. The center image in this figure shows the differences
// between the fixed image and the resampled moving image before the
// registration. The image on the right side presents the difference between
// the fixed image and the resampled moving image after the registration has
// been performed. Note that these images are individual slices extracted
// from the actual volumes. For details, look at the source code of this
// example, where the ExtractImageFilter is used to extract a slice from the
// the center of each one of the volumes. One of the main purposes of this
// example is to illustrate that the toolkit can perform registration on
// images of any dimension. The only limitations are, as usual, the amount of
// memory available for the images and the amount of computation time that it
// will take to complete the optimization process.
//
// \begin{figure}
// \center
// \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceMetric}
// \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceAngle}
// \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceTranslations}
// \itkcaption[CenteredTransformInitializer output plots]{Plots of the metric,
// rotation angle, center of rotation and translations during the
// registration using CenteredTransformInitializer.}
// \label{fig:ImageRegistration8Plots}
// \end{figure}
//
// Figure \ref{fig:ImageRegistration8Plots} shows the plots of the main
// output parameters of the registration process: the metric value at every
// iteration, the Z component of the versor as an indication of how the
// rotation progresses, and the X,Y translation components at every
// iteration.
//
// Shell and Gnuplot scripts for generating the diagrams in
// Figure~\ref{fig:ImageRegistration8Plots} are available in the directory
//
// \code{InsightDocuments/SoftwareGuide/Art}
//
// You are strongly encouraged to run the example code, since only in this
// way you can gain first-hand experience with the behavior of the
// registration process. Once again, this is a simple reflection of the
// philosophy that we put forward in this book:
//
// \emph{If you can not replicate it, then it does not exist!}.
//
// We have seen enough published papers with pretty pictures, presenting
// results that in practice are impossible to replicate. That is vanity, not
// science.
//
// Software Guide : EndLatex
typedef itk::ResampleImageFilter<
MovingImageType,
FixedImageType > ResampleFilterType;
TransformType::Pointer finalTransform = TransformType::New();
finalTransform->SetCenter( transform->GetCenter() );
finalTransform->SetParameters( finalParameters );
finalTransform->SetFixedParameters( transform->GetFixedParameters() );
ResampleFilterType::Pointer resampler = ResampleFilterType::New();
resampler->SetTransform( finalTransform );
resampler->SetInput( movingImageReader->GetOutput() );
FixedImageType::Pointer fixedImage = fixedImageReader->GetOutput();
resampler->SetSize( fixedImage->GetLargestPossibleRegion().GetSize() );
resampler->SetOutputOrigin( fixedImage->GetOrigin() );
resampler->SetOutputSpacing( fixedImage->GetSpacing() );
resampler->SetOutputDirection( fixedImage->GetDirection() );
resampler->SetDefaultPixelValue( 100 );
typedef unsigned char OutputPixelType;
typedef itk::Image< OutputPixelType, Dimension > OutputImageType;
typedef itk::CastImageFilter< FixedImageType, OutputImageType > CastFilterType;
typedef itk::ImageFileWriter< OutputImageType > WriterType;
WriterType::Pointer writer = WriterType::New();
CastFilterType::Pointer caster = CastFilterType::New();
writer->SetFileName( argv[3] );
caster->SetInput( resampler->GetOutput() );
writer->SetInput( caster->GetOutput() );
writer->Update();
typedef itk::SubtractImageFilter<
FixedImageType,
FixedImageType,
FixedImageType > DifferenceFilterType;
DifferenceFilterType::Pointer difference = DifferenceFilterType::New();
typedef itk::RescaleIntensityImageFilter<
FixedImageType,
OutputImageType > RescalerType;
RescalerType::Pointer intensityRescaler = RescalerType::New();
intensityRescaler->SetInput( difference->GetOutput() );
intensityRescaler->SetOutputMinimum( 0 );
intensityRescaler->SetOutputMaximum( 255 );
difference->SetInput1( fixedImageReader->GetOutput() );
difference->SetInput2( resampler->GetOutput() );
resampler->SetDefaultPixelValue( 1 );
WriterType::Pointer writer2 = WriterType::New();
writer2->SetInput( intensityRescaler->GetOutput() );
// Compute the difference image between the
// fixed and resampled moving image.
if( argc > 5 )
{
writer2->SetFileName( argv[5] );
writer2->Update();
}
typedef itk::IdentityTransform< double, Dimension > IdentityTransformType;
IdentityTransformType::Pointer identity = IdentityTransformType::New();
// Compute the difference image between the
// fixed and moving image before registration.
if( argc > 4 )
{
resampler->SetTransform( identity );
writer2->SetFileName( argv[4] );
writer2->Update();
}
//
// Here we extract slices from the input volume, and the difference volumes
// produced before and after the registration. These slices are presented as
// figures in the Software Guide.
//
//
typedef itk::Image< OutputPixelType, 2 > OutputSliceType;
typedef itk::ExtractImageFilter<
OutputImageType,
OutputSliceType > ExtractFilterType;
ExtractFilterType::Pointer extractor = ExtractFilterType::New();
extractor->SetDirectionCollapseToSubmatrix();
extractor->InPlaceOn();
FixedImageType::RegionType inputRegion =
fixedImage->GetLargestPossibleRegion();
FixedImageType::SizeType size = inputRegion.GetSize();
FixedImageType::IndexType start = inputRegion.GetIndex();
// Select one slice as output
size[2] = 0;
start[2] = 90;
FixedImageType::RegionType desiredRegion;
desiredRegion.SetSize( size );
desiredRegion.SetIndex( start );
extractor->SetExtractionRegion( desiredRegion );
typedef itk::ImageFileWriter< OutputSliceType > SliceWriterType;
SliceWriterType::Pointer sliceWriter = SliceWriterType::New();
sliceWriter->SetInput( extractor->GetOutput() );
if( argc > 6 )
{
extractor->SetInput( caster->GetOutput() );
resampler->SetTransform( identity );
sliceWriter->SetFileName( argv[6] );
sliceWriter->Update();
}
if( argc > 7 )
{
extractor->SetInput( intensityRescaler->GetOutput() );
resampler->SetTransform( identity );
sliceWriter->SetFileName( argv[7] );
sliceWriter->Update();
}
if( argc > 8 )
{
resampler->SetTransform( finalTransform );
sliceWriter->SetFileName( argv[8] );
sliceWriter->Update();
}
if( argc > 9 )
{
extractor->SetInput( caster->GetOutput() );
resampler->SetTransform( finalTransform );
sliceWriter->SetFileName( argv[9] );
sliceWriter->Update();
}
return EXIT_SUCCESS;
}
bool mitk::NavigationDataLandmarkTransformFilter::FindCorrespondentLandmarks(LandmarkPointContainer& sources, const LandmarkPointContainer& targets) const
{
  if (sources.size() < 6 || targets.size() < 6)
    return false;
  //throw std::invalid_argument("ICP correspondence finding needs at least 6 landmarks");

  /* lots of type definitions */
  typedef itk::PointSet<mitk::ScalarType, 3> PointSetType;
  //typedef itk::BoundingBox<PointSetType::PointIdentifier, PointSetType::PointDimension> BoundingBoxType;

  typedef itk::EuclideanDistancePointMetric< PointSetType, PointSetType> MetricType;
  //typedef MetricType::TransformType TransformBaseType;
  //typedef MetricType::TransformType::ParametersType ParametersType;
  //typedef TransformBaseType::JacobianType JacobianType;
  //typedef itk::Euler3DTransform< double > TransformType;
  typedef itk::VersorRigid3DTransform< double > TransformType;
  typedef TransformType ParametersType;
  typedef itk::PointSetToPointSetRegistrationMethod< PointSetType, PointSetType > RegistrationType;

  /* copy landmarks to itk pointsets for registration */
  PointSetType::Pointer sourcePointSet = PointSetType::New();
  unsigned int i = 0;
  for (LandmarkPointContainer::const_iterator it = sources.begin(); it != sources.end(); ++it)
  {
    PointSetType::PointType doublePoint;
    mitk::itk2vtk(*it, doublePoint); // copy mitk::ScalarType point into double point as workaround to ITK 3.10 bug
    sourcePointSet->SetPoint(i++, doublePoint /**it*/);
  }

  i = 0;
  PointSetType::Pointer targetPointSet = PointSetType::New();
  for (LandmarkPointContainer::const_iterator it = targets.begin(); it != targets.end(); ++it)
  {
    PointSetType::PointType doublePoint;
    mitk::itk2vtk(*it, doublePoint); // copy mitk::ScalarType point into double point as workaround to ITK 3.10 bug
    targetPointSet->SetPoint(i++, doublePoint /**it*/);
  }

  /* get centroid and extends of our pointsets */
  //BoundingBoxType::Pointer sourceBoundingBox = BoundingBoxType::New();
  //sourceBoundingBox->SetPoints(sourcePointSet->GetPoints());
  //sourceBoundingBox->ComputeBoundingBox();
  //BoundingBoxType::Pointer targetBoundingBox = BoundingBoxType::New();
  //targetBoundingBox->SetPoints(targetPointSet->GetPoints());
  //targetBoundingBox->ComputeBoundingBox();


  TransformType::Pointer transform = TransformType::New();
  transform->SetIdentity();
  //transform->SetTranslation(targetBoundingBox->GetCenter() - sourceBoundingBox->GetCenter());

  itk::LevenbergMarquardtOptimizer::Pointer optimizer = itk::LevenbergMarquardtOptimizer::New();
  optimizer->SetUseCostFunctionGradient(false);

  RegistrationType::Pointer registration = RegistrationType::New();

  // Scale the translation components of the Transform in the Optimizer
  itk::LevenbergMarquardtOptimizer::ScalesType scales(transform->GetNumberOfParameters());
  const double translationScale = 5000; //sqrtf(targetBoundingBox->GetDiagonalLength2())  * 1000; // dynamic range of translations
  const double rotationScale = 1.0; // dynamic range of rotations
  scales[0] = 1.0 / rotationScale;
  scales[1] = 1.0 / rotationScale;
  scales[2] = 1.0 / rotationScale;
  scales[3] = 1.0 / translationScale;
  scales[4] = 1.0 / translationScale;
  scales[5] = 1.0 / translationScale;
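  // The scales above are the reciprocals of each parameter's expected dynamic
  // range, so versor components (range ~1) and translations (range ~5000 mm)
  // become comparable in the optimizer's normalized parameter space.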
  //scales.Fill(0.01);
  unsigned long numberOfIterations = 80000;
  double gradientTolerance = 1e-10; // convergence criterion
  double valueTolerance = 1e-10; // convergence criterion
  double epsilonFunction = 1e-10; // convergence criterion
  optimizer->SetScales( scales );
  optimizer->SetNumberOfIterations( numberOfIterations );
  optimizer->SetValueTolerance( valueTolerance );
  optimizer->SetGradientTolerance( gradientTolerance );
  optimizer->SetEpsilonFunction( epsilonFunction );


  registration->SetInitialTransformParameters( transform->GetParameters() );
  //------------------------------------------------------
  // Connect all the components required for Registration
  //------------------------------------------------------
  MetricType::Pointer metric = MetricType::New();

  registration->SetMetric( metric );
  registration->SetOptimizer( optimizer );
  registration->SetTransform( transform );
  registration->SetFixedPointSet( targetPointSet );
  registration->SetMovingPointSet( sourcePointSet );

  try
  {
    //registration->StartRegistration();
    registration->Update();
  }
  catch( itk::ExceptionObject & e )
  {
    MITK_INFO << "Exception caught during ICP optimization: " << e;
    return false;
    //throw e;
  }
  MITK_INFO << "ICP successful: Solution = " << transform->GetParameters() << std::endl;
  MITK_INFO << "Metric value: " << metric->GetValue(transform->GetParameters());

  /* find point correspondences */
  //mitk::PointLocator::Pointer pointLocator = mitk::PointLocator::New();  // <<- use mitk::PointLocator instead of searching manually?
  //pointLocator->SetPoints()
  for (LandmarkPointContainer::const_iterator sourcesIt = sources.begin(); sourcesIt != sources.end(); ++sourcesIt)
  {
  }
  //MetricType::MeasureType closestDistances = metric->GetValue(transform->GetParameters());
  //unsigned int index = 0;
  LandmarkPointContainer sortedSources;
  for (LandmarkPointContainer::const_iterator targetsIt = targets.begin(); targetsIt != targets.end(); ++targetsIt)
  {
    double minDistance = itk::NumericTraits<double>::max();
    LandmarkPointContainer::iterator minDistanceIterator = sources.end();
    for (LandmarkPointContainer::iterator sourcesIt = sources.begin(); sourcesIt != sources.end(); ++sourcesIt)
    {
      TransformInitializerType::LandmarkPointType transformedSource = transform->TransformPoint(*sourcesIt);
      double dist = targetsIt->EuclideanDistanceTo(transformedSource);
      MITK_INFO << "target: " << *targetsIt << ", source: " << *sourcesIt << ", transformed source: " << transformedSource << ", dist: " << dist;
      if (dist < minDistance )
      {
        minDistanceIterator = sourcesIt;
        minDistance = dist;
      }
    }
    if (minDistanceIterator == sources.end())
      return false;
    MITK_INFO << "minimum distance point is: " << *minDistanceIterator << " (dist: " << targetsIt->EuclideanDistanceTo(transform->TransformPoint(*minDistanceIterator)) << ", minDist: " << minDistance << ")";
    sortedSources.push_back(*minDistanceIterator); // this point is assigned
    sources.erase(minDistanceIterator); // erase it from sources to avoid duplicate assigns
  }
  //for (LandmarkPointContainer::const_iterator sortedSourcesIt = sortedSources.begin(); targetsIt != sortedSources.end(); ++targetsIt)
  sources = sortedSources;
  return true;
}
int mitkPyramidImageRegistrationMethodTest( int argc, char* argv[] )
{
  if( argc < 4 )
  {
    MITK_ERROR << "Not enough input \n Usage: <TEST_NAME> fixed moving type [output_image [output_transform]]"
               << "\n \t fixed : the path to the fixed image \n"
               << " \t moving : path to the image to be registered"
               << " \t type : Affine or Rigid defining the type of the transformation"
               << " \t output_image : output file optional, (full) path, and optionally output_transform : also (full)path to file";
    return EXIT_FAILURE;
  }

  MITK_TEST_BEGIN("PyramidImageRegistrationMethodTest");

  mitk::Image::Pointer fixedImage = dynamic_cast<mitk::Image*>(mitk::IOUtil::Load( argv[1] )[0].GetPointer());
  mitk::Image::Pointer movingImage = dynamic_cast<mitk::Image*>(mitk::IOUtil::Load( argv[2] )[0].GetPointer());

  std::string type_flag( argv[3] );

  mitk::PyramidImageRegistrationMethod::Pointer registrationMethod = mitk::PyramidImageRegistrationMethod::New();
  registrationMethod->SetFixedImage( fixedImage );
  registrationMethod->SetMovingImage( movingImage );

  if( type_flag == "Rigid" )
  {
    registrationMethod->SetTransformToRigid();
  }
  else if( type_flag == "Affine" )
  {
    registrationMethod->SetTransformToAffine();
  }
  else
  {
    MITK_WARN << " No type specified, using 'Affine' .";
  }

  registrationMethod->Update();

  bool imageOutput = false;
  bool transformOutput = false;

  std::string image_out_filename, transform_out_filename;

  if( argc > 4 )
  {
    std::string first_output( argv[4] );
    // check for txt, otherwise suppose it is an image
    if( first_output.find(".txt") != std::string::npos )
    {
      transformOutput = true;
      transform_out_filename = first_output;
    }
    else
    {
      imageOutput = true;
      image_out_filename = first_output;
    }
  }

  if( argc > 5 )
  {
    std::string second_output( argv[5] );
    if( second_output.find(".txt") != std::string::npos )
    {
      transformOutput = true;
      transform_out_filename = second_output;
    }
  }

  MITK_INFO << " Selected output: " << transform_out_filename  << " " << image_out_filename;

  try{

    unsigned int paramCount = registrationMethod->GetNumberOfParameters();
    std::vector<double> params( paramCount );
    registrationMethod->GetParameters( &params[0] );

    std::cout << "Parameters: ";
    for( unsigned int i=0; i< paramCount; i++)
    {
      std::cout << params[ i ] << " ";
    }
    std::cout << std::endl;

    if( imageOutput )
    {
      mitk::IOUtil::Save( registrationMethod->GetResampledMovingImage(), image_out_filename.c_str() );
    }


    if( transformOutput )
    {

      itk::TransformFileWriter::Pointer writer = itk::TransformFileWriter::New();

      // Get transform parameter for resampling / saving
      // Affine
      if( paramCount == 12 )
      {
        typedef itk::AffineTransform< double > TransformType;
        TransformType::Pointer transform = TransformType::New();

        TransformType::ParametersType affine_params( paramCount );
        registrationMethod->GetParameters( &affine_params[0] );

        transform->SetParameters( affine_params );
        writer->SetInput( transform );
      }
      // Rigid
      else
      {
        typedef itk::Euler3DTransform< double > RigidTransformType;
        RigidTransformType::Pointer rtransform = RigidTransformType::New();

        RigidTransformType::ParametersType rigid_params( paramCount );
        registrationMethod->GetParameters( &rigid_params[0] );

        rtransform->SetParameters( rigid_params );
        writer->SetInput( rtransform );
      }

      writer->SetFileName( transform_out_filename );
      writer->Update();
    }

  }
  catch( const std::exception &e)
  {
    MITK_ERROR << "Caught exception: " << e.what();
  }


  MITK_TEST_END();
}
Example #16
int main (int argc, char **argv)
{
  int verbose=0, clobber=0,skip_grid=0;
  double max=5.0;
  double extent=300;

  static struct option long_options[] = {
		{"verbose", no_argument,       &verbose, 1},
		{"quiet",   no_argument,       &verbose, 0},
		{"clobber", no_argument,       &clobber, 1},
		{"spacing", required_argument, 0, 's'},
		{"max",     required_argument, 0, 'm'},
		{"version", no_argument,       0, 'v'},
    {"extent", required_argument,  0, 'e'},
		{0, 0, 0, 0}
		};
  
  double spacing=4.0;
  for (;;) {
      /* getopt_long stores the option index here. */
      int option_index = 0;

      int c = getopt_long (argc, argv, "s:m:v", long_options, &option_index);

      /* Detect the end of the options. */
      if (c == -1) break;

      switch (c)
			{
			case 0:
				break;
			case 's':
				spacing=atof(optarg);
				break;
			case 'v':
				cout << "Version: 1.0" << endl;
				return 0;
      case 'm':
        max=atof(optarg); break;
      case 'e':
        extent=atof(optarg); break;
			case '?':
				/* getopt_long already printed an error message. */
			default:
				show_usage ();
				return 1;
			}
    }

	if ((argc - optind) < 2) {
		show_usage ();
		return 1;
	}
  std::string input=argv[optind];
  std::string output=argv[optind+1];
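  // Hypothetical invocation (the executable and file names here are assumed,
  // not taken from the original source):
  //   spherical_harmonics_to_grid --spacing 2 --max 10 --extent 300 params.par grid_0.mnc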
	try
  {
    gsl_rng_env_setup();
    
		typedef minc::SphericalHarmonicsTransform TransformType;
    
		TransformType::ParametersType finalParameters;
		load_parameters(input.c_str(),finalParameters);
		TransformType::Pointer finalTransform = TransformType::New();
    cout<<"Loaded parameters:"<<finalParameters<<endl;
		finalTransform->ImportParameters( finalParameters , true);
    
    cout<<"Imported!"<<endl;
		minc::def3d::Pointer grid(minc::def3d::New());
    
    allocate_image3d<minc::def3d>(grid, 
                      fixed_vec<3, unsigned int>(extent/spacing), 
                      fixed_vec<3, double>(spacing), 
                      fixed_vec<3, double>(-extent/2));
		
		if(verbose) 
		{
			cout<<"Generating a grid file, ";
			cout<<"extent: "<<extent<<" spacing: "<<spacing<<" ..."<<std::flush;
		}
		
    def3d_iterator it(grid,grid->GetLargestPossibleRegion());
		for(it.GoToBegin();!it.IsAtEnd();++it) 
    {
      tag_point p,p2;
      grid->TransformIndexToPhysicalPoint(it.GetIndex(),p);
			p2=finalTransform->TransformPointUnCached(p);
      def_vector moved;
			moved[0]=p2[0]-p[0];
			moved[1]=p2[1]-p[1];
			moved[2]=p2[2]-p[2];
      if(fabs(moved[0])>max || fabs(moved[1])>max ||fabs(moved[2])>max)
        moved[0]=moved[1]=moved[2]=0.0;
			
      it.Value()=moved;
    }
		if(verbose)
			cout<<"Done!"<<endl;
    save_minc<def3d>(output.c_str(), grid);
		
	} catch (const minc::generic_error & err) {
    cerr << "Got an error at:" << err.file () << ":" << err.line () << endl;
    return 1; 
  }
  catch( itk::ExceptionObject & err ) 
  { 
    std::cerr << "ExceptionObject caught !" << std::endl; 
    std::cerr << err << std::endl; 
    return 2;
  } 
	return 0;
	
}
    RealImage::Pointer bsplineRegistration(RealImage::Pointer srcImg, RealImage::Pointer dstImg) {

        const unsigned int SpaceDimension = ImageDimension;
        const unsigned int SplineOrder = 3;
        typedef double CoordinateRepType;

        typedef itk::BSplineTransform<CoordinateRepType, SpaceDimension, SplineOrder> TransformType;
        typedef itk::LBFGSOptimizer OptimizerType;
        typedef itk::MeanSquaresImageToImageMetric<ImageType, ImageType> MetricType;
        typedef itk::LinearInterpolateImageFunction<ImageType, double> InterpolatorType;
        typedef itk::ImageRegistrationMethod<ImageType, ImageType> RegistrationType;

        MetricType::Pointer         metric        = MetricType::New();
        OptimizerType::Pointer      optimizer     = OptimizerType::New();
        InterpolatorType::Pointer   interpolator  = InterpolatorType::New();
        RegistrationType::Pointer   registration  = RegistrationType::New();



        // The old registration framework has problems with multi-threading
        // For now, we set the number of threads to 1
//        registration->SetNumberOfThreads(1);
        registration->SetMetric(        metric        );
        registration->SetOptimizer(     optimizer     );
        registration->SetInterpolator(  interpolator  );

        TransformType::Pointer  transform = TransformType::New();
        registration->SetTransform( transform );

        // Setup the registration
        registration->SetFixedImage(  dstImg   );
        registration->SetMovingImage(   srcImg);

        ImageType::RegionType fixedRegion = dstImg->GetBufferedRegion();
        registration->SetFixedImageRegion( fixedRegion );

        //  Here we define the parameters of the BSplineDeformableTransform grid.  We
        //  arbitrarily decide to use a grid with $5 \times 5$ nodes within the image.
        //  The reader should note that the BSpline computation requires a
        //  finite support region ( 1 grid node at the lower borders and 2
        //  grid nodes at upper borders). Therefore in this example, we set
        //  the grid size to be $8 \times 8$ and place the grid origin such that
        //  grid node (1,1) coincides with the first pixel in the fixed image.

        TransformType::PhysicalDimensionsType   fixedPhysicalDimensions;
        TransformType::MeshSizeType             meshSize;
        for (unsigned int i=0; i < ImageDimension; i++) {
            fixedPhysicalDimensions[i] = dstImg->GetSpacing()[i] *
            static_cast<double>(dstImg->GetLargestPossibleRegion().GetSize()[i] - 1 );
            meshSize[i] = dstImg->GetLargestPossibleRegion().GetSize()[i] / 8 - SplineOrder;
        }
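        // With SetTransformDomainMeshSize below, each dimension ends up with
        // meshSize + SplineOrder control points, i.e. roughly one grid node
        // per 8 voxels of the fixed image here.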
//        unsigned int numberOfGridNodesInOneDimension = 15;
//        meshSize.Fill( numberOfGridNodesInOneDimension - SplineOrder );
        transform->SetTransformDomainOrigin( dstImg->GetOrigin() );
        transform->SetTransformDomainPhysicalDimensions( fixedPhysicalDimensions );
        transform->SetTransformDomainMeshSize( meshSize );
        transform->SetTransformDomainDirection( dstImg->GetDirection() );

        typedef TransformType::ParametersType     ParametersType;

        const unsigned int numberOfParameters = transform->GetNumberOfParameters();

        ParametersType parameters( numberOfParameters );
        parameters.Fill( 0.0 );

        transform->SetParameters( parameters );

        //  We now pass the parameters of the current transform as the initial
        //  parameters to be used when the registration process starts.

        registration->SetInitialTransformParameters( transform->GetParameters() );

        std::cout << "Intial Parameters = " << std::endl;
        std::cout << transform->GetParameters() << std::endl;

        //  Next we set the parameters of the LBFGS Optimizer.
        optimizer->SetGradientConvergenceTolerance(0.1);
        optimizer->SetLineSearchAccuracy(0.09);
        optimizer->SetDefaultStepLength(.1);
        optimizer->TraceOn();
        optimizer->SetMaximumNumberOfFunctionEvaluations(1000);

        std::cout << std::endl << "Starting Registration" << std::endl;

        try {
            registration->Update();
            std::cout << "Optimizer stop condition = "
            << registration->GetOptimizer()->GetStopConditionDescription()
            << std::endl;
        } catch (itk::ExceptionObject & err) {
            std::cerr << "ExceptionObject caught !" << std::endl;
            std::cerr << err << std::endl;
            return RealImage::Pointer();
        }

        OptimizerType::ParametersType finalParameters =
        registration->GetLastTransformParameters();
        
        std::cout << "Last Transform Parameters" << std::endl;
        std::cout << finalParameters << std::endl;
        
        transform->SetParameters( finalParameters );
        
        typedef itk::ResampleImageFilter<ImageType, ImageType>    ResampleFilterType;
        
        ResampleFilterType::Pointer resample = ResampleFilterType::New();
        
        resample->SetTransform( transform );
        resample->SetInput( srcImg );
        
        resample->SetSize(    dstImg->GetLargestPossibleRegion().GetSize() );
        resample->SetOutputOrigin(  dstImg->GetOrigin() );
        resample->SetOutputSpacing( dstImg->GetSpacing() );
        resample->SetOutputDirection( dstImg->GetDirection() );
        resample->SetDefaultPixelValue( 100 );
        resample->Update();
        return resample->GetOutput();
    }
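    // A hypothetical call site (names assumed): warp a subject image onto a
    // reference image with the method above.
    //   RealImage::Pointer warped = bsplineRegistration(subjectImage, referenceImage);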
Example #18
	void SetTransformParameters(TransformType::Pointer inputTransform) {
    transform->SetParameters( inputTransform->GetParameters() );
    transform->SetFixedParameters( inputTransform->GetFixedParameters() );
    buildSlices();
    buildMaskSlices();
	}