void ImageScaleTransform::process()
    {
    foreach (const ElementBase *source, mSourceElementsReadySet)
        for (int i = 0; i < source->getFramesNo(); ++i)
            {
            const FrameBase *frame = source->getFrame(i);
            if (frame->getMaxDimension() == ColorImageFrame::Dimensions)
                {
                mImageFrame.setSourceName(frame->getSourceName());
                mSrcFrame.resizeAndCopyFrame(*frame);
                ColorImageFrame::ImageType::Pointer srcImg = mSrcFrame;

                typedef ScaleTransform<double, 2> TransformType;
                TransformType::Pointer scaleTransform = TransformType::New();
                TransformType::ScaleType scale;   // FixedArray<double, 2>, matching the double transform
                scale[0] = property("widthScale").toDouble();
                scale[1] = property("heightScale").toDouble();
                scaleTransform->SetScale(scale);
                // The center is a physical-space point; halving the pixel size
                // assumes a zero origin and unit spacing for this frame.
                TransformType::InputPointType center;
                center[0] = srcImg->GetLargestPossibleRegion().GetSize()[0] / 2.0;
                center[1] = srcImg->GetLargestPossibleRegion().GetSize()[1] / 2.0;
                scaleTransform->SetCenter(center);
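                // Note: ResampleImageFilter maps each output pixel location through the
                // transform back into the input image, so a scale factor greater than 1
                // shrinks the visible content rather than enlarging it.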

                typedef ResampleImageFilter<ColorImageFrame::ImageType, ColorImageFrame::ImageType> ResampleImageFilterType;
                ResampleImageFilterType::Pointer resampleFilter = ResampleImageFilterType::New();
                resampleFilter->SetTransform(scaleTransform);
                resampleFilter->SetInput(srcImg);
                resampleFilter->SetSize(srcImg->GetLargestPossibleRegion().GetSize());
                resampleFilter->Update();
                mImageFrame = resampleFilter->GetOutput();

                emit framesReady();
                break;
                }
            }
    }
int main( int argc, char *argv[] )
{
  string input_name;
  string output_dir;
  if (argc == 3) {
    input_name = argv[1];
    output_dir = argv[2];
  }

  const     unsigned int   Dimension = 3;
  const     unsigned int   OutDimension = 2;
  typedef short InputPixelType;
  typedef int FilterPixelType;
  typedef itk::Image< InputPixelType,  Dimension >   InputImageType;
  typedef itk::Image< FilterPixelType, Dimension >   FilterImageType;
  typedef itk::Image< FilterPixelType, OutDimension >   OutFilterImageType;

  InputImageType::Pointer image;
  itk::MetaDataDictionary dict;


  if (input_name.size() && output_dir.size()) 
    {
      if (boost::filesystem::is_regular_file( input_name )) {
	typedef itk::ImageFileReader< InputImageType >  ReaderType;
	ReaderType::Pointer reader = ReaderType::New();
	reader->SetFileName( input_name );
	try 
	  { 
	  reader->Update();
	  } 
	catch( itk::ExceptionObject & err ) 
	  { 
	  std::cerr << "ERROR: ExceptionObject caught !" << std::endl; 
	  std::cerr << err << std::endl; 
	  return EXIT_FAILURE;
	  } 
	image = reader->GetOutput();
	dict = reader->GetMetaDataDictionary();
      } else if (boost::filesystem::is_directory( input_name )) {
        itkBasic::SeriesReader sreader( input_name );
	sreader.readSeriesData( 2 );
	try 
	{
	    itkBasic::ReaderType::Pointer imageReader = itkBasic::ReaderType::New();
	    itkBasic::FileNamesContainer fc;
	    sreader.getSeriesFileNames(0, fc);
	    image = itkBasic::getDicomSerie( fc, imageReader, 1 ); 
	    dict = *((*imageReader->GetMetaDataDictionaryArray())[0]);
	}
	catch( itk::ExceptionObject & err ) 
	  { 
	  std::cerr << "ERROR: ExceptionObject caught !" << std::endl; 
	  std::cerr << err << std::endl; 
	  return EXIT_FAILURE;
	  } 
      }
    }
    
    if (!image) {
	std::cerr << "Usage: " << argv[0] << " <input image file | DICOM directory> <output directory>" << std::endl;
	return EXIT_FAILURE;
    }
  

  typedef itk::SigmoidImageFilter< InputImageType, FilterImageType > SigmoidCasterType;
  SigmoidCasterType::Pointer sigmoidcaster = SigmoidCasterType::New();
  
  sigmoidcaster->SetInput( image );
  sigmoidcaster->SetOutputMaximum( 4000 );
  sigmoidcaster->SetOutputMinimum( 1000 );

  
  typedef itk::AccumulateImageFilter< FilterImageType, FilterImageType > AccumulateFilter;
  AccumulateFilter::Pointer accumulator = AccumulateFilter::New();
  accumulator->SetAccumulateDimension(1);
  accumulator->SetInput( sigmoidcaster->GetOutput() );

  typedef itk::ExtractImageFilter< FilterImageType, OutFilterImageType > ExtractFilter;
  ExtractFilter::Pointer extractor = ExtractFilter::New();
  extractor->SetInput( accumulator->GetOutput() );
  FilterImageType::Pointer accuOut = accumulator->GetOutput();
  accuOut->UpdateOutputInformation();
  FilterImageType::RegionType extractRegion = accuOut->GetLargestPossibleRegion();
  
  extractRegion.SetSize(1,0);
  
  extractor->SetExtractionRegion( extractRegion );
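
#if ITK_VERSION_MAJOR >= 4
  // ITK 4 and later require an explicit strategy for collapsing the direction
  // matrix when extracting a lower-dimensional image; identity is assumed to
  // be adequate for this projection.
  extractor->SetDirectionCollapseToIdentity();
#endif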

  typedef itk::ResampleImageFilter<OutFilterImageType, OutFilterImageType > ResampleFilter;
  ResampleFilter::Pointer resampler = ResampleFilter::New();
  resampler->SetInput( extractor->GetOutput() );
  
  typedef itk::BSplineInterpolateImageFunction< OutFilterImageType > InterpolatorType;
  InterpolatorType::Pointer interpolator = InterpolatorType::New();
  interpolator->SetSplineOrder(3);
  
  resampler->SetInterpolator( interpolator );
  OutFilterImageType::Pointer exOut = extractor->GetOutput();
  exOut->UpdateOutputInformation();
  
  typedef itk::CenteredRigid2DTransform< double > TransformType;
  TransformType::Pointer transform = TransformType::New();
  transform->SetIdentity();
  OutFilterImageType::PointType exOutCenter = exOut->GetOrigin();
  exOutCenter[0] += (exOut->GetLargestPossibleRegion().GetSize()[0]-1) * exOut->GetSpacing()[0] *.5;
  exOutCenter[1] += (exOut->GetLargestPossibleRegion().GetSize()[1]-1) * exOut->GetSpacing()[1] *.5;
  transform->SetCenter( exOutCenter );
  transform->SetAngleInDegrees( 180 );
  resampler->SetTransform( transform );
  resampler->SetOutputParametersFromImage( exOut );

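  // Resample to isotropic pixels: use the smaller of the two spacings on both
  // axes and scale the pixel counts so that the physical extent is preserved.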
  OutFilterImageType::SpacingType resampleSpacing = exOut->GetSpacing();
  resampleSpacing.Fill( std::min( resampleSpacing[0], resampleSpacing[1] ) );
  OutFilterImageType::SizeType resampleSize;
  resampleSize[0] = exOut->GetLargestPossibleRegion().GetSize()[0] * exOut->GetSpacing()[0] / resampleSpacing[0];
  resampleSize[1] = exOut->GetLargestPossibleRegion().GetSize()[1] * exOut->GetSpacing()[1] / resampleSpacing[1];
  resampler->SetSize( resampleSize );
  resampler->SetOutputSpacing( resampleSpacing );
  
  OutFilterImageType::Pointer result = resampler->GetOutput();
  
  // Lung preset (cf. the commented-out "lung" PNG export below).
  sigmoidcaster->SetBeta( -500 );
  sigmoidcaster->SetAlpha( 5 );
  result->Update();

  int outDicomIndex = 0;
  itk::EncapsulateMetaData( dict, "0008|0008", string("DERIVED\\SECONDARY\\AXIAL"));
  
  boost::filesystem::path outpath = output_dir;
  outpath = outpath / "IM%06d";
  
  std::vector< itk::MetaDataDictionary* > dictArray;
  dictArray.push_back(&dict);
  
  itkBasic::writeDicomSeries( itkBasic::ImageRescale(itkBasic::ImageSharp(result, 0.5), -1000, 4000), outpath.string(), &dictArray, outDicomIndex);
//  itkBasic::ImageSave( itkBasic::ImageSharp(result, 0.5), boost::str( boost::format("%s.%s.png") % output_name % "lung" ), 1, 0); // Auto Level

  // Bone preset (cf. the commented-out "bone" PNG export below).
  sigmoidcaster->SetBeta( 1000 );
  sigmoidcaster->SetAlpha( 300 );
  result->Update();
  itkBasic::writeDicomSeries( itkBasic::ImageRescale(itkBasic::ImageSharp(result, 0.5), -1000, 4000), outpath.string(), &dictArray, outDicomIndex);
//  itkBasic::ImageSave( itkBasic::ImageSharp(result, 0.5), boost::str( boost::format("%s.%s.png") % output_name % "bone" ), 1, 0); // Auto Level
  
  // "Normal" preset (cf. the commented-out "normal" PNG export below).
  sigmoidcaster->SetBeta( 0 );
  sigmoidcaster->SetAlpha( 2000 );
  result->Update();
  itkBasic::writeDicomSeries( itkBasic::ImageRescale(itkBasic::ImageSharp(result, 0.5), -1000, 4000), outpath.string(), &dictArray, outDicomIndex);
//  itkBasic::ImageSave( itkBasic::ImageSharp(result, 0.5), boost::str( boost::format("%s.%s.png") % output_name % "normal" ), 1, 0); // Auto Level
}
int main(int argc, char *argv[])
{
	if(argc < 11)
	{
		std::cerr << "Usage: " << argv[0]
			<< " <inputFilenamesFile> <keyPointIntensityThreshold> <dogSplitsPerOctave>"
			<< " <startingScale> <eLocation> <eScale> <eOrientation> <gamma>"
			<< " <distanceMapFilenamesFile> <extractionDistanceThreshold>" << std::endl;
		return EXIT_FAILURE;
	}

	std::string inputFilenamesFilename = argv[1];
	double keyPointIntensityThreshold = atof(argv[2]);
	double dogSplitsPerOctave = atof(argv[3]);
	double startingScale = atof(argv[4]);
	double eLocation = atof(argv[5]);
	double eScale = std::log(atof(argv[6]));
	double eOrientation = atof(argv[7]);
	double gammaValue = atof(argv[8]);

	std::string distanceMapFilenamesFilename = argv[9];
	double extractionDistanceThreshold = atof(argv[10]);




	// load up the set of aligned images
	FilenamesReader::FilenamesType inputFilenames = FilenamesReader::Read(inputFilenamesFilename);
	FilenamesReader::FilenamesType distanceMapFilenames   = FilenamesReader::Read(distanceMapFilenamesFilename);
	ImageVolumeList images;
	RealVolumeList distanceMaps;
	for(unsigned int i = 0; i < inputFilenames.size(); i++)
	{
		ImageVolume::Pointer image = ImageVolumeIO::Read(inputFilenames[i]);
		images.push_back(image);
		RealVolume::Pointer distMap = RealVolumeIO::Read(distanceMapFilenames[i]);
		distanceMaps.push_back(distMap);
	}


	unsigned int sliceToTest = 7;

	// for each slice we want to learn the features
	const unsigned int sliceNum = images.front()->GetLargestPossibleRegion().GetSize()[2];
	for(unsigned int slice = sliceToTest; slice < sliceNum; slice++)
	{

		// get the set of slices that have some image data in them
		ImageSliceList validImages;
		RealSliceList validDistanceMaps;
		
		for(unsigned int im = 0; im < images.size(); im++)
		{
			ImageSlice::Pointer extractedSlice = ImageSlice::New();
			RealSlice::Pointer distanceSlice = RealSlice::New();
			ExtractSlice<ImageVolume, ImageSlice>(images[im], slice, extractedSlice);
			ExtractSlice<RealVolume, RealSlice>(distanceMaps[im], slice, distanceSlice);

			if(ImageContainsData(extractedSlice))
			{
				validDistanceMaps.push_back(distanceSlice);
				validImages.push_back(extractedSlice);
			}
		}

		/*
		if(validImages.size() < 3)
			continue;
		*/

		std::cout << "Slice Num: " << slice << " Image Number: " << validImages.size() << std::endl;



		typedef itk::Vector<double, 2> VectorType;
		typedef itk::Image<VectorType, 2> GradientType;
		typedef filter::HistogramOfGradeintsFeatureExtractor<GradientType> FeatureBuilderType;
		typedef FeatureBuilderType::FeatureType HoGFeatureType;
		std::vector<HoGFeatureType> allFeatures;
		std::vector<HoGFeatureType> allFeatures1;
		std::vector<HoGFeatureType> allFeatures2;

		

		unsigned int featureCount = 0;
		for(unsigned int im = 0; im < validImages.size(); im++)
		{
			ImageSlice::Pointer extractedSlice = validImages[im];

			// first we extract all of the keypoints points
			typedef filter::DoGKeyPointExtractor<utils::ImageSlice> ExtractorType;
			ExtractorType::Pointer extractor = ExtractorType::New();
			extractor->SetInput(extractedSlice);
			extractor->SetKeypointThreshold(keyPointIntensityThreshold);
			extractor->SetSplitsPerOctave(dogSplitsPerOctave);
			extractor->SetStartingSigma(startingScale);
			extractor->SetDistanceMap(validDistanceMaps[im]);
			extractor->SetDistanceThreshold(extractionDistanceThreshold);
			extractor->Update();

			// orientate the feature points
			typedef filter::KeyPointOrientator<utils::ImageSlice> Orientator;
			Orientator::Pointer orientator  = Orientator::New();
			orientator->SetInput(extractedSlice);
			orientator->SetKeyPoints(extractor->GetOutput());
			orientator->SetHistogramBins(32);
			orientator->SetSigmaScale(2);
			orientator->SetSampleRadius(5);
			orientator->Update();

			Orientator::OrientatedKeyPointMap orientateKeyPoints = orientator->GetOutput();


			

			// now we go through the features and compute the HOG descriptors
			Orientator::OrientatedKeyPointMap::iterator keyPointIt = orientateKeyPoints.begin();
			std::cout << orientateKeyPoints.size() << std::endl;
			while(keyPointIt != orientateKeyPoints.end())
			{
				double sigma = keyPointIt->first;
				Orientator::OrientatedKeyPointList keyPoints = keyPointIt->second;

				// smooth the image to the sigma level
				typedef itk::DiscreteGaussianImageFilter<utils::ImageSlice, utils::RealSlice> Smoother;
				Smoother::Pointer smoother = Smoother::New();
				smoother->SetInput(extractedSlice);
				smoother->SetVariance(sigma*sigma); // DiscreteGaussianImageFilter takes a variance, not a sigma
				smoother->SetUseImageSpacing(true);

				typedef itk::GradientRecursiveGaussianImageFilter<RealSlice, GradientType> GradientFilterType;
				GradientFilterType::Pointer gradientFilter = GradientFilterType::New();
				gradientFilter->SetInput(smoother->GetOutput());
				//gradientFilter->SetSigma(sigma);
				gradientFilter->Update();



		
				


				std::cout << "Doing Sigma " << sigma << " Key Point Number: " << keyPoints.size() << std::endl;



				for(unsigned int fnum = 0; fnum < keyPoints.size(); fnum++)
				{
					Orientator::OrientatedKeyPoint keyPoint = keyPoints[fnum];

					// build the transform
					typedef itk::CenteredRigid2DTransform<double> TransformType;
					TransformType::Pointer transform = TransformType::New();
					transform->SetCenter(keyPoint.location);
					transform->SetAngleInDegrees(360-keyPoint.degrees);

					// extract the patch from the gradient image
					typedef filter::ImagePatchExtractor<GradientType> PatchExtractorType;
					PatchExtractorType::Pointer patchExtractor = PatchExtractorType::New();
					patchExtractor->SetInput(gradientFilter->GetOutput());
					patchExtractor->SetTransform(transform);
					patchExtractor->SetScale(keyPoint.scale*2);

					PatchExtractorType::SizeType patchSize;
					patchSize.Fill(10);
					patchExtractor->SetPatchSize(patchSize);
					patchExtractor->SetInputPoint(keyPoint.location);
					patchExtractor->Update();


					/*
					// validate the keypoint
					typedef filter::StructureTensorKeyPointValidator<utils::ImageSlice> ValidatorType;
					ValidatorType::Pointer validator = ValidatorType::New();
					validator->SetInput(extractedSlice);	
					validator->SetKeyPoint(keyPoint);
					validator->SetRatio(validatorBeta);
					validator->Update();


					bool valid = validator->IsValid();
					*/


					// create the descriptor
					FeatureBuilderType::Pointer builder = FeatureBuilderType::New();
					builder->SetInput(patchExtractor->GetOutput());
					builder->SetOrientationBins(8);
					builder->SetKeyPoint(keyPoint);
					builder->Update();


					// add the feature to the list
					FeatureBuilderType::FeatureType feature = builder->GetOutput();
					feature.featureId = featureCount;
					allFeatures.push_back(feature);

					featureCount++;

				}



				
				keyPointIt++;
			}
		}

		




		std::cout << "Computing Distance Matrix" << std::endl;
		// compute the distance matrix
		typedef utils::DoubleMatrixType MatrixType;
		MatrixType distanceMatrix = MatrixType::Zero(allFeatures.size(), allFeatures.size());
		ComputeDifferenceMatrix(allFeatures, distanceMatrix);


		std::cout << "Grouping Features" << std::endl;
		// now we group the features by their geometry
		typedef filter::FeaturePointGrouper<2> GrouperType;
		GrouperType::Pointer grouper = GrouperType::New();
		grouper->SetInput(allFeatures);
		grouper->SetAngleThreshold(eOrientation);
		grouper->SetLocationThreshold(eLocation);
		grouper->SetScaleThreshold(eScale);
		grouper->Update();





		std::cout << "Creating Clusters" << std::endl;
		GrouperType::FeatureGroupList clusters = grouper->GetOutput();
		std::sort(clusters.begin(), clusters.end());

		GrouperType::FeatureGroupList newClusters;

		for(unsigned int i = 0; i < clusters.size(); i++)
		{
			typedef filter::FeatureClusterLearner<2> ClusterLearnerType;
			ClusterLearnerType::Pointer learner = ClusterLearnerType::New();
			learner->SetInput(clusters[i]);
			learner->SetFeatures(allFeatures);
			learner->SetDistanceMatrix(distanceMatrix);
			learner->SetGamma(gammaValue);
			learner->Update();
			
			ClusterLearnerType::ClusterType newCluster = learner->GetOutput();
			newClusters.push_back(newCluster);



		}

		std::cout << "Culling Clusters" << std::endl;
		
		typedef filter::FeatureClusterCuller<2> Culler;
		Culler::Pointer culler = Culler::New();
		culler->SetInput(newClusters);
		culler->Update();


		Culler::ClusterList culledClusters = culler->GetOutput();
		std::sort(culledClusters.begin(), culledClusters.end());
		for(unsigned int i = 0; i < culledClusters.size(); i++)
		{
			typedef filter::ClusterDistributionGenerator<2> DistributionGenerator;
			DistributionGenerator::Pointer generator = DistributionGenerator::New();
			generator->SetInput(culledClusters[i]);
			generator->Update();

			exit(1);
		}



		/*


		ImageSlice::Pointer extractedSlice = ImageSlice::New();
		ExtractSlice<ImageVolume, ImageSlice>(images[0], sliceToTest, extractedSlice);

		std::vector<std::pair<int, ImageSlice::PointType> > testOut;
		for(unsigned int i = 0; i < culledClusters.size(); i++)
		{
			for(unsigned int j = 0; j < culledClusters[i].clusterMembers.size(); j++)
			{
			
				std::pair<int, ImageSlice::PointType> p(i, culledClusters[i].clusterMembers[j].keyPoint.location);
				testOut.push_back(p);				
			}			
		}




		utils::DoubleMatrixType pOut = utils::DoubleMatrixType::Zero(testOut.size(), 2);
		utils::IntMatrixType iOut = utils::IntMatrixType::Zero(testOut.size(),1);
		for(unsigned int i = 0; i < testOut.size(); i++)
		{
			itk::ContinuousIndex<double, 2> contIndex;
			extractedSlice->TransformPhysicalPointToContinuousIndex(testOut[i].second, contIndex);

			pOut(i,0) = contIndex[0];			
			pOut(i,1) = contIndex[1];

			iOut(i,0) = testOut[i].first;			
		}


	


		utils::MatrixDataSet::Pointer dout = utils::MatrixDataSet::New();
		dout->AddData("locations", pOut);
		dout->AddData("index", iOut);
		utils::MatrixWriter::Pointer writer = utils::MatrixWriter::New();
		writer->SetInput(dout);
		writer->SetFilename("data.hdf5");
		writer->Update();



		exit(1);
		*/

		


		/*
		// compute the affinity matrix between all of the features
		unsigned int numFeatures = allFeatures.size();
		double sum = 0.0;
		int count = 0;
		int max = 0;
		int maxId = 0;
		std::vector<int> counts;

		std::vector<Cluster> allGroups;

		// group all the features that have a similar location / scale / orientation
		for(unsigned int i = 0; i < numFeatures; i++)
		{
			Cluster cluster;
			cluster.featureIndex = i;
			cluster.feature = allFeatures[i];
			cluster.e = 0.0;
			cluster.members.push_back(allFeatures[i]);
			
			for(unsigned int j = 0; j < numFeatures; j++)
			{
				if(i == j) continue;
				if(AreSimilar(allFeatures[i], allFeatures[j],
							eLocation, eOrientation, eScale))
				{
					cluster.members.push_back(allFeatures[j]);
				}
			}
			
			counts.push_back(cluster.members.size());
			if(cluster.members.size() > max)
			{
				max = cluster.members.size();
				maxId = i;
			}

			allGroups.push_back(cluster);
			sum += cluster.members.size();
			std::sort(cluster.members.begin(), cluster.members.end(), member_sort);
			count++;
		}



		std::sort(counts.begin(), counts.end());
		for(unsigned int i = 0; i < counts.size(); i++)
		{
			std::cout << counts[i] << std::endl;
		}


		// compute the difference matrix
		utils::DoubleMatrixType diffMat;
		ComputeDifferenceMatrix(allFeatures, diffMat);

		// loop through the groups to form the clusters
		std::vector<Cluster> allClusters;
		for(unsigned int i = 0; i < allGroups.size(); i++)
		{
			Cluster cluster;
			CreateCluster(allGroups[i], allFeatures, diffMat, cluster);
			allClusters.push_back(cluster);
		}

		// remove duplicates
		std::vector<int> toRemove;
		for(unsigned int i = 0; i < allClusters.size(); i++)
		{
			bool duplicate = false;
			for(unsigned int j = i; j < allClusters.size(); j++)
			{
				if(i == j) continue;
				if(allClusters[i].members.size() != allClusters[j].members.size())
					continue;

				bool sameMembers = true;
				for(unsigned int k = 0; k < allClusters[i].members.size(); k++)
				{
					if(allClusters[i].members[k].index != allClusters[j].members[k].index)
					{
						sameMembers = false;				
						break;
					}
				}

				if(sameMembers)
				{
					duplicate = true;
				}

				if(duplicate) break;
			}
			if(duplicate) toRemove.push_back(i);

		}

		
		std::cout << allClusters.size() << std::endl;
		for(unsigned int i = 0; i < toRemove.size(); i++)
		{
	//		allClusters.erase(allGroups.begin()+(toRemove[i]-i));
		}
		std::cout << allClusters.size() << std::endl;

		// trim the clusters
		std::vector<Cluster> trimmedClusters;
		TrimClusters(allClusters, trimmedClusters);
		std::cout << trimmedClusters.size() << std::endl;

		std::vector<std::pair<int, ImageSlice::PointType> > testOut;


		for(unsigned int i = 0; i < trimmedClusters.size(); i++)
		{
			for(unsigned int j = 0; j < trimmedClusters[i].members.size(); j++)
			{
				std::pair<int, ImageSlice::PointType> p(i, trimmedClusters[i].members[j].location);
				testOut.push_back(p);				
			}			
			std::cout << trimmedClusters[i].members.size() << std::endl;
		}

		ImageSlice::Pointer extractedSlice = ImageSlice::New();
		ExtractSlice<ImageVolume, ImageSlice>(images[0], sliceToTest, extractedSlice);


		utils::DoubleMatrixType pOut = utils::DoubleMatrixType::Zero(testOut.size(), 2);
		utils::IntMatrixType iOut = utils::IntMatrixType::Zero(testOut.size(),1);
		for(unsigned int i = 0; i < testOut.size(); i++)
		{
			itk::ContinuousIndex<double, 2> contIndex;
			extractedSlice->TransformPhysicalPointToContinuousIndex(testOut[i].second, contIndex);

			pOut(i,0) = contIndex[0];			
			pOut(i,1) = contIndex[1];
			
			// compute the image indexes


			
	
			iOut(i,0) = testOut[i].first;			
		}


	


		utils::MatrixDataSet::Pointer dout = utils::MatrixDataSet::New();
		dout->AddData("locations", pOut);
		dout->AddData("index", iOut);
		utils::MatrixWriter::Pointer writer = utils::MatrixWriter::New();
		writer->SetInput(dout);
		writer->SetFilename("data.hdf5");
		writer->Update();
		exit(1);
		*/
	}

	return 0;
}
int main( int argc, char *argv[] )
{
if( argc < 4 )
{
std::cerr << "Missing Parameters " << std::endl;
std::cerr << "Usage: " << argv[0];
std::cerr << " fixedImageFile movingImageFile ";
std::cerr << " outputImagefile [differenceBeforeRegistration] ";
std::cerr << " [differenceAfterRegistration] ";
std::cerr << " [sliceBeforeRegistration] ";
std::cerr << " [sliceDifferenceBeforeRegistration] ";
std::cerr << " [sliceDifferenceAfterRegistration] ";
std::cerr << " [sliceAfterRegistration] " << std::endl;
return EXIT_FAILURE;
}
const unsigned int Dimension = 3;
typedef float PixelType;
typedef itk::Image< PixelType, Dimension > FixedImageType;
typedef itk::Image< PixelType, Dimension > MovingImageType;
// Software Guide : BeginLatex
//
// The Transform class is instantiated using the code below. The only
// template parameter to this class is the representation type of the
// space coordinates.
//
// \index{itk::Versor\-Rigid3D\-Transform!Instantiation}
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef itk::VersorRigid3DTransform< double > TransformType;
// Software Guide : EndCodeSnippet


// The metric and optimizer typedefs below are assumed to follow the standard
// VersorRigid3D registration setup; they are referenced but not defined in
// this excerpt.
typedef itk::MeanSquaresImageToImageMetric< FixedImageType, MovingImageType > MetricType;
typedef itk::VersorRigid3DTransformOptimizer OptimizerType;
typedef itk::LinearInterpolateImageFunction< MovingImageType, double > InterpolatorType;
typedef itk::ImageRegistrationMethod< FixedImageType, MovingImageType > RegistrationType;

MetricType::Pointer metric = MetricType::New();
OptimizerType::Pointer optimizer = OptimizerType::New();
InterpolatorType::Pointer interpolator = InterpolatorType::New();
RegistrationType::Pointer registration = RegistrationType::New();
registration->SetMetric( metric );
registration->SetOptimizer( optimizer );
registration->SetInterpolator( interpolator );
// Software Guide : BeginLatex
//
// The transform object is constructed below and passed to the registration
// method.
//
// \index{itk::Versor\-Rigid3D\-Transform!New()}
// \index{itk::Versor\-Rigid3D\-Transform!Pointer}
// \index{itk::Registration\-Method!SetTransform()}
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
TransformType::Pointer transform = TransformType::New();
registration->SetTransform( transform );
// Software Guide : EndCodeSnippet
typedef itk::ImageFileReader< FixedImageType > FixedImageReaderType;
typedef itk::ImageFileReader< MovingImageType > MovingImageReaderType;
FixedImageReaderType::Pointer fixedImageReader = FixedImageReaderType::New();
MovingImageReaderType::Pointer movingImageReader = MovingImageReaderType::New();
fixedImageReader->SetFileName( argv[1] );
movingImageReader->SetFileName( argv[2] );
registration->SetFixedImage( fixedImageReader->GetOutput() );
registration->SetMovingImage( movingImageReader->GetOutput() );
fixedImageReader->Update();
registration->SetFixedImageRegion(
fixedImageReader->GetOutput()->GetBufferedRegion() );
// Software Guide : BeginLatex
//
// The input images are taken from readers. It is not necessary here to
// explicitly call \code{Update()} on the readers since the
// \doxygen{CenteredTransformInitializer} will do it as part of its
// computations. The following code instantiates the type of the
// initializer. This class is templated over the fixed and moving image type
// as well as the transform type. An initializer is then constructed by
// calling the \code{New()} method and assigning the result to a smart
// pointer.
//
// \index{itk::Centered\-Transform\-Initializer!Instantiation}
// \index{itk::Centered\-Transform\-Initializer!New()}
// \index{itk::Centered\-Transform\-Initializer!SmartPointer}
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef itk::CenteredTransformInitializer< TransformType,
                                           FixedImageType,
                                           MovingImageType > TransformInitializerType;
TransformInitializerType::Pointer initializer = TransformInitializerType::New();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// Let's execute this example over some of the images available in the ftp
// site
//
// \url{ftp://public.kitware.com/pub/itk/Data/BrainWeb}
//
// Note that the images in the ftp site are compressed in \code{.tgz} files.
// You should download these files and uncompress them on your local system.
// After decompressing and extracting the files you could take a pair of
// volumes, for example the pair:
//
// \begin{itemize}
// \item \code{brainweb1e1a10f20.mha}
// \item \code{brainweb1e1a10f20Rot10Tx15.mha}
// \end{itemize}
//
// The second image is the result of intentionally rotating the first image
// by $10$ degrees around the origin and shifting it $15mm$ in $X$. The
// registration takes $24$ iterations and produces:
//
// \begin{center}
// \begin{verbatim}
// [-6.03744e-05, 5.91487e-06, -0.0871932, 2.64659, -17.4637, -0.00232496]
// \end{verbatim}
// \end{center}
//
// These are interpreted as
//
// \begin{itemize}
// \item Versor = $(-6.03744e-05, 5.91487e-06, -0.0871932)$
// \item Translation = $(2.64659, -17.4637, -0.00232496)$ millimeters
// \end{itemize}
//
// This Versor is equivalent to a rotation of $9.98$ degrees around the $Z$
// axis.
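//
// (As a quick check: the vector part of a unit versor is $\sin(\theta/2)\,\hat{n}$,
// so the rotation angle can be recovered as $\theta = 2\arcsin(\|\vec{v}\|)$;
// for $\|\vec{v}\| \approx 0.0872$ this gives approximately $10$ degrees,
// matching the value quoted above.)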
//
// Note that the reported translation is not the translation of $(15.0,0.0,0.0)$
// that we may be naively expecting. The reason is that the
// \code{VersorRigid3DTransform} is applying the rotation around the center
// found by the \code{CenteredTransformInitializer} and then adding the
// translation vector shown above.
//
// It is more illustrative in this case to take a look at the actual
// rotation matrix and offset resulting from the $6$ parameters.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
// (The optimizer configuration and the call to registration->Update() are not
//  shown in this excerpt; finalParameters is retrieved here on the assumption
//  that the registration has already been run.)
OptimizerType::ParametersType finalParameters =
                    registration->GetLastTransformParameters();
transform->SetParameters( finalParameters );
TransformType::MatrixType matrix = transform->GetMatrix();
TransformType::OffsetType offset = transform->GetOffset();
std::cout << "Matrix = " << std::endl << matrix << std::endl;
std::cout << "Offset = " << std::endl << offset << std::endl;
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// The output of these print statements is
//
// \begin{center}
// \begin{verbatim}
// Matrix =
// 0.984795 0.173722 2.23132e-05
// -0.173722 0.984795 0.000119257
// -1.25621e-06 -0.00012132 1
//
// Offset =
// [-15.0105, -0.00672343, 0.0110854]
// \end{verbatim}
// \end{center}
//
// From the rotation matrix it is possible to deduce that the rotation is
// happening in the X,Y plane and that the angle is on the order of
// $\arcsin{(0.173722)}$ which is very close to 10 degrees, as we expected.
//
// Software Guide : EndLatex
// Software Guide : BeginLatex
//
// \begin{figure}
// \center
// \includegraphics[width=0.44\textwidth]{BrainProtonDensitySliceBorder20}
// \includegraphics[width=0.44\textwidth]{BrainProtonDensitySliceR10X13Y17}
// \itkcaption[CenteredTransformInitializer input images]{Fixed and moving image
// provided as input to the registration method using
// CenteredTransformInitializer.}
// \label{fig:FixedMovingImageRegistration8}
// \end{figure}
//
//
// \begin{figure}
// \center
// \includegraphics[width=0.32\textwidth]{ImageRegistration8Output}
// \includegraphics[width=0.32\textwidth]{ImageRegistration8DifferenceBefore}
// \includegraphics[width=0.32\textwidth]{ImageRegistration8DifferenceAfter}
// \itkcaption[CenteredTransformInitializer output images]{Resampled moving
// image (left). Differences between fixed and moving images, before (center)
// and after (right) registration with the
// CenteredTransformInitializer.}
// \label{fig:ImageRegistration8Outputs}
// \end{figure}
//
// Figure \ref{fig:ImageRegistration8Outputs} shows the output of the
// registration. The center image in this figure shows the differences
// between the fixed image and the resampled moving image before the
// registration. The image on the right side presents the difference between
// the fixed image and the resampled moving image after the registration has
// been performed. Note that these images are individual slices extracted
// from the actual volumes. For details, look at the source code of this
// example, where the ExtractImageFilter is used to extract a slice from
// the center of each one of the volumes. One of the main purposes of this
// example is to illustrate that the toolkit can perform registration on
// images of any dimension. The only limitations are, as usual, the amount of
// memory available for the images and the amount of computation time that it
// will take to complete the optimization process.
//
// \begin{figure}
// \center
// \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceMetric}
// \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceAngle}
// \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceTranslations}
// \itkcaption[CenteredTransformInitializer output plots]{Plots of the metric,
// rotation angle, center of rotation and translations during the
// registration using CenteredTransformInitializer.}
// \label{fig:ImageRegistration8Plots}
// \end{figure}
//
// Figure \ref{fig:ImageRegistration8Plots} shows the plots of the main
// output parameters of the registration process: the metric value at every
// iteration, the Z component of the versor (an indication of how the
// rotation progresses), and the X,Y translation components of the
// registration at every iteration.
//
// Shell and Gnuplot scripts for generating the diagrams in
// Figure~\ref{fig:ImageRegistration8Plots} are available in the directory
//
// \code{InsightDocuments/SoftwareGuide/Art}
//
// You are strongly encouraged to run the example code, since only in this
// way can you gain first-hand experience with the behavior of the
// registration process. Once again, this is a simple reflection of the
// philosophy that we put forward in this book:
//
// \emph{If you can not replicate it, then it does not exist!}.
//
// We have seen enough published papers with pretty pictures, presenting
// results that in practice are impossible to replicate. That is vanity, not
// science.
//
// Software Guide : EndLatex
typedef itk::ResampleImageFilter<
MovingImageType,
FixedImageType > ResampleFilterType;
TransformType::Pointer finalTransform = TransformType::New();
finalTransform->SetCenter( transform->GetCenter() );
finalTransform->SetParameters( finalParameters );
finalTransform->SetFixedParameters( transform->GetFixedParameters() );
ResampleFilterType::Pointer resampler = ResampleFilterType::New();
resampler->SetTransform( finalTransform );
resampler->SetInput( movingImageReader->GetOutput() );
FixedImageType::Pointer fixedImage = fixedImageReader->GetOutput();
resampler->SetSize( fixedImage->GetLargestPossibleRegion().GetSize() );
resampler->SetOutputOrigin( fixedImage->GetOrigin() );
resampler->SetOutputSpacing( fixedImage->GetSpacing() );
resampler->SetOutputDirection( fixedImage->GetDirection() );
resampler->SetDefaultPixelValue( 100 );
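// The default pixel value is assigned to output pixels that map outside the
// moving image; a non-zero (gray) value makes those regions easy to spot.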
typedef unsigned char OutputPixelType;
typedef itk::Image< OutputPixelType, Dimension > OutputImageType;
typedef itk::CastImageFilter< FixedImageType, OutputImageType > CastFilterType;
typedef itk::ImageFileWriter< OutputImageType > WriterType;
WriterType::Pointer writer = WriterType::New();
CastFilterType::Pointer caster = CastFilterType::New();
writer->SetFileName( argv[3] );
caster->SetInput( resampler->GetOutput() );
writer->SetInput( caster->GetOutput() );
writer->Update();
typedef itk::SubtractImageFilter<
FixedImageType,
FixedImageType,
FixedImageType > DifferenceFilterType;
DifferenceFilterType::Pointer difference = DifferenceFilterType::New();
typedef itk::RescaleIntensityImageFilter<
FixedImageType,
OutputImageType > RescalerType;
RescalerType::Pointer intensityRescaler = RescalerType::New();
intensityRescaler->SetInput( difference->GetOutput() );
intensityRescaler->SetOutputMinimum( 0 );
intensityRescaler->SetOutputMaximum( 255 );
difference->SetInput1( fixedImageReader->GetOutput() );
difference->SetInput2( resampler->GetOutput() );
resampler->SetDefaultPixelValue( 1 );
WriterType::Pointer writer2 = WriterType::New();
writer2->SetInput( intensityRescaler->GetOutput() );
// Compute the difference image between the
// fixed and resampled moving image.
if( argc > 5 )
{
writer2->SetFileName( argv[5] );
writer2->Update();
}
typedef itk::IdentityTransform< double, Dimension > IdentityTransformType;
IdentityTransformType::Pointer identity = IdentityTransformType::New();
// Compute the difference image between the
// fixed and moving image before registration.
if( argc > 4 )
{
resampler->SetTransform( identity );
writer2->SetFileName( argv[4] );
writer2->Update();
}
//
// Here we extract slices from the input volume, and the difference volumes
// produced before and after the registration. These slices are presented as
// figures in the Software Guide.
//
//
typedef itk::Image< OutputPixelType, 2 > OutputSliceType;
typedef itk::ExtractImageFilter<
OutputImageType,
OutputSliceType > ExtractFilterType;
ExtractFilterType::Pointer extractor = ExtractFilterType::New();
extractor->SetDirectionCollapseToSubmatrix();
extractor->InPlaceOn();
FixedImageType::RegionType inputRegion =
fixedImage->GetLargestPossibleRegion();
FixedImageType::SizeType size = inputRegion.GetSize();
FixedImageType::IndexType start = inputRegion.GetIndex();
// Select one slice as output
size[2] = 0;
start[2] = 90;
FixedImageType::RegionType desiredRegion;
desiredRegion.SetSize( size );
desiredRegion.SetIndex( start );
extractor->SetExtractionRegion( desiredRegion );
typedef itk::ImageFileWriter< OutputSliceType > SliceWriterType;
SliceWriterType::Pointer sliceWriter = SliceWriterType::New();
sliceWriter->SetInput( extractor->GetOutput() );
if( argc > 6 )
{
extractor->SetInput( caster->GetOutput() );
resampler->SetTransform( identity );
sliceWriter->SetFileName( argv[6] );
sliceWriter->Update();
}
if( argc > 7 )
{
extractor->SetInput( intensityRescaler->GetOutput() );
resampler->SetTransform( identity );
sliceWriter->SetFileName( argv[7] );
sliceWriter->Update();
}
if( argc > 8 )
{
resampler->SetTransform( finalTransform );
sliceWriter->SetFileName( argv[8] );
sliceWriter->Update();
}
if( argc > 9 )
{
extractor->SetInput( caster->GetOutput() );
resampler->SetTransform( finalTransform );
sliceWriter->SetFileName( argv[9] );
sliceWriter->Update();
}
return EXIT_SUCCESS;
}