int main( int argc, char *argv[] )
{
  if( argc < 4 )
    {
    std::cerr << "Missing Parameters " << std::endl;
    std::cerr << "Usage: " << argv[0];
    std::cerr << " fixedImageFile movingImageFile ";
    std::cerr << " outputImagefile [differenceBeforeRegistration] ";
    std::cerr << " [differenceAfterRegistration] ";
    std::cerr << " [sliceBeforeRegistration] ";
    std::cerr << " [sliceDifferenceBeforeRegistration] ";
    std::cerr << " [sliceDifferenceAfterRegistration] ";
    std::cerr << " [sliceAfterRegistration] " << std::endl;
    return EXIT_FAILURE;
    }

  const unsigned int                          Dimension = 3;
  typedef float                               PixelType;
  typedef itk::Image< PixelType, Dimension >  FixedImageType;
  typedef itk::Image< PixelType, Dimension >  MovingImageType;

  //  Software Guide : BeginLatex
  //
  //  The Transform class is instantiated using the code below. The only
  //  template parameter to this class is the representation type of the
  //  space coordinates.
  //
  //  \index{itk::Versor\-Rigid3D\-Transform!Instantiation}
  //
  //  Software Guide : EndLatex

  // Software Guide : BeginCodeSnippet
  typedef itk::VersorRigid3DTransform< double > TransformType;
  // Software Guide : EndCodeSnippet

  // The metric and optimizer are assumed here to be the usual choices for a
  // rigid 3D registration: a mean squares metric and the optimizer designed
  // for the versor parameter space.
  typedef itk::VersorRigid3DTransformOptimizer         OptimizerType;

  typedef itk::MeanSquaresImageToImageMetric<
                                      FixedImageType,
                                      MovingImageType >  MetricType;

  typedef itk::LinearInterpolateImageFunction<
                                      MovingImageType,
                                      double           > InterpolatorType;

  typedef itk::ImageRegistrationMethod<
                                      FixedImageType,
                                      MovingImageType >  RegistrationType;

  MetricType::Pointer         metric        = MetricType::New();
  OptimizerType::Pointer      optimizer     = OptimizerType::New();
  InterpolatorType::Pointer   interpolator  = InterpolatorType::New();
  RegistrationType::Pointer   registration  = RegistrationType::New();

  registration->SetMetric(        metric        );
  registration->SetOptimizer(     optimizer     );
  registration->SetInterpolator(  interpolator  );

  //  Software Guide : BeginLatex
  //
  //  The transform object is constructed below and passed to the
  //  registration method.
  //
  //  \index{itk::Versor\-Rigid3D\-Transform!New()}
  //  \index{itk::Versor\-Rigid3D\-Transform!Pointer}
  //  \index{itk::Registration\-Method!SetTransform()}
  //
  //  Software Guide : EndLatex

  // Software Guide : BeginCodeSnippet
  TransformType::Pointer  transform = TransformType::New();
  registration->SetTransform( transform );
  // Software Guide : EndCodeSnippet

  typedef itk::ImageFileReader< FixedImageType  > FixedImageReaderType;
  typedef itk::ImageFileReader< MovingImageType > MovingImageReaderType;

  FixedImageReaderType::Pointer  fixedImageReader  = FixedImageReaderType::New();
  MovingImageReaderType::Pointer movingImageReader = MovingImageReaderType::New();

  fixedImageReader->SetFileName(  argv[1] );
  movingImageReader->SetFileName( argv[2] );

  registration->SetFixedImage(  fixedImageReader->GetOutput()  );
  registration->SetMovingImage( movingImageReader->GetOutput() );

  fixedImageReader->Update();

  registration->SetFixedImageRegion(
     fixedImageReader->GetOutput()->GetBufferedRegion() );

  //  Software Guide : BeginLatex
  //
  //  The input images are taken from readers. It is not necessary here to
  //  explicitly call \code{Update()} on the readers since the
  //  \doxygen{CenteredTransformInitializer} will do it as part of its
  //  computations. The following code instantiates the type of the
  //  initializer. This class is templated over the fixed and moving image
  //  types as well as the transform type. An initializer is then constructed
  //  by calling the \code{New()} method and assigning the result to a smart
  //  pointer.
  //
  //  \index{itk::Centered\-Transform\-Initializer!Instantiation}
  //  \index{itk::Centered\-Transform\-Initializer!New()}
  //  \index{itk::Centered\-Transform\-Initializer!SmartPointer}
  //
  //  Software Guide : EndLatex

  // Software Guide : BeginCodeSnippet
  typedef itk::CenteredTransformInitializer< TransformType,
                                             FixedImageType,
                                             MovingImageType
                                                 >  TransformInitializerType;

  TransformInitializerType::Pointer initializer =
                                          TransformInitializerType::New();
  // Software Guide : EndCodeSnippet

  // The final transform parameters are retrieved once the registration has
  // been run (the initializer setup, the optimizer configuration and the
  // call to registration->Update() are assumed to have executed before this
  // point).
  OptimizerType::ParametersType finalParameters =
                    registration->GetLastTransformParameters();

  //  Software Guide : BeginLatex
  //
  //  Let's execute this example over some of the images available in the ftp
  //  site
  //
  //  \url{ftp://public.kitware.com/pub/itk/Data/BrainWeb}
  //
  //  Note that the images in the ftp site are compressed in \code{.tgz}
  //  files. You should download these files and uncompress them on your
  //  local system. After decompressing and extracting the files you could
  //  take a pair of volumes, for example the pair:
  //
  //  \begin{itemize}
  //  \item \code{brainweb1e1a10f20.mha}
  //  \item \code{brainweb1e1a10f20Rot10Tx15.mha}
  //  \end{itemize}
  //
  //  The second image is the result of intentionally rotating the first
  //  image by $10$ degrees around the origin and shifting it $15mm$ in $X$.
  //  The registration takes $24$ iterations and produces:
  //
  //  \begin{center}
  //  \begin{verbatim}
  //  [-6.03744e-05, 5.91487e-06, -0.0871932, 2.64659, -17.4637, -0.00232496]
  //  \end{verbatim}
  //  \end{center}
  //
  //  These parameters are interpreted as
  //
  //  \begin{itemize}
  //  \item Versor      = $(-6.03744e-05, 5.91487e-06, -0.0871932)$
  //  \item Translation = $(2.64659, -17.4637, -0.00232496)$ millimeters
  //  \end{itemize}
  //
  //  This Versor is equivalent to a rotation of $9.98$ degrees around the
  //  $Z$ axis.
  //
  //  Note that the reported translation is not the translation of
  //  $(15.0,0.0,0.0)$ that we might naively expect. The reason is that the
  //  \code{VersorRigid3DTransform} applies the rotation around the center
  //  found by the \code{CenteredTransformInitializer} and then adds the
  //  translation vector shown above.
  //
  //  It is more illustrative in this case to take a look at the actual
  //  rotation matrix and offset resulting from the $6$ parameters.
  //
  //  Software Guide : EndLatex

  // Software Guide : BeginCodeSnippet
  transform->SetParameters( finalParameters );

  TransformType::MatrixType matrix = transform->GetMatrix();
  TransformType::OffsetType offset = transform->GetOffset();

  std::cout << "Matrix = " << std::endl << matrix << std::endl;
  std::cout << "Offset = " << std::endl << offset << std::endl;
  // Software Guide : EndCodeSnippet

  //  Software Guide : BeginLatex
  //
  //  The output of these print statements is
  //
  //  \begin{center}
  //  \begin{verbatim}
  //  Matrix =
  //      0.984795      0.173722     2.23132e-05
  //     -0.173722      0.984795     0.000119257
  //     -1.25621e-06  -0.00012132   1
  //
  //  Offset =
  //  [-15.0105, -0.00672343, 0.0110854]
  //  \end{verbatim}
  //  \end{center}
  //
  //  From the rotation matrix it is possible to deduce that the rotation is
  //  happening in the X,Y plane and that the angle is on the order of
  //  $\arcsin{(0.173722)}$, which is very close to 10 degrees, as we
  //  expected.
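  //
  //  As a quick consistency check (using the standard conventions that a
  //  versor stores $\sin(\theta/2)$ in its vector part and that the rigid
  //  transform is applied as a rotation about a fixed center followed by a
  //  translation), the angle can also be recovered from the dominant $Z$
  //  component of the versor,
  //
  //  \begin{equation}
  //  \theta \approx 2 \arcsin( 0.0871932 ) \approx 10 \; \mbox{degrees},
  //  \end{equation}
  //
  //  and the printed offset corresponds to
  //  $\mathbf{t} + \mathbf{C} - R \cdot \mathbf{C}$, where $\mathbf{C}$ is
  //  the rotation center computed by the initializer, $R$ the rotation
  //  matrix and $\mathbf{t}$ the translation parameters. This explains why
  //  the offset printed above, rather than the raw translation parameters,
  //  is the quantity that comes out close to a pure $15mm$ displacement
  //  along $X$.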
  //
  //  Software Guide : EndLatex

  //  Software Guide : BeginLatex
  //
  //  \begin{figure}
  //  \center
  //  \includegraphics[width=0.44\textwidth]{BrainProtonDensitySliceBorder20}
  //  \includegraphics[width=0.44\textwidth]{BrainProtonDensitySliceR10X13Y17}
  //  \itkcaption[CenteredTransformInitializer input images]{Fixed and moving
  //  images provided as input to the registration method using
  //  CenteredTransformInitializer.}
  //  \label{fig:FixedMovingImageRegistration8}
  //  \end{figure}
  //
  //
  //  \begin{figure}
  //  \center
  //  \includegraphics[width=0.32\textwidth]{ImageRegistration8Output}
  //  \includegraphics[width=0.32\textwidth]{ImageRegistration8DifferenceBefore}
  //  \includegraphics[width=0.32\textwidth]{ImageRegistration8DifferenceAfter}
  //  \itkcaption[CenteredTransformInitializer output images]{Resampled moving
  //  image (left). Differences between fixed and moving images, before
  //  (center) and after (right) registration with the
  //  CenteredTransformInitializer.}
  //  \label{fig:ImageRegistration8Outputs}
  //  \end{figure}
  //
  //  Figure \ref{fig:ImageRegistration8Outputs} shows the output of the
  //  registration. The center image in this figure shows the differences
  //  between the fixed image and the resampled moving image before the
  //  registration. The image on the right side presents the difference
  //  between the fixed image and the resampled moving image after the
  //  registration has been performed. Note that these images are individual
  //  slices extracted from the actual volumes. For details, look at the
  //  source code of this example, where the ExtractImageFilter is used to
  //  extract a slice from the center of each one of the volumes. One of the
  //  main purposes of this example is to illustrate that the toolkit can
  //  perform registration on images of any dimension. The only limitations
  //  are, as usual, the amount of memory available for the images and the
  //  amount of computation time that it will take to complete the
  //  optimization process.
  //
  //  \begin{figure}
  //  \center
  //  \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceMetric}
  //  \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceAngle}
  //  \includegraphics[height=0.32\textwidth]{ImageRegistration8TraceTranslations}
  //  \itkcaption[CenteredTransformInitializer output plots]{Plots of the
  //  metric, rotation angle, center of rotation and translations during the
  //  registration using CenteredTransformInitializer.}
  //  \label{fig:ImageRegistration8Plots}
  //  \end{figure}
  //
  //  Figure \ref{fig:ImageRegistration8Plots} shows the plots of the main
  //  output parameters of the registration process: the metric values at
  //  every iteration, the $Z$ component of the versor as an indication of
  //  how the rotation progresses, and the $X$,$Y$ translation components of
  //  the registration at every iteration.
  //
  //  Shell and Gnuplot scripts for generating the diagrams in
  //  Figure~\ref{fig:ImageRegistration8Plots} are available in the directory
  //
  //  \code{InsightDocuments/SoftwareGuide/Art}
  //
  //  You are strongly encouraged to run the example code, since only in this
  //  way can you gain first-hand experience with the behavior of the
  //  registration process. Once again, this is a simple reflection of the
  //  philosophy that we put forward in this book:
  //
  //  \emph{If you can not replicate it, then it does not exist!}
  //
  //  We have seen enough published papers with pretty pictures, presenting
  //  results that in practice are impossible to replicate. That is vanity,
  //  not science.
  //
  //  Software Guide : EndLatex

  typedef itk::ResampleImageFilter<
                            MovingImageType,
                            FixedImageType >    ResampleFilterType;

  TransformType::Pointer finalTransform = TransformType::New();

  finalTransform->SetCenter( transform->GetCenter() );
  finalTransform->SetParameters( finalParameters );
  finalTransform->SetFixedParameters( transform->GetFixedParameters() );

  ResampleFilterType::Pointer resampler = ResampleFilterType::New();

  resampler->SetTransform( finalTransform );
  resampler->SetInput( movingImageReader->GetOutput() );

  FixedImageType::Pointer fixedImage = fixedImageReader->GetOutput();

  resampler->SetSize( fixedImage->GetLargestPossibleRegion().GetSize() );
  resampler->SetOutputOrigin(  fixedImage->GetOrigin() );
  resampler->SetOutputSpacing( fixedImage->GetSpacing() );
  resampler->SetOutputDirection( fixedImage->GetDirection() );
  resampler->SetDefaultPixelValue( 100 );

  typedef unsigned char                             OutputPixelType;
  typedef itk::Image< OutputPixelType, Dimension >  OutputImageType;

  typedef itk::CastImageFilter<
                        FixedImageType,
                        OutputImageType >           CastFilterType;

  typedef itk::ImageFileWriter< OutputImageType >   WriterType;

  WriterType::Pointer      writer = WriterType::New();
  CastFilterType::Pointer  caster = CastFilterType::New();

  writer->SetFileName( argv[3] );

  caster->SetInput( resampler->GetOutput() );
  writer->SetInput( caster->GetOutput() );
  writer->Update();

  typedef itk::SubtractImageFilter<
                                  FixedImageType,
                                  FixedImageType,
                                  FixedImageType > DifferenceFilterType;

  DifferenceFilterType::Pointer difference = DifferenceFilterType::New();

  typedef itk::RescaleIntensityImageFilter<
                                  FixedImageType,
                                  OutputImageType >  RescalerType;

  RescalerType::Pointer intensityRescaler = RescalerType::New();

  intensityRescaler->SetInput( difference->GetOutput() );
  intensityRescaler->SetOutputMinimum(   0 );
  intensityRescaler->SetOutputMaximum( 255 );

  difference->SetInput1( fixedImageReader->GetOutput() );
  difference->SetInput2( resampler->GetOutput() );

  resampler->SetDefaultPixelValue( 1 );

  WriterType::Pointer writer2 = WriterType::New();
  writer2->SetInput( intensityRescaler->GetOutput() );

  // Compute the difference image between the
  // fixed and resampled moving image.
  if( argc > 5 )
    {
    writer2->SetFileName( argv[5] );
    writer2->Update();
    }

  typedef itk::IdentityTransform< double, Dimension > IdentityTransformType;
  IdentityTransformType::Pointer identity = IdentityTransformType::New();

  // Compute the difference image between the
  // fixed and moving image before registration.
  if( argc > 4 )
    {
    resampler->SetTransform( identity );
    writer2->SetFileName( argv[4] );
    writer2->Update();
    }

  //
  // Here we extract slices from the input volume, and the difference volumes
  // produced before and after the registration. These slices are presented as
  // figures in the Software Guide.
  //
  //
  typedef itk::Image< OutputPixelType, 2 > OutputSliceType;

  typedef itk::ExtractImageFilter<
                          OutputImageType,
                          OutputSliceType > ExtractFilterType;

  ExtractFilterType::Pointer extractor = ExtractFilterType::New();
  extractor->SetDirectionCollapseToSubmatrix();
  extractor->InPlaceOn();

  FixedImageType::RegionType inputRegion =
                               fixedImage->GetLargestPossibleRegion();

  FixedImageType::SizeType  size  = inputRegion.GetSize();
  FixedImageType::IndexType start = inputRegion.GetIndex();

  // Select one slice as output
  size[2]  =  0;
  start[2] = 90;

  FixedImageType::RegionType desiredRegion;
  desiredRegion.SetSize(  size  );
  desiredRegion.SetIndex( start );

  extractor->SetExtractionRegion( desiredRegion );

  typedef itk::ImageFileWriter< OutputSliceType > SliceWriterType;
  SliceWriterType::Pointer sliceWriter = SliceWriterType::New();
  sliceWriter->SetInput( extractor->GetOutput() );

  if( argc > 6 )
    {
    extractor->SetInput( caster->GetOutput() );
    resampler->SetTransform( identity );
    sliceWriter->SetFileName( argv[6] );
    sliceWriter->Update();
    }

  if( argc > 7 )
    {
    extractor->SetInput( intensityRescaler->GetOutput() );
    resampler->SetTransform( identity );
    sliceWriter->SetFileName( argv[7] );
    sliceWriter->Update();
    }

  if( argc > 8 )
    {
    resampler->SetTransform( finalTransform );
    sliceWriter->SetFileName( argv[8] );
    sliceWriter->Update();
    }

  if( argc > 9 )
    {
    extractor->SetInput( caster->GetOutput() );
    resampler->SetTransform( finalTransform );
    sliceWriter->SetFileName( argv[9] );
    sliceWriter->Update();
    }

  return EXIT_SUCCESS;
}
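//
// A possible invocation of this example, assuming it has been built as an
// executable named ImageRegistration8 and using the BrainWeb pair discussed
// above (the three output file names are arbitrary placeholders):
//
//   ImageRegistration8  brainweb1e1a10f20.mha  brainweb1e1a10f20Rot10Tx15.mha \
//       output.mha  differenceBefore.mha  differenceAfter.mha
//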
void Clipper::Clip(Lazarus::image_3d_opencv_8uc1 *input,
                   Lazarus::image_3d_opencv_8uc1 *output)
{
    // The clipper expects a valid, binary 3D mask as input.
    CHECK(UniversalAlgorithm::Check3DImage(input));
    CHECK(UniversalAlgorithm::isBinary(input));

    image_3d_opencv_8uc1* maskConnectivityFix = new image_3d_opencv_8uc1;
    maskConnectivityFix->Initialize();
    // NOTE: maskConnectivityFix is never used below and is not freed
    // (the corresponding delete at the end of this function is commented out).

    // Convert the OpenCV volume to ITK and rescale its intensities to [0,255].
    itk_3d_double::Pointer inITK = itk_3d_double::New();
    Converter::ocv_2_itk_3d(input, inITK);

    typedef itk::RescaleIntensityImageFilter<itk_3d_double> RescalerType;
    RescalerType::Pointer rescaler = RescalerType::New();
    rescaler->SetInput(inITK);
    rescaler->SetOutputMinimum(0);
    rescaler->SetOutputMaximum(255);
    rescaler->Update();

    // Convert the rescaled ITK image to a VTK image for rendering.
    typedef itk::ImageToVTKImageFilter<itk_3d_double> converterType;
    converterType::Pointer converter = converterType::New();
    converter->SetInput(rescaler->GetOutput());
    converter->Update();

    Pool::image = converter->GetOutput();

    int dim[3];
    Pool::image->GetDimensions(dim);

    // Collect the world coordinates and voxel indices of all foreground voxels.
    Pool::pointSeries.clear();
    Pool::IdSeries.clear();
    for(int k = 0; k < dim[2]; k++)
    {
        for(int j = 0; j < dim[1]; j++)
        {
            for(int i = 0; i < dim[0]; i++)
            {
                double * voxel = static_cast<double*>(
                        Pool::image->GetScalarPointer(i, j, k) );
                if( *voxel > 0 )
                {
                    // Flat point id of voxel (i,j,k) in the VTK image.
                    double p[3];
                    Pool::image->GetPoint(k*dim[1]*dim[0] + j*dim[0] + i, p);

                    Point3d pp;
                    {
                        pp.x = p[0];
                        pp.y = p[1];
                        pp.z = p[2];
                    }
                    Point3i ppp;
                    {
                        ppp.x = i;
                        ppp.y = j;
                        ppp.z = k;
                    }
                    Pool::pointSeries.push_back(pp);
                    Pool::IdSeries.push_back(ppp);
                }
            }
        }
    }

    // Optional surface extraction and decimation of the mask, kept for reference.
    // vtkSmartPointer<vtkMarchingCubes> iso = vtkSmartPointer<vtkMarchingCubes>::New();
    // iso->SetInputData(Pool::image);
    // iso->SetNumberOfContours(1);
    // iso->SetValue(0, 1);
    // iso->ComputeGradientsOn();
    // iso->ComputeNormalsOn();
    // iso->ComputeScalarsOff();
    // iso->Update();
    //
    // vtkSmartPointer<vtkQuadricClustering> decimate =
    //         vtkSmartPointer<vtkQuadricClustering>::New();
    // decimate->SetNumberOfXDivisions(200);
    // decimate->SetNumberOfYDivisions(200);
    // decimate->SetNumberOfZDivisions(200);
    // decimate->SetInputData(iso->GetOutput());
    // decimate->Update();
    //
    // vtkSmartPointer<vtkPolyDataMapper> dataMapper =
    //         vtkSmartPointer<vtkPolyDataMapper>::New();
    // dataMapper->SetInputData(decimate->GetOutput());
    // dataMapper->Update();
    //Pool::cubeActor->SetMapper(dataMapper);

    // Set up the interactive clipping session: the current mask starts as a
    // copy of the input and is edited through the Qt/VTK visualizer.
    ClipperVisualizer vis;

    Pool::currentMask = new image_3d_opencv_8uc1;
    Pool::currentMask->Initialize();
    Pool::currentMask->SetData(input);

    Pool::renderWindow->AddRenderer(Pool::renderer);
    Pool::renderWindow->SetSize(600, 600);
    Pool::interactor->SetRenderWindow(Pool::renderWindow);

    // NOTE: GetParallelProjection() only queries the flag and discards the
    // result; if parallel projection is intended, ParallelProjectionOn()
    // would be needed instead.
    Pool::camera->GetParallelProjection();
    Pool::renderer->ResetCamera();

    vtkSmartPointer<myInteractorStyle> myStyle =
            vtkSmartPointer<myInteractorStyle>::New();
    myStyle->SetDefaultRenderer(Pool::renderer);
    Pool::interactor->SetInteractorStyle( myStyle );
    myStyle->SetWrappingQtWindow(&vis);
    myStyle->FirstDraw();

    //Pool::renderer->AddActor(Pool::cubeActor);
    //QvtkWindow* wnd = new QvtkWindow(NULL);
    //wnd->AddRenderWindow(Pool::renderWindow);

    Pool::interactor->Initialize();
    Pool::renderWindow->Render();
    //Pool::interactor->Start();
    //wnd->exec();

    // Block until the user closes the visualizer, then hand back the edited mask.
    vis.exec();

    LOG(INFO) << "rendering terminated." << endl;

    output->SetData(Pool::currentMask);

    //delete maskConnectivityFix;
    //delete wnd;
}