Code Example #1
  void setUp()
  {
    typedef itk::Image<double, 3> ImageType;
    typedef itk::VectorImage<double, 3> VectorImageType;
    typedef itk::ImageRegionIterator<ImageType> ImageIteratorType;
    typedef itk::ImageDuplicator<ImageType> DuplicatorType;
    typedef itk::ComposeImageFilter<ImageType> CompositeFilterType;

    // generate two images with one component
    ImageType::Pointer imageComponent1 = itk::Image<double, 3>::New();
    ImageType::IndexType start;
    start.Fill(0);
    ImageType::SizeType size;
    size.Fill(5);
    ImageType::RegionType region;
    region.SetSize(size);
    region.SetIndex(start);
    imageComponent1->SetRegions(region);
    imageComponent1->Allocate();

    DuplicatorType::Pointer duplicator = DuplicatorType::New();
    duplicator->SetInputImage(imageComponent1);
    duplicator->Update();
    ImageType::Pointer imageComponent2 = duplicator->GetOutput();

    // give them differing data
    ImageIteratorType iterator1(imageComponent1, imageComponent1->GetLargestPossibleRegion());
    iterator1.GoToBegin();
    int i = 0;
    while (!iterator1.IsAtEnd())
    {
      iterator1.Set((double)i);
      ++iterator1;
      ++i;
    }

    ImageIteratorType iterator2(imageComponent2, imageComponent2->GetLargestPossibleRegion());
    iterator2.GoToBegin();
    i = 2000;
    while (!iterator2.IsAtEnd())
    {
      iterator2.Set((double)i);
      ++iterator2;
      ++i;
    }

    // copy into single VectorImage
    CompositeFilterType::Pointer compositeFilter = CompositeFilterType::New();
    compositeFilter->SetInput(0, imageComponent1);
    compositeFilter->SetInput(1, imageComponent2);
    compositeFilter->Update();
    itk::VectorImage<double, 3>::Pointer multiComponentImage = compositeFilter->GetOutput();

    // cast images to mitk
    mitk::CastToMitkImage(multiComponentImage, m_mitkMultiComponentImage);
    mitk::CastToMitkImage(imageComponent1, m_mitkImageComponent1);
    mitk::CastToMitkImage(imageComponent2, m_mitkImageComponent2);
  }
Code Example #2
static itk::Image< ipMITKSegmentationTYPE, 2 >::Pointer CloneImage(itk::Image< ipMITKSegmentationTYPE, 2 >::Pointer pic)
{
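  // itk::ImageDuplicator allocates a new image and deep-copies the input's pixel buffer and meta-data (regions, spacing, origin, direction).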
  typedef itk::Image< ipMITKSegmentationTYPE, 2 > ItkImageType;

  typedef itk::ImageDuplicator< ItkImageType > DuplicatorType;
  DuplicatorType::Pointer duplicator = DuplicatorType::New();
  duplicator->SetInputImage(pic);
  duplicator->Update();

  return duplicator->GetOutput();
}
Code Example #3
template <typename TPixel, unsigned int VImageDimension>
void QmitkVirtualSurgery::MyCastPixelType(itk::Image<TPixel, VImageDimension> *itkImage, mitk::Image::Pointer *pointer)
{
	std::cout<< "Function MyCastPixelType :Begin."<<std::endl;

	/***************duplicate a image to clip 8 segment******************/
	typedef itk::Image<TPixel, VImageDimension> TImageType;
	typedef itk::ImageDuplicator< TImageType > DuplicatorType;
	typename DuplicatorType::Pointer duplicator = DuplicatorType::New();
	duplicator->SetInputImage( itkImage );
	duplicator->Update();
	typename TImageType::Pointer clonedImage = duplicator->GetOutput();
	mitk::Image::Pointer resultImage = mitk::ImportItkImage( clonedImage );
	mitk::DataTreeNode::Pointer newNode = mitk::DataTreeNode::New();
	newNode->SetData(resultImage);
	newNode->SetProperty("name", mitk::StringProperty::New("Liver Image"));
	newNode->SetProperty("opacity", mitk::FloatProperty::New(0.0));
	mitk::DataStorage::GetInstance()->Add( newNode );
	mitk::RenderingManager::GetInstance()->RequestUpdateAll();
	/*********************************************************************/

	//originalPixelType = (dynamic_cast<mitk::Image *>(selectedImage->Get()->GetData()))->GetPixelType();
	if (originalPixelType == typeid(float))
	{
		std::cout<< "Function MyCastPixelType : Origin image pixel type is float."<<std::endl;
		*pointer = mitk::ImportItkImage(itkImage);
	}
	else
	{
		typedef itk::Image<TPixel, VImageDimension> InputImageType;
		typedef float OutputImagePixelType;
		typedef itk::Image<OutputImagePixelType,  VImageDimension> OutputImageType;
		typedef itk::CastImageFilter<InputImageType, OutputImageType> CasterType;

		typename CasterType::Pointer caster = CasterType::New();
		caster->SetInput(itkImage);
		try
		{
			caster->Update();
		}
		catch( itk::ExceptionObject & excp )
		{
			std::cerr << "CastPixelType exception thrown." << std::endl;
			std::cerr << excp << std::endl;
			return ;
		}

		typename OutputImageType::Pointer output = caster->GetOutput();
		*pointer = mitk::ImportItkImage(output);
	}
	
	std::cout<< "Function MyCastPixelType End."<<std::endl;
	
}
Code Example #4
File: amyCube.cpp Project: 151706061/MedicalTile
void amyCube::Copy(amyCube* cube)
{
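	// Deep-copy the source cube's image data, then carry over its window/level settings and series reference.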
	typedef itk::ImageDuplicator< tCube > DuplicatorType;
	DuplicatorType::Pointer duplicator = DuplicatorType::New();
	duplicator->SetInputImage(cube->obj);
	duplicator->Update();
	this->obj = duplicator->GetOutput();
	this->windowcenter=cube->windowcenter;
	this->windowwidth=cube->windowwidth;
	this->series=cube->series;
}
Code Example #5
void Initialisation::searchCenters(ImageType2D::Pointer im, vector<CVector3> &vecCenter, vector<double> &vecRadii, vector<double> &vecAccumulator, float startZ)
{
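	// Duplicate the input slice: the original is used for the small-radius Hough pass and the copy for the large-radius pass.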
	DuplicatorType::Pointer duplicator = DuplicatorType::New();
	duplicator->SetInputImage(im);
	duplicator->Update();
	ImageType2D::Pointer clonedOutput = duplicator->GetOutput();
    
	MinMaxCalculatorType::Pointer minMaxCalculator = MinMaxCalculatorType::New();
    
	unsigned int numberOfCircles = 20;
	double **center_result_small = new double*[numberOfCircles], **center_result_large = new double*[numberOfCircles];
	for (unsigned int k=0; k<numberOfCircles; k++) {
		center_result_small[k] = new double[2];
		center_result_large[k] = new double[2];
	}
	double *radius_result_small = new double[numberOfCircles], *radius_result_large = new double[numberOfCircles];
	double *accumulator_result_small = new double[numberOfCircles], *accumulator_result_large = new double[numberOfCircles];
	unsigned int numSmall = houghTransformCircles(im,numberOfCircles,center_result_small,radius_result_small,accumulator_result_small,radius_/mean_resolution_,-1.0);
	unsigned int numLarge = houghTransformCircles(clonedOutput,numberOfCircles,center_result_large,radius_result_large,accumulator_result_large,radius_/mean_resolution_+6.0,1.0);
    
	// search along results for nested circles
	vector<unsigned int> listMostPromisingCenters;
	vector<unsigned int> listMostPromisingCentersLarge;
	double distance = 0.0;
	for (unsigned int i=0; i<numSmall; i++)
	{
		for (unsigned int j=0; j<numLarge; j++)
		{
			// distance between center + small_radius must be smaller than large_radius
			distance = sqrt(pow(center_result_small[i][0]-center_result_large[j][0],2)+pow(center_result_small[i][1]-center_result_large[j][1],2));
			if ((distance+radius_result_small[i])*0.8 <= radius_result_large[j]) {
				listMostPromisingCenters.push_back(i);
				listMostPromisingCentersLarge.push_back(j);
			}
		}
	}
	for (unsigned int i=0; i<listMostPromisingCenters.size(); i++)
	{
		vecCenter.push_back(CVector3(center_result_small[listMostPromisingCenters[i]][0],startZ,center_result_small[listMostPromisingCenters[i]][1]));
		vecRadii.push_back(radius_result_small[listMostPromisingCenters[i]]);
		vecAccumulator.push_back(accumulator_result_small[listMostPromisingCenters[i]]);
	}
}
Code Example #6
int main( int argc, char* argv[] )
{
  // Parse Command-Line Arguments
  mitkCommandLineParser parser;
  parser.setArgumentPrefix("--","-");

  parser.setTitle("Tumor Progression Mapping");
  parser.setCategory("Preprocessing Tools");
  parser.setContributor("MBI");
  parser.setDescription("Convert a set of co-registered and resampled follow-up images into a 2D png overview (and optionally in a 4D NRRD Volume).\nNaming convention of files is IDENTIFIER_YYYY-MM-DD_MODALITY.nrrd");

  parser.setArgumentPrefix("--","-");
  parser.addArgument("input", "i", mitkCommandLineParser::InputDirectory, "Input folder containing all follow ups");
  parser.addArgument("output", "o", mitkCommandLineParser::OutputFile,"Output file (PNG)");
  parser.addArgument("blanked", "b", mitkCommandLineParser::Bool, "Only Display Morphology");
  parser.addArgument("morphology", "m", mitkCommandLineParser::String, "Morphology postfix.", "_T2.nrrd");
  parser.addArgument("segmentation", "s", mitkCommandLineParser::String,  "Segmentation postfix. Default: _GTV.nrrd", "_GTV.nrrd");
  parser.addArgument("4dVolume", "v", mitkCommandLineParser::OutputFile, "Filepath to merged 4d NRRD Volume.");
  parser.addArgument("skip", "k", mitkCommandLineParser::Int, "Number of slices to be skipped from top and from bottom (Default 0)");
  parser.addArgument("interval", "n", mitkCommandLineParser::Int, "1 - for all slices, 2 - every second, 3 - every third ...");
  parser.addArgument("opacity", "c", mitkCommandLineParser::Float, "Opacity of overlay [0,1] invisible -> visible");

  map<string, us::Any> parsedArgs = parser.parseArguments(argc, argv);

  if ( parsedArgs.size()==0 )
    return EXIT_SUCCESS;

  // Show a help message
  if (parsedArgs.count("help") || parsedArgs.count("h"))
  {
    std::cout << parser.helpText();
    return EXIT_SUCCESS;
  }

  std::string outputFile;
  std::string inputFolder;

  if (parsedArgs.count("input") || parsedArgs.count("i") )
  {
    inputFolder =  us::any_cast<string> (parsedArgs["input"]) + "/";
  }

  if (parsedArgs.count("output") || parsedArgs.count("o") )
  {
    outputFile =  us::any_cast<string> (parsedArgs["output"]);
  }

  int skip = 0;
  int interval = 1;
  float opacity = .3;


  if (parsedArgs.count("skip") || parsedArgs.count("k") )
  {
    skip = us::any_cast<int>(parsedArgs["skip"]);
  }

  if (parsedArgs.count("interval") || parsedArgs.count("n") )
  {
    interval = us::any_cast<int>(parsedArgs["interval"]);
  }

  if (parsedArgs.count("opacity") || parsedArgs.count("c") )
  {
    opacity = us::any_cast<float>(parsedArgs["opacity"]);
  }

  FileList morphFiles;
  FileList segFiles;

  std::string refPattern;
  std::string segPattern;

  if (parsedArgs.count("morphology") || parsedArgs.count("m") )
  {
    refPattern = us::any_cast<std::string>(parsedArgs["morphology"]);
  }
  else
    return EXIT_FAILURE;

  if (parsedArgs.count("segmentation") || parsedArgs.count("s") )
  {

    segPattern = us::any_cast<std::string>(parsedArgs["segmentation"]);

  }

  bool blank = false;
  if (parsedArgs.count("blanked") || parsedArgs.count("b"))
  {
    blank = true;
  }
  /// END Parsing CL Options
  typedef itk::Image<RGBPixelType, 2> OutputImageType;
  typedef itk::Image<RGBPixelType, 3> InputImageType;

  mitkProgressionVisualization progressVis;

  morphFiles = CreateFileList(inputFolder,refPattern);
  segFiles = CreateFileList(inputFolder,segPattern);

  ImageList morphImages = LoadPreprocessedFiles(morphFiles);
  ImageList segImages;
  if (segFiles.size()> 0 && blank == false)
    segImages = LoadPreprocessedFiles(segFiles);


  mitk::Image::Pointer rgbImage;

  // define color for overlay image
  mitk::Color color;
  color.Fill(0);
  color[0]=200*opacity;
  color[1]=0;

  //  Set up itk tile image filter to contain 0..N-1 images
  itk::TileImageFilter<OutputImageType, OutputImageType>::Pointer tileFilter = itk::TileImageFilter<OutputImageType, OutputImageType>::New();
  itk::FixedArray< unsigned int, 2 > layout;
  unsigned int noOfTimeSteps = morphImages.size();
  layout[0] = noOfTimeSteps;
  layout[1] = 0; // automatic number of necessary rows
  tileFilter->SetLayout(layout);

  // Get Reference image (all images are expected to have exact same dimensions!)

  std::string fileName = morphFiles.at(0);
  mitk::Image* referenceImg = morphImages.at(0);

  mitk::Image::Pointer merged4D;
  std::string volumeFile;

  if (parsedArgs.count("4dVolume") || parsedArgs.count("v") )
  {
    volumeFile = us::any_cast<string>(parsedArgs["4dVolume"]);
    if (volumeFile != "")
    {
      unsigned int* dims = new unsigned int[4];
      dims[0] = referenceImg->GetDimensions()[0];
      dims[1] = referenceImg->GetDimensions()[1];
      dims[2] = referenceImg->GetDimensions()[2];
      dims[3] = morphImages.size();
      merged4D = mitk::Image::New();
      merged4D->Initialize( referenceImg->GetPixelType() ,4,dims);
      merged4D->GetTimeGeometry()->Expand(noOfTimeSteps);
    }
  }

  unsigned int* dim = referenceImg->GetDimensions();
  unsigned int sliceMaxAxial=dim[2];

  // Now iterate over all data sets, extract overlay and add it to reference image
  mitk::Image* morphImage;
  mitk::Image* segmentationImage = NULL;

  for (unsigned int i =0; i < noOfTimeSteps; i++)
  {
    MITK_INFO << "File : " << i << " of " << noOfTimeSteps;
    int currentSliceNo = 0;

    // Retrieve images of current time step
    fileName = morphFiles.at(i);
    morphImage = morphImages.at(i);

    // Create 4D Volume image
    if ( volumeFile != "")
    {
      mitk::ImagePixelReadAccessor<InputPixelType,3> readAc(morphImage);

      merged4D->GetGeometry(i)->SetSpacing(referenceImg->GetGeometry()->GetSpacing());
      merged4D->GetGeometry(i)->SetOrigin(referenceImg->GetGeometry()->GetOrigin());
      merged4D->GetGeometry(i)->SetIndexToWorldTransform(referenceImg->GetGeometry()->GetIndexToWorldTransform());
      merged4D->SetVolume(readAc.GetData(),i);
    }

    MITK_INFO << "-- Convert to RGB";
    rgbImage = progressVis.ConvertToRGBImage(morphImage);

    // Add current seg in red
    color.Fill(0);
    color[0]=200*opacity;

    if (!blank  )
    {
      segmentationImage = segImages.at(i);
      if (segmentationImage != NULL)
      {
        MITK_INFO << "-- Add Overlay";
        progressVis.AddColouredOverlay(rgbImage,segmentationImage, color);
      }
    }
    // Add segmentation of the previous time step in green
    if (i == 0)
    {
      MITK_INFO << "Skipping retro view in first time step";
    }
    else
    {
      color.Fill(0);
      // Add previous in green
      color[1]=200*opacity;
      if (!blank)
      {
        mitk::Image* nextSeg = segImages.at(i-1);
        MITK_INFO << "-- Add Overlay of next Step";
        progressVis.AddColouredOverlay(rgbImage, nextSeg , color);
      }
    }

    // Now save all slices from overlay in output folder
    MITK_INFO << "-- Extract Slices";
    for ( int slice = sliceMaxAxial - skip -1 ; slice > skip; slice -= interval)//sliceMaxAxial/40.0f))
    {
      InputImageType::Pointer itkImage = InputImageType::New();
      InputImageType::Pointer itkImage2;

      mitk::CastToItkImage(rgbImage, itkImage);
      typedef itk::ImageDuplicator< InputImageType > DuplicatorType;
      DuplicatorType::Pointer duplicator = DuplicatorType::New();
      duplicator->SetInputImage(itkImage);
      duplicator->Update();
      itkImage2 = duplicator->GetOutput();

      itk::Image<RGBPixelType,2>::Pointer extractedSlice = itk::Image<RGBPixelType,2>::New() ;
      extractedSlice->Graft( progressVis.ExtractSlice(itkImage2,slice));
      tileFilter->SetInput(((currentSliceNo+1)*(noOfTimeSteps)+i),extractedSlice );

      tileFilter->SetInput(i,progressVis.TextAsImage(GetDate(fileName)) );

      currentSliceNo++;
    }
  }

  MITK_INFO << "Tile Filter Update";
  tileFilter->Update();

  // Write the output image
  typedef itk::ImageFileWriter< OutputImageType > WriterType;
  WriterType::Pointer writer = WriterType::New();
  writer->SetInput( tileFilter->GetOutput() );
  std::string patientName;

  patientName =  GetName(fileName);

  writer->SetFileName(outputFile);

  writer->Update();
  if (volumeFile != "")
    mitk::IOUtil::Save(merged4D, volumeFile);

  return EXIT_SUCCESS;
}
Code Example #7
File: TubeDetection.cpp Project: skillii/MedBildA
void TubeDetection::calcMaxMedialness()
{
    unsigned x,y,z;

    unsigned int x_size;
    unsigned int y_size;
    unsigned int z_size;

    float V[3][3];
    float d[3];

    FloatImageType::Pointer max_medialnessimage;

    FloatImageType::IndexType index;

    float scale_index[3];
    HessianFilter::OutputImageType::IndexType indexi;

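    // Start from a copy of the largest-resolution medialness image; its voxels are overwritten below with the per-voxel maximum over all scale levels.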
    DuplicatorType::Pointer duplicator = DuplicatorType::New();
    duplicator->SetInputImage(medialnessImages.at(medialnessImages.size() - 1));
    duplicator->Update();

    max_medialnessimage = duplicator->GetOutput();

    allocateEigenvectorImage();


    std::cout << "Starting calculation of maximum of medialness." << std::endl;

    x_size = medialnessImages.at(medialnessImages.size() - 1)->GetLargestPossibleRegion().GetSize(0);
    y_size = medialnessImages.at(medialnessImages.size() - 1)->GetLargestPossibleRegion().GetSize(1);
    z_size = medialnessImages.at(medialnessImages.size() - 1)->GetLargestPossibleRegion().GetSize(2);

    float current_medialness_value;
    float max_medialness_value;
    int  max_medialness_scale_level;

    for(x = 0; x < x_size; x++)
    {
      index[0] = x;

      if(x % 10 == 0)
        std::cout << "x " << x << std::endl;

      for(y = 0; y < y_size; y++)
      {
        index[1] = y;

        for(z = 0; z < z_size; z++)
        {
          index[2] = z;

          //This is the value we obtain from the largest resolution
          max_medialness_value = max_medialnessimage->GetPixel(index);
          max_medialness_scale_level = medialnessImages.size() - 1;

          //Now we go through all scale levels and look for a maximum..
          for(unsigned i = 0; i < this->medialnessImages.size() - 1; i++)
          {
            scale_index[0] = static_cast<float>(x) / powf(2,medialnessImages.size() - 1 - i);
            scale_index[1] = static_cast<float>(y) / powf(2,medialnessImages.size() - 1 - i);
            scale_index[2] = static_cast<float>(z) / powf(2,medialnessImages.size() - 1 - i);

            current_medialness_value = NumericsHelper::trilinearInterp(medialnessImages.at(i), scale_index[0], scale_index[1], scale_index[2]);

            if(current_medialness_value > max_medialness_value)
            {
              max_medialness_value = current_medialness_value;
              max_medialness_scale_level = i;
            }

          }

          //Now write the maximum into the image again
          max_medialnessimage->SetPixel(index, max_medialness_value);



          //now calculate and save the EV of that pixel:
          indexi[0] = static_cast<int>(roundf(static_cast<float>(x) / powf(2,medialnessImages.size() - 1 - max_medialness_scale_level)));
          indexi[1] = static_cast<int>(roundf(static_cast<float>(y) / powf(2,medialnessImages.size() - 1 - max_medialness_scale_level)));
          indexi[2] = static_cast<int>(roundf(static_cast<float>(z) / powf(2,medialnessImages.size() - 1 - max_medialness_scale_level)));


          eigenValueDecomposition(max_medialness_scale_level, indexi, V, d);


          eigenvector[0]->SetPixel(index, V[0][0]);
          eigenvector[1]->SetPixel(index, V[0][1]);
          eigenvector[2]->SetPixel(index, V[0][2]);

        }
      }
    }

   this->maxMedialnessOverScales = max_medialnessimage;
   std::cout << "Finished calculating maximum of medialness." << std::endl;
}
Code Example #8
File: TubeDetection.cpp Project: skillii/MedBildA
void TubeDetection::calcMedialness()
{
    unsigned x,y,z;
    HessianFilter::OutputImageType::IndexType index;
    unsigned int x_size;
    unsigned int y_size;
    unsigned int z_size;



    float V[3][3];
    float d[3];

    FloatImageType::Pointer medialnessimage;

    std::cout << "Starting calculation of medialness for level: ";


    for(unsigned i = 0; i < this->imagePyramid.size(); i++)
    {
        std::cout << i << " " << std::endl;

        //medialnessimage = imagePyramid.at(i)->Clone();
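        // Duplicate the current pyramid level; its voxel values are overwritten below with the computed medialness.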
        DuplicatorType::Pointer duplicator = DuplicatorType::New();
        duplicator->SetInputImage(imagePyramid.at(i));
        duplicator->Update();

        medialnessimage = duplicator->GetOutput();

        x_size = imagePyramid.at(i)->GetLargestPossibleRegion().GetSize(0);
        y_size = imagePyramid.at(i)->GetLargestPossibleRegion().GetSize(1);
        z_size = imagePyramid.at(i)->GetLargestPossibleRegion().GetSize(2);

        for(x = 0; x < x_size ; x++)
        {
            index[0] = x;

            if(x % 10 == 0)
              std::cout << "x = " << x << std::endl;

            for(y = 0; y < y_size; y++)
            {
                index[1] = y;

                for(z = 0; z < z_size; z++)
                {
                    //Indices for gradient, forward differences
                    index[2] = z;


                    eigenValueDecomposition(i, index, V, d);

                    float medialness = calcMedialness(i, x, y, z, d, V);

                    medialnessimage->SetPixel(index, medialness);
                }
            }
        }

        medialnessImages.push_back(medialnessimage);
    }

    std::cout << std::endl << "Finished calculation of medialness" << std::endl;
}
Code Example #9
File: TubeDetection.cpp Project: skillii/MedBildA
void TubeDetection::calcGradients()
{
    std::cout << "Starting gradient calculation" << std::endl;

    for(unsigned i = 0; i < this->imagePyramid.size(); i++)
    {
        std::cout << "Calculating gradient for pyramid level " << i << std::endl;

        GradientImageFilter::Pointer g_filter = GradientImageFilter::New();

        g_filter->SetInput(imagePyramid.at(i));

        g_filter->Update();


        GradientImageFilter::OutputImagePointer grad = g_filter->GetOutput();


        //convert gradient to float image types

        FloatImageType::Pointer gradX, gradY, gradZ;

        //allocate new images for the gradients
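        // Duplicating the pyramid level yields images with matching size, spacing and origin; their voxel values are overwritten below with the gradient components.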

        DuplicatorType::Pointer duplicator = DuplicatorType::New();
        duplicator->SetInputImage(imagePyramid.at(i));
        duplicator->Update();

        gradX = duplicator->GetOutput();

        duplicator = DuplicatorType::New();
        duplicator->SetInputImage(imagePyramid.at(i));
        duplicator->Update();

        gradY = duplicator->GetOutput();

        duplicator = DuplicatorType::New();
        duplicator->SetInputImage(imagePyramid.at(i));
        duplicator->Update();

        gradZ = duplicator->GetOutput();


        GradientImageFilter::OutputImageType::IndexType index;
        FloatImageType::IndexType index_f;

        unsigned int x_size;
        unsigned int y_size;
        unsigned int z_size;


        x_size = grad->GetLargestPossibleRegion().GetSize(0);
        y_size = grad->GetLargestPossibleRegion().GetSize(1);
        z_size = grad->GetLargestPossibleRegion().GetSize(2);

        unsigned int x,y,z;
        for(x = 0; x < x_size ; x++)
        {
            index[0] = x;
            index_f[0] = x;

            for(y = 0; y < y_size; y++)
            {
                index[1] = y;
                index_f[1] = y;

                for(z = 0; z < z_size; z++)
                {
                    index[2] = z;
                    index_f[2] = z;

                    GradientImageFilter::OutputImagePixelType gradient = grad->GetPixel(index);


                    gradX->SetPixel(index_f, gradient[0]);
                    gradY->SetPixel(index_f, gradient[1]);
                    gradZ->SetPixel(index_f, gradient[2]);

                }
            }
        }
        gradientX.push_back(gradX);
        gradientY.push_back(gradY);
        gradientZ.push_back(gradZ);
    }

    std::cout << "Finished gradient calculation" << std::endl;
}
Code Example #10
int mitkGenerateFeatureVectFromImageTest(int argc, char* argv[])
{
  typedef unsigned char PixelType;
  typedef itk::Image< PixelType,VDimension > ImageType;
  typedef ImageType::Pointer ImageTypePtr;
  typedef itk::ImageDuplicator< ImageType > DuplicatorType;

  typedef itk::ImageFileReader< ImageType> ReaderType;
  typedef itk::ImageFileWriter< ImageType> WriterType;

  if(argc < 3)
  {
    MITK_INFO << "Please specify number of features per Voxel per Input (MUST BE IMPLEMENTED) and at least one input image" << std::endl;
    MITK_INFO << " Usage: 1 d:\\pic3d.nrrd [d:\\pic3d.nrrd] .." << std::endl;
    return -1;
  }

  int k = atoi(argv[1]);

  std::vector<std::string> inputFile;

  for(int i = 2; i < argc; i++)
  {
    inputFile.push_back(argv[i]);
  }

  ReaderType::Pointer reader = ReaderType::New();
  WriterType::Pointer writer = WriterType::New();

  std::vector<ImageTypePtr> inputImages;
  DuplicatorType::Pointer duplicator = DuplicatorType::New();

  for(int i = 0; i< inputFile.size(); i++)
  {
    reader->SetFileName(inputFile.at(i));
    reader->Update();

    duplicator->SetInputImage(reader->GetOutput());
    duplicator->Update();
    inputImages.push_back(duplicator->GetOutput());
  }

  mitk::mitkClassificationFeatureVectCreator<PixelType,VDimension>::Pointer featureVectCreator = mitk::mitkClassificationFeatureVectCreator<PixelType,VDimension>::New();

  // SET NUMBER OF FeaturesProperly
  featureVectCreator->SetNumberOfFeaturesPerVoxel(k);

  for(int i = 0; i < inputImages.size(); i++)
  {
    featureVectCreator->SetNthInput(i,inputImages.at(i));
  }

  featureVectCreator->Update();

  std::cout << "Number of Inputs: " << featureVectCreator->GetNumberOfInputs() << std::endl;

  cv::Mat output = featureVectCreator->GetOuputCv();

  float f1 = output.at<float>(0,0);
  float f2 = output.at<float>(0,1);

  MITK_INFO << "OutputVector :" << std::endl << output << std::endl;

  cv::waitKey();

  //// Test with 2D Image for "visual" verification
  //if(VDimension == 2)
  //{
  //  ImageTypePtr image = inputImages.at(0);

  //  unsigned int width;
  //  unsigned int height;
  //  unsigned int x,y;

  //  width = image->GetLargestPossibleRegion().GetSize(0);
  //  height = 2* image->GetLargestPossibleRegion().GetSize(1);

  //  cv::Mat testImage(height,width, CV_8UC1);
  //  cv::Mat testImage2(height,width, CV_8UC1);
  //  cv::Mat testImage3(height,width, CV_8UC1);

  //  unsigned int index = 0;

  //  for( index = 0; index < width*height ; index++)
  //  {
  //    x = (int) index % (width);
  //    float d = index / (width);
  //    y = (int) floor(  d);

  //    unsigned char value;
  //    float val = output.at<float>(index,0);
  //    value = (unsigned char) val;
  //    testImage.at<unsigned char>(y,x)= value;

  //    if(featureVectCreator->GetIsGenerateResponseEnabled() == false)

  //    {
  //      value = (unsigned char) output.at<float>(index,1);
  //      testImage2.at<unsigned char>(y,x)= value;

  //      value = (unsigned char) output.at<float>(index,2);
  //      testImage3.at<unsigned char>(y,x)= value;
  //    }
  //  }

  //  cv::imshow("testImage1", testImage);
  //  cv::imshow("testImage2", testImage2);
  //  cv::imshow("testImage3", testImage3);
  //  cv::waitKey();

  //}

  //if(VDimension == 3)
  //{
  // }

  return 0;
}
Code Example #11
File: itk_benchmark.cpp Project: ddantas/visiongl
int main( int argc, char* argv[] )
{
    if( argc != 3 )
    {
        std::cerr << "Usage: "<< std::endl;
        std::cerr << argv[0];
        std::cerr << " <InputFileName> <n>";
        std::cerr << std::endl;
        return EXIT_FAILURE;
    }



    int operations = atoi(argv[2]);
    //sscanf(&operations,"%d",argv[2]);
    //printf("%d\n", operations);

    itk::TimeProbe itkClock;
    double t0 = 0.0;
    double tf = 0.0;

    itk::MultiThreader::SetGlobalDefaultNumberOfThreads(1);

    // Loading file
    ReaderType::Pointer reader = ReaderType::New();
    reader->SetFileName( argv[1] );
    reader->Update();
    ImageType::Pointer image = reader->GetOutput();

#ifdef GPU
    GPUReaderType::Pointer gpureader = GPUReaderType::New();
    gpureader->SetFileName( argv[1] );
    gpureader->Update();
    GPUImageType::Pointer gpuImage = gpureader->GetOutput();
#endif


    saveFile((char*) "/tmp/itk_input.dcm", image);

    // Allocate output image
    ImageType::Pointer output = ImageType::New();
    ImageType::RegionType region = image->GetBufferedRegion();
    output->SetRegions( region );
    output->SetOrigin(  image->GetOrigin()  );
    output->SetSpacing( image->GetSpacing() );
    output->Allocate();


    // Negative
    typedef itk::UnaryFunctorImageFilter<ImageType,ImageType,
                  Negate<ImageType::PixelType,ImageType::PixelType> > NegateImageFilterType;
 
    NegateImageFilterType::Pointer negateFilter = NegateImageFilterType::New();

    negateFilter = NegateImageFilterType::New();
    negateFilter->SetInput(image);
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
      negateFilter->Modified();
      negateFilter->Update();    
    }
    itkClock.Stop();
    printf("Time spent running %d negative operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
#endif
    // Saving Not result
    saveFile((char*) "/tmp/itk_not.dcm", negateFilter->GetOutput());


#ifdef GPU
    // GPU Negative
    typedef itk::GPUUnaryFunctorImageFilter<ImageType,ImageType,
                  Negate<ImageType::PixelType,ImageType::PixelType> > GPUNegateImageFilterType;
 
    GPUNegateImageFilterType::Pointer gpuNegateFilter = GPUNegateImageFilterType::New();
    gpuNegateFilter->SetInput(gpureader->GetOutput());
    gpuNegateFilter->Update();    
    // Saving Not result
    //saveFile("/tmp/itk_gpu_not.dcm", gpuNegateFilter->GetOutput());
#endif

    // Common Threshold
    int lowerThreshold = 100;
    int upperThreshold = 200;


    // Threshold
    typedef itk::BinaryThresholdImageFilter <ImageType, ImageType>
       BinaryThresholdImageFilterType;
    
    BinaryThresholdImageFilterType::Pointer thresholdFilter
      = BinaryThresholdImageFilterType::New();

    thresholdFilter = BinaryThresholdImageFilterType::New();
    thresholdFilter->SetInput(reader->GetOutput());
    thresholdFilter->SetLowerThreshold(lowerThreshold);
    thresholdFilter->SetUpperThreshold(upperThreshold);
    thresholdFilter->SetInsideValue(255);
    thresholdFilter->SetOutsideValue(0);
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
      thresholdFilter->Modified();
      thresholdFilter->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d threshold operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    // Saving Threshold result
    saveFile((char*) "/tmp/itk_thresh.dcm", thresholdFilter->GetOutput());
#endif


#ifdef GPU
    // GPU Threshold

    typedef itk::GPUBinaryThresholdImageFilter <GPUImageType, GPUImageType> 
			GPUBinaryThresholdImageFilterType;
    
    GPUBinaryThresholdImageFilterType::Pointer gpuThresholdFilter
      = GPUBinaryThresholdImageFilterType::New();

    gpuThresholdFilter->SetInput(gpureader->GetOutput());
    gpuThresholdFilter->SetLowerThreshold(lowerThreshold);
    gpuThresholdFilter->SetUpperThreshold(upperThreshold);
    gpuThresholdFilter->SetInsideValue(255);
    gpuThresholdFilter->SetOutsideValue(0);

    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
      gpuThresholdFilter->Modified();
      gpuThresholdFilter->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d GPU threshold operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    // Saving GPU Threshold result
    gpuThresholdFilter->GetOutput()->UpdateBuffers();
    saveFile((char*) "/tmp/itk_gpu_thresh.dcm", gpuThresholdFilter->GetOutput());
#endif

    // Mean
    typedef itk::MeanImageFilter< ImageType, ImageType > MeanFilterType;
    MeanFilterType::Pointer meanFilter = MeanFilterType::New();

    meanFilter = MeanFilterType::New();
    meanFilter->SetInput( image );
    meanFilter->SetRadius( 1 );
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
       meanFilter->Modified();
       meanFilter->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d mean blur operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    // Saving Convolution result
    saveFile((char*) "/tmp/itk_mean3x3.dcm", meanFilter->GetOutput());
#endif

    // Binomial Blur (aproximation of gaussian blur)
    typedef itk::BinomialBlurImageFilter<ImageType, ImageType> BinomialBlurImageFilterType;
 
    int repetitions = 1;
    BinomialBlurImageFilterType::Pointer blurFilter = BinomialBlurImageFilterType::New();

    blurFilter = BinomialBlurImageFilterType::New();
    blurFilter->SetInput( reader->GetOutput() );
    blurFilter->SetRepetitions( repetitions );
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
      blurFilter->Modified();
      blurFilter->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d blur operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    // Saving Blur result
    saveFile((char*) "/tmp/itk_blur.dcm", blurFilter->GetOutput());
#endif


#ifdef GPU
    // GPU Blur
    typedef itk::BoxImageFilter< GPUImageType, GPUImageType > BoxImageFilterType;
    typedef itk::GPUBoxImageFilter< GPUImageType, GPUImageType, BoxImageFilterType > GPUBoxImageFilterType;

    GPUBoxImageFilterType::Pointer GPUBlurFilter = GPUBoxImageFilterType::New();

	//ImageType::SizeType indexRadius;
	//indexRadius[0] = 2;
	//indexRadius[1] = 2;
	//indexRadius[2] = 2;

    GPUBlurFilter->SetInput(gpureader->GetOutput());
	//GPUBlurFilter->SetRadius(indexRadius);
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
        GPUBlurFilter->Update();
        GPUBlurFilter->Modified();
    }
    itkClock.Stop();
    printf("Time spent running %d GPU blur operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    GPUBlurFilter->GetOutput()->UpdateBuffers();
    // Saving GPU Blur result
    saveFile((char*) "/tmp/itk_gpu_blur.dcm", GPUBlurFilter->GetOutput());

#endif


    //Erosion Common
    typedef itk::BinaryBallStructuringElement<
      ImageType::PixelType, 3>                  StructuringElementType;
    typedef itk::GrayscaleErodeImageFilter <ImageType, ImageType, StructuringElementType>
      GrayscaleErodeImageFilterType;
    unsigned int radius;

    // Erosion 3x3
    StructuringElementType structuringElement3x3;
    radius = 1;
    structuringElement3x3.SetRadius(radius);
    structuringElement3x3.CreateStructuringElement();

    GrayscaleErodeImageFilterType::Pointer erodeFilter3x3;

    erodeFilter3x3= GrayscaleErodeImageFilterType::New();
    erodeFilter3x3->SetInput(reader->GetOutput());
    erodeFilter3x3->SetKernel(structuringElement3x3);
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
        erodeFilter3x3->Modified();
        erodeFilter3x3->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d 3x3 erosion operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;

    // Saving Erosion result
    saveFile((char*) "/tmp/itk_erode3x3.dcm", erodeFilter3x3->GetOutput());
#endif

    // Erosion 5x5
    StructuringElementType structuringElement5x5;
    radius = 2;
    structuringElement5x5.SetRadius(radius);
    structuringElement5x5.CreateStructuringElement();

    GrayscaleErodeImageFilterType::Pointer erodeFilter5x5;

    erodeFilter5x5 = GrayscaleErodeImageFilterType::New();
    erodeFilter5x5->SetInput(reader->GetOutput());
    erodeFilter5x5->SetKernel(structuringElement5x5);
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
      erodeFilter5x5->Modified();
      erodeFilter5x5->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d 5x5 erosion operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;

    // Saving Erosion result
    saveFile((char*) "/tmp/itk_erode5x5.dcm", erodeFilter5x5->GetOutput());
#endif

    // Copy
    typedef itk::ImageDuplicator< ImageType > DuplicatorType;
    DuplicatorType::Pointer duplicator;

    duplicator = DuplicatorType::New();
    duplicator->SetInputImage(image);
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    { 
       duplicator->Modified();
       duplicator->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d CPU copies: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    // Saving Copy result
    saveFile((char*) "/tmp/itk_copy.dcm", duplicator->GetOutput());
#endif

    // Convolution common
    typedef itk::ConvolutionImageFilter<ImageType> ConvolutionImageFilterType;

    ConvolutionImageFilterType::Pointer convolutionFilter;

    convolutionFilter = ConvolutionImageFilterType::New();
    convolutionFilter->SetInput(reader->GetOutput());
    int convWidth;

    // Convolution 3x3
    ImageType::Pointer kernel3x3 = ImageType::New();
    convWidth = 3;
    CreateKernel(kernel3x3, convWidth);

    convolutionFilter->SetKernelImage(kernel3x3);
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {  
       convolutionFilter->Modified();
       convolutionFilter->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d 3x3 CPU convolutions: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    // Saving Convolution result
    saveFile((char*) "/tmp/itk_convolution3x3.dcm", convolutionFilter->GetOutput());
#endif

    // Convolution 5x5
    ImageType::Pointer kernel5x5 = ImageType::New();
    convWidth = 5;
    CreateKernel(kernel5x5, convWidth);

    convolutionFilter->SetKernelImage(kernel5x5);
#ifndef GPU_only
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {  
       convolutionFilter->Modified();
       convolutionFilter->Update();
    }
    itkClock.Stop();
    printf("Time spent running %d 5x5 CPU convolutions: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    // Saving Convolution result
    saveFile((char*) "/tmp/itk_convolution5x5.dcm", convolutionFilter->GetOutput());
#endif

#ifdef GPU

    // GPU Mean
    typedef itk::GPUMeanImageFilter<GPUImageType, GPUImageType> GPUMeanFilterType;
    GPUMeanFilterType::Pointer GPUMean = GPUMeanFilterType::New();
    GPUMean->SetInput(gpureader->GetOutput());
    GPUMean->SetRadius( 1 );
    itkClock.Start();
    TimerStart();
    for(int n = 0; n < operations; n++)
    {
      GPUMean->Update();
      GPUMean->Modified();
    }
    itkClock.Stop();
    printf("Time spent running %d GPU mean blur operations: %s\n", operations, getTimeElapsedInSeconds());
    tf = itkClock.GetTotal();
    std::cout << "My: "    << (tf - t0) << std::endl;
    t0 = tf;
    GPUMean->GetOutput()->UpdateBuffers();
    saveFile((char*) "/tmp/itk_gpu_blurmean.dcm", GPUMean->GetOutput());
#endif

    // Visualize
    /*
    QuickView viewer;
    viewer.AddImage<ImageType>(
      image,true,
      itksys::SystemTools::GetFilenameName(argv[1]));  
    std::stringstream desc;
    desc << "ITK QuickView: " 
         << argv[1];
    viewer.Visualize();
    */

    // Saving input image as is
    saveFile((char*) "/tmp/itk_input.dcm", image);


    return EXIT_SUCCESS;
}
Code Example #12
template <typename TPixel, unsigned int VImageDimension>
void QmitkVirtualSurgery::ErodeVessel(itk::Image<TPixel, VImageDimension> *itkImage,mitk::Geometry3D* imageGeometry)
{
	std::cout << "Function ErodeVessel begin..." <<std::endl;
	typedef itk::Image<TPixel, VImageDimension> TImageType;
	typedef typename TImageType::IndexType  IndexType;
	typedef typename TImageType::SizeType   SizeType;

	typedef itk::ImageDuplicator< TImageType > DuplicatorType;
	typename DuplicatorType::Pointer duplicator = DuplicatorType::New();
	duplicator->SetInputImage( itkImage );
	duplicator->Update();
	typename TImageType::Pointer clonedImage = duplicator->GetOutput();
	mitk::Image::Pointer resultImage = mitk::ImportItkImage( clonedImage );
	mitk::DataTreeNode::Pointer newNode = mitk::DataTreeNode::New();
	newNode->SetData(resultImage);
	newNode->SetProperty("name", mitk::StringProperty::New("Erode Vessel Image"));
	newNode->SetProperty("opacity", mitk::FloatProperty::New(0.0));
	mitk::DataStorage::GetInstance()->Add( newNode );
	mitk::RenderingManager::GetInstance()->RequestUpdateAll();

	//typedef itk::ImageRegionIterator<TImageType> RegionIteratorType;
	//typedef itk::ImageRegionIterator<TImageType> ImageIteratorType;
	//typedef itk::BinaryBallStructuringElement<float,VImageDimension> StructuringElementType;
	//typedef itk::BinaryDilateImageFilter<TImageType,
	//	TImageType, 
	//	StructuringElementType > DilateFilterType;
	//typedef itk::BinaryErodeImageFilter<TImageType, 
	//	TImageType, 
	//	StructuringElementType > ErodeFilterType;
	

	/*
	StructuringElementType structuringElement;
	structuringElement.SetRadius(1);
	structuringElement.CreateStructuringElement();

	ErodeFilterType::Pointer binaryErode = ErodeFilterType::New();
	DilateFilterType::Pointer binaryDilate = DilateFilterType::New();
	binaryErode->SetKernel( structuringElement );
	binaryDilate->SetKernel( structuringElement );
	binaryErode->SetErodeValue( 1 );
	binaryErode->SetBackgroundValue(0);
	binaryDilate->SetDilateValue( 1 );
	binaryDilate->SetBackgroundValue(0);

	//Closing operation: dilate first, then erode
	binaryDilate->SetInput(itkImage);
	binaryErode->SetInput(binaryDilate->GetOutput());
	binaryErode->Update();
	//Update to complete the closing operation

	//Erode only
	binaryErode->SetInput(itkImage);
	binaryDilate->SetInput(binaryErode->GetOutput());
	binaryDilate->Update();

	TImageType::Pointer outputImage = binaryDilate->GetOutput();
	*pointer = mitk::ImportItkImage(outputImage);
	mitk::Image::Pointer resultImage = mitk::ImportItkImage(outputImage);

	mitk::DataTreeNode::Pointer node = mitk::DataTreeNode::New();
	node->SetData(resultImage);
	node->SetProperty("name", mitk::StringProperty::New("Result Vessel"));
	node->SetProperty("opacity", mitk::FloatProperty::New(1.0));
	node->SetProperty("Surface", mitk::BoolProperty::New(true));
	mitk::DataStorage::GetInstance()->Add( node );
	*/

	//add 2012-10-15
	/*
	typedef itk::Image<TPixel, 2>  SliceImageType;
	typedef SliceImageType::SizeType   SliceSizeType;
	typedef itk::ImageRegionIterator<SliceImageType> SliceIteratorType;
	SizeType ImageSize = itkImage->GetLargestPossibleRegion().GetSize();
	SliceSizeType  SliceSize;
	SliceSize[0] = ImageSize[0];
	SliceSize[1] = ImageSize[1];

	SliceImageType::Pointer slicer = SliceImageType::New();
	slicer->SetRegions(SliceSize);
	slicer->Allocate();
	SliceImageType::RegionType sliceRegion = slicer->GetLargestPossibleRegion();
	
	TImageType::SizeType   RequestSize;
	TImageType::IndexType  RequestIndex;
	TImageType::RegionType RequestRegion;

	RequestSize[0] = ImageSize[0];
	RequestSize[1] = ImageSize[1];
	RequestSize[2] = 1;

	RequestIndex[0] = 0;
	RequestIndex[1] = 0;

	RequestRegion.SetSize(RequestSize);

	typedef itk::BinaryBallStructuringElement<float,2> StructuringElementType2;
	typedef itk::BinaryDilateImageFilter<SliceImageType,SliceImageType, StructuringElementType2 > DilateFilterType2;
	typedef itk::BinaryErodeImageFilter<SliceImageType, SliceImageType, StructuringElementType2 > ErodeFilterType2;
	StructuringElementType2 structuringElement2;
	structuringElement2.SetRadius(2);
	structuringElement2.CreateStructuringElement();
	
	int slices = ImageSize[2];
	for (int i=0; i<slices; i++)
	{
		RequestIndex[2] = i;
		RequestRegion.SetIndex(RequestIndex);
		
		RegionIteratorType it(itkImage, RequestRegion);
		SliceIteratorType  sliceIt(slicer, sliceRegion);

		for(it.GoToBegin(), sliceIt.GoToBegin(); !it.IsAtEnd(); ++it, ++sliceIt)
		{
			sliceIt.Set(it.Get() );
		}

		ErodeFilterType2::Pointer binaryErode1 = ErodeFilterType2::New();
		DilateFilterType2::Pointer binaryDilate1 = DilateFilterType2::New();
		binaryErode1->SetKernel( structuringElement2 );
		binaryDilate1->SetKernel( structuringElement2 );
		binaryErode1->SetErodeValue( 1 );
		binaryDilate1->SetDilateValue( 1 );

		ErodeFilterType2::Pointer binaryErode2 = ErodeFilterType2::New();
		DilateFilterType2::Pointer binaryDilate2 = DilateFilterType2::New();
		binaryErode2->SetKernel( structuringElement2 );
		binaryDilate2->SetKernel( structuringElement2 );
		binaryErode2->SetErodeValue( 1 );
		binaryDilate2->SetDilateValue( 1 );

		//1. Opening operation: erode first, then dilate
		binaryErode1->SetInput(slicer);
		binaryDilate1->SetInput(binaryErode1->GetOutput());
		binaryDilate1->Update();   //Update first, completing the opening step
		//2. Closing operation: dilate first, then erode
		//binaryDilate2->SetInput( binaryDilate1->GetOutput() );
		//binaryErode2->SetInput(binaryDilate2->GetOutput());
		//binaryErode2->Update();    //Update to complete the closing operation

		SliceImageType::Pointer mask = binaryDilate1->GetOutput();

		SliceIteratorType it3(mask, sliceRegion);
		for(it.GoToBegin(), it3.GoToBegin(); !it.IsAtEnd(); ++it, ++it3)
		{
			it.Set(it3.Get());
		}
		
		
	}
	*/
	
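	// For each seed point inside the image, clear a 3x3x3 voxel neighbourhood around it in the cloned image.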
	IndexType seedIndex;
	for ( mitk::PointSet::PointsConstIterator pointsIterator = m_PointSet->GetPointSet()->GetPoints()->Begin(); // really nice syntax to get an iterator for all points
		 pointsIterator != m_PointSet->GetPointSet()->GetPoints()->End();
		 ++pointsIterator ) 
	{
		// first test if this point is inside the image at all
		if ( !imageGeometry->IsInside( pointsIterator.Value()) ) 
			continue;

		// convert world coordinates to image indices
		imageGeometry->WorldToIndex( pointsIterator.Value(), seedIndex);
		for (int i=-1;i<2;i++)
		{
			for (int j=-1;j<2;j++)
			{
				for (int k=-1;k<2;k++)
				{
					IndexType index;
					index[0]=seedIndex[0]+i;
					index[1]=seedIndex[1]+j;
					index[2]=seedIndex[2]+k;
					clonedImage->SetPixel(index,0);
				}
				
			}
		}
	}	

	//*pointer = mitk::ImportItkImage(processImage);
	//mitk::Image::Pointer resultImage = mitk::ImportItkImage(processImage);
	//mitk::DataTreeNode::Pointer newNode = mitk::DataTreeNode::New();
	//newNode->SetData(resultImage);
	//newNode->SetProperty("name", mitk::StringProperty::New("erode vessel image"));
	//newNode->SetProperty("opacity", mitk::FloatProperty::New(0.5));
	//mitk::DataStorage::GetInstance()->Add( newNode );
	//mitk::RenderingManager::GetInstance()->RequestUpdateAll();
	std::cout << "Function ErodeVessel end..." <<std::endl;
}
Code Example #13
bool Initialisation::computeInitialParameters(float startFactor)
{  
	ImageType::SizeType desiredSize = inputImage_->GetLargestPossibleRegion().GetSize();
	
    // The spinal cord detection is performed on a set of axial slices. The choice of which slices will be analyzed depends on the startFactor. Default is the middle axial slice. The parameter startFactor must be the slice number, or a number between 0 and 1 representing the percentage of the image.
    // For example, startFactor=0.5 means the detection will start in the middle axial slice.
    float startZ;
    if (startFactor != -1.0) startSlice_ = startFactor;
    if (startSlice_ == -1.0) {
        startZ = desiredSize[1]/2;
        startSlice_ = startZ;
    }
    else if (startSlice_ < 1.0) {
        startZ = desiredSize[1]*startSlice_;
        startSlice_ = startZ;
    }
	else startZ = startSlice_;

	// Adapt radius to the image spacing to provide a radius in pixels - use average spacing of axial slice
    ImageType::SpacingType spacing = inputImage_->GetSpacing();
    mean_resolution_ = (spacing[0]+spacing[2])/2;
    
    
    // Adapt the gap between detection axial slices to the spacing
	if (round(spacing[1]) != 0 && (int)gap_ % (int)round(spacing[1]) != 0)
	{
		gap_ = spacing[1];
	}
    
    // Adapt the number of axial slices used for the detection to the spacing and the image dimensions.
	if (startZ-((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1]) < 0 || startZ+((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1]) >= desiredSize[1])
	{
		numberOfSlices_ = numberOfSlices_-2;
		//gap_ = 1;
		if (verbose_) {
			cout << "WARNING: number of slices and gap between slices are not adapted to the image dimensions for the initialization. Default parameters will be used." << endl;
			cout << "New parameters:" << endl << "Gap inter slices = " << gap_ << endl << "Number of slices = " << numberOfSlices_ << endl;
		}
	}
    
    // Initialisation of the parameters for the spinal cord detection
	ImageType::IndexType desiredStart;
	desiredStart[0] = 0;
	desiredStart[1] = startZ;
	desiredStart[2] = 0;
	desiredSize[1] = 0;

    // First extraction of the axial slice to check if the image contains information (not null)
	ImageType::RegionType desiredRegionImage(desiredStart, desiredSize);
    typedef itk::ExtractImageFilter< ImageType, ImageType2D > Crop2DFilterType;
    Crop2DFilterType::Pointer cropFilter = Crop2DFilterType::New();
    cropFilter->SetExtractionRegion(desiredRegionImage);
    cropFilter->SetInput(inputImage_);
	#if ITK_VERSION_MAJOR >= 4
    cropFilter->SetDirectionCollapseToIdentity(); // This is required.
	#endif
    try {
        cropFilter->Update();
    } catch( itk::ExceptionObject & e ) {
        std::cerr << "Exception caught while updating cropFilter " << std::endl;
        std::cerr << e << std::endl;
    }
    ImageType2D::Pointer image_test_minmax = cropFilter->GetOutput();
	MinMaxCalculatorType::Pointer minMaxCalculator = MinMaxCalculatorType::New();
	minMaxCalculator->SetImage(image_test_minmax);
	minMaxCalculator->ComputeMaximum();
	minMaxCalculator->ComputeMinimum();
	ImageType2D::PixelType maxIm = minMaxCalculator->GetMaximum(), minIm = minMaxCalculator->GetMinimum();
	if (maxIm == minIm) {
		cerr << "WARNING: The principal axial slice where the spinal cord detection will be performed (slice " << startZ << ") is full of constant value (" << maxIm << "). You can change it using -init parameter." << endl;
	}
    
    // Starting the spinal cord detection process
    if (verbose_) cout << "Initialization" << endl;
    
    // Creation of a matrix of potential spinal cord centers
	vector<vector <vector <Node*> > > centers;
    
    // Start of the detection of circles and ellipses. For each axial slice, a Hough transform is performed to detect circles. Each axial image is stretched in the antero-posterior direction in order to detect the spinal cord as an ellipse as well as a circle.
	for (int i=round(-((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1])); i<=round(((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1])); i+=round(gap_/spacing[1]))
	{
        // Cropping of the image
		if (verbose_) cout << "Slice num " << i << endl;
		desiredStart[1] = startZ+i;
		ImageType::RegionType desiredRegion(desiredStart, desiredSize);
		FilterType::Pointer filter = FilterType::New();
		filter->SetExtractionRegion(desiredRegion);
		filter->SetInput(inputImage_);
        #if ITK_VERSION_MAJOR >= 4
		filter->SetDirectionCollapseToIdentity(); // This is required.
        #endif
		try {
			filter->Update();
		} catch( itk::ExceptionObject & e ) {
			std::cerr << "Exception caught while updating cropFilter " << std::endl;
			std::cerr << e << std::endl;
			cout << inputImage_->GetLargestPossibleRegion().GetSize() << endl;
			cout << desiredRegion << endl;
		}
        
        // The image is duplicated to allow multiple processing passes on the image.
		ImageType2D::Pointer im = filter->GetOutput();
		DuplicatorType::Pointer duplicator = DuplicatorType::New();
		duplicator->SetInputImage(im);
		duplicator->Update();
		ImageType2D::Pointer clonedImage = duplicator->GetOutput();
		ImageType::DirectionType imageDirection = inputImage_->GetDirection();
		ImageType2D::DirectionType clonedImageDirection;
		clonedImageDirection[0][0] = imageDirection[0][0];
		clonedImageDirection[0][1] = imageDirection[0][2];
		clonedImageDirection[1][0] = imageDirection[1][0];
		clonedImageDirection[1][1] = imageDirection[1][2];
		clonedImage->SetDirection(clonedImageDirection);
        
		// Initialization of resulting spinal cord center list.
		vector<vector <Node*> > vecNode;
        
        // Initialization of stretching parameters
        // A stretchingFactor equal to 1 doesn't change the image
		double stretchingFactor = 1.0, step = 0.25;
		while (stretchingFactor <= 2.0)
		{
			if (verbose_) cout << "Stretching factor " << stretchingFactor << endl;
            // Stretching the image in the antero-posterior direction. This direction is chosen because a potentially elliptical spinal cord will be transformed into a circle and will be detected by the Hough transform. The resulting circles will then be stretched in the other direction.
			if (stretchingFactor != 1.0)
			{
				ImageType2D::SizeType inputSize = clonedImage->GetLargestPossibleRegion().GetSize(), outputSize;
				outputSize[0] = inputSize[0]*stretchingFactor;
				outputSize[1] = inputSize[1];
				ImageType2D::SpacingType outputSpacing;
				outputSpacing[0] = static_cast<double>(clonedImage->GetSpacing()[0] * inputSize[0] / outputSize[0]);
				outputSpacing[1] = static_cast<double>(clonedImage->GetSpacing()[1] * inputSize[1] / outputSize[1]);
                
				ResampleImageFilterType::Pointer resample = ResampleImageFilterType::New();
				resample->SetInput(clonedImage);
				resample->SetSize(outputSize);
				resample->SetOutputDirection(clonedImage->GetDirection());
				resample->SetOutputOrigin(clonedImage->GetOrigin());
				resample->SetOutputSpacing(outputSpacing);
				resample->SetTransform(TransformType::New());
				resample->Update();
                
				im = resample->GetOutput();
			}
            
            // Searching the circles in the image using circular Hough transform, adapted from ITK
            // The list of radii and accumulator values are then extracted for analyses
			vector<CVector3> vecCenter;
			vector<double> vecRadii, vecAccumulator;
			searchCenters(im,vecCenter,vecRadii,vecAccumulator,startZ+i);
			
            // Reformatting of the detected circles in the image. Each detected circle is pushed into a Node with all its information.
            // The radii are converted to mm using the mean axial resolution
			vector<Node*> vecNodeTemp;
			for (unsigned int k=0; k<vecCenter.size(); k++) {
				if (vecRadii[k] != 0.0) {
					CVector3 center = vecCenter[k]; center[0] /= stretchingFactor;
					vecNodeTemp.push_back(new Node(center,mean_resolution_*vecRadii[k]/stretchingFactor,vecAccumulator[k],vecCenter[k],mean_resolution_*vecRadii[k],stretchingFactor));
				}
			}
			vecNode.push_back(vecNodeTemp);
			
            // Preparing next iteration of the spinal cord detection
			stretchingFactor += step;
		}
        // Saving the detected centers
		centers.push_back(vecNode);
	}
    
	// All centers are ordered by slice
	// First step -> delete points without a neighbour
	double limitDistance = sqrt(2.0*gap_*gap_); // in mm
	list<Node*> listPoints;
	for (unsigned int k=0; k<numberOfSlices_; k++)
	{
		// For every point in a slice, we search the next and previous slices for neighbours
        // Potential neighbours are circles with a similar radius (less than 20% difference)
		for (unsigned int i=0; i<centers[k].size(); i++)
		{
			for (unsigned int m=0; m<centers[k][i].size(); m++)
			{
				bool hasNeighbor = false;
				double radius = centers[k][i][m]->getRadius();
				if (k != 0) // search down
				{
                    // All the points are sorted by distance
					map<double,Node*> listNeighbors;
					for (unsigned int j=0; j<centers[k-1][i].size(); j++)
					{
                        // Compute the distance between two adjacent centers (in mm)
                        // If this distance is less than or equal to the limit distance, the two centers are attached to each other
						double currentDistance = mean_resolution_*sqrt(pow(centers[k][i][m]->getPosition()[0]-centers[k-1][i][j]->getPosition()[0],2)+pow(centers[k][i][m]->getPosition()[1]-centers[k-1][i][j]->getPosition()[1],2)+pow(centers[k][i][m]->getPosition()[2]-centers[k-1][i][j]->getPosition()[2],2));
						if (currentDistance <= limitDistance)
							listNeighbors[currentDistance] = centers[k-1][i][j];
					}
					while (!listNeighbors.empty())
					{
						double radiusCurrent = listNeighbors.begin()->second->getRadius();
						if (radiusCurrent >= radius*0.8 && radiusCurrent <= radius*1.2)
						{
							hasNeighbor = true;
							centers[k][i][m]->addPrevious(listNeighbors.begin()->second);
							break;
						}
						listNeighbors.erase(listNeighbors.begin());
					}
				}
				if (k != numberOfSlices_-1) // search up
				{
					map<double,Node*> listNeighbors;
					for (unsigned int j=0; j<centers[k+1][i].size(); j++)
					{
						double currentDistance = mean_resolution_*sqrt(pow(centers[k][i][m]->getPosition()[0]-centers[k+1][i][j]->getPosition()[0],2)+pow(centers[k][i][m]->getPosition()[1]-centers[k+1][i][j]->getPosition()[1],2)+pow(centers[k][i][m]->getPosition()[2]-centers[k+1][i][j]->getPosition()[2],2));
						if (currentDistance <= limitDistance)
							listNeighbors[currentDistance] = centers[k+1][i][j];
					}
					while (!listNeighbors.empty())
					{
						double radiusCurrent = listNeighbors.begin()->second->getRadius();
						if (radiusCurrent >= radius*0.8 && radiusCurrent <= radius*1.2)
						{
							hasNeighbor = true;
							centers[k][i][m]->addNext(listNeighbors.begin()->second);
							break;
						}
						listNeighbors.erase(listNeighbors.begin());
					}
				}
				if (hasNeighbor) // if point has at least one neighbor, we keep it
					listPoints.push_back(centers[k][i][m]);
			}
		}
	}
    
	// Second step -> assembling points
	vector<vector <Node*> > chains;
	while (listPoints.size() != 0)
	{
		vector<Node*> temp;
		Node* current = listPoints.front();
		temp.push_back(current);
		listPoints.pop_front();
		while(current->hasNext())
		{
			current = current->getNext();
			temp.push_back(current);
		}
		chains.push_back(temp);
	}
	// Then search for the longest chain, with the largest accumulator value and a small angle between normals
	unsigned int maxLenght = 0, max = 0;
	double maxAccumulator = 0.0, angleMax = 15.0;
	for (unsigned int j=0; j<chains.size(); j++)
	{
		unsigned int length = chains[j].size();
        double angle = 0.0;
        if (length >= 3)
        {
            CVector3 vector1 = chains[j][0]->getPosition()-chains[j][length/2]->getPosition(), vector2 = (chains[j][length/2]->getPosition()-chains[j][length-1]->getPosition());
            angle = 360.0*acos((vector1*vector2)/(vector1.Norm()*vector2.Norm()))/(2.0*M_PI);
        }
		if (length > maxLenght && angle <= angleMax)
		{
			maxLenght = chains[j].size();
			max = j;
			maxAccumulator = 0.0;
			for (unsigned int k=0; k<length; k++)
				maxAccumulator += chains[j][k]->getAccumulator();
		}
		else if (length == maxLenght && angle <= angleMax)
		{
			double accumulator = 0.0;
			for (unsigned int k=0; k<length; k++)
				accumulator += chains[j][k]->getAccumulator();
			if (accumulator > maxAccumulator) {
				maxLenght = chains[j].size();
				max = j;
				maxAccumulator = accumulator;
			}
		}
	}
    
	if (chains.size() > 1)
	{
		unsigned int sizeMaxChain = chains[max].size();
		//cout << "Results : " << endl;
        points_.clear();
		for (unsigned int j=0; j<sizeMaxChain; j++) {
            points_.push_back(chains[max][j]->getPosition());
			//cout << chains[max][j]->getPosition() << " " << chains[max][j]->getRadius() << endl;
        }
        if (verbose_) cout << "Stretching factor of circle found = " << chains[max][0]->getStretchingFactor() << endl;
		if (sizeMaxChain < numberOfSlices_) {
			if (verbose_) cout << "Warning: Number of centers found on slices (" << sizeMaxChain << ") doesn't correspond to the number of analyzed slices. An error may occur. To improve results, you can increase the number of analyzed slices (option -n must be odd)" << endl;
            
			// we have to transform pixel points to physical points
			CVector3 finalPoint, initPointT = chains[max][0]->getPosition(), finalPointT = chains[max][sizeMaxChain-1]->getPosition();
			ContinuousIndex initPointIndex, finalPointIndex;
			initPointIndex[0] = initPointT[0]; initPointIndex[1] = initPointT[1]; initPointIndex[2] = initPointT[2];
			finalPointIndex[0] = finalPointT[0]; finalPointIndex[1] = finalPointT[1]; finalPointIndex[2] = finalPointT[2];
			PointType initPoint, finPoint;
			inputImage_->TransformContinuousIndexToPhysicalPoint(initPointIndex,initPoint);
			inputImage_->TransformContinuousIndexToPhysicalPoint(finalPointIndex,finPoint);
			initialPoint_ = CVector3(initPoint[0],initPoint[1],initPoint[2]);
			finalPoint = CVector3(finPoint[0],finPoint[1],finPoint[2]);
			initialNormal1_ = (finalPoint-initialPoint_).Normalize();
			initialRadius_ = 0.0;
			for (unsigned int j=0; j<sizeMaxChain; j++)
				initialRadius_ += chains[max][j]->getRadiusStretch();
			initialRadius_ /= sizeMaxChain;
            stretchingFactor_ = chains[max][0]->getStretchingFactor();
		}
		else
		{
			// we have to transform pixel points to physical points
			CVector3 finalPoint1, finalPoint2, initPointT = chains[max][(int)(sizeMaxChain/2)]->getPosition(), finalPointT1 = chains[max][0]->getPosition(), finalPointT2 = chains[max][sizeMaxChain-1]->getPosition();
			ContinuousIndex initPointIndex, finalPoint1Index, finalPoint2Index;
			initPointIndex[0] = initPointT[0]; initPointIndex[1] = initPointT[1]; initPointIndex[2] = initPointT[2];
			finalPoint1Index[0] = finalPointT1[0]; finalPoint1Index[1] = finalPointT1[1]; finalPoint1Index[2] = finalPointT1[2];
			finalPoint2Index[0] = finalPointT2[0]; finalPoint2Index[1] = finalPointT2[1]; finalPoint2Index[2] = finalPointT2[2];
			PointType initPoint, finPoint1, finPoint2;
			inputImage_->TransformContinuousIndexToPhysicalPoint(initPointIndex,initPoint);
			inputImage_->TransformContinuousIndexToPhysicalPoint(finalPoint1Index,finPoint1);
			inputImage_->TransformContinuousIndexToPhysicalPoint(finalPoint2Index,finPoint2);
			initialPoint_ = CVector3(initPoint[0],initPoint[1],initPoint[2]);
			finalPoint1 = CVector3(finPoint1[0],finPoint1[1],finPoint1[2]);
			finalPoint2 = CVector3(finPoint2[0],finPoint2[1],finPoint2[2]);
			initialNormal1_ = (finalPoint1-initialPoint_).Normalize();
			initialNormal2_ = (finalPoint2-initialPoint_).Normalize();
			initialRadius_ = 0.0;
			for (unsigned int j=0; j<sizeMaxChain; j++)
				initialRadius_ += chains[max][j]->getRadiusStretch();
			initialRadius_ /= sizeMaxChain;
            stretchingFactor_ = chains[max][0]->getStretchingFactor();
		}
		return true;
	}
	else {
		cout << "Error: No point detected..." << endl;
		return false;
	}
}