// Wrap a single 2D axial slice into a 3D volume containing one slice (size [sx, sy, 1]).
Preprocess::ImageType3D::Pointer Preprocess::SliceTo3D(ImageType2D::Pointer img)
{
    int size1 = img->GetLargestPossibleRegion().GetSize()[0];
    int size2 = img->GetLargestPossibleRegion().GetSize()[1];

    ImageType3D::Pointer nImg = ImageType3D::New();
    ImageType3D::PointType origin;
    origin[0] = 0;
    origin[1] = 0;
    origin[2] = 0;
    nImg->SetOrigin( origin );
    ImageType3D::IndexType index = {{ 0, 0, 0 }};
    ImageType3D::SizeType size = {{ size1, size2, 1 }};
    ImageType3D::RegionType regions;
    regions.SetSize( size );
    regions.SetIndex( index );
    nImg->SetRegions( regions );
    nImg->Allocate();

    // Copy the pixels of the 2D slice into the single slice of the 3D volume.
    typedef itk::ImageRegionIterator< ImageType2D > IteratorType2D;
    typedef itk::ImageRegionIterator< ImageType3D > IteratorType3D;
    IteratorType2D iteratorIn( img, img->GetRequestedRegion() );
    IteratorType3D iteratorOut( nImg, nImg->GetRequestedRegion() );
    for( iteratorIn.GoToBegin(), iteratorOut.GoToBegin();
         !iteratorIn.IsAtEnd() && !iteratorOut.IsAtEnd();
         ++iteratorIn, ++iteratorOut )
    {
        iteratorOut.Set( iteratorIn.Get() );
    }
    return nImg;
}
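// Minimal usage sketch (illustrative only, not part of the original pipeline). It assumes that
// ImageType2D/ImageType3D are the itk::Image typedefs used above and that SliceTo3D can be
// called on a Preprocess instance; the variable names below are placeholders.
//
//     ImageType2D::Pointer slice = ...; // a single axial slice, e.g. from an itk::ExtractImageFilter
//     Preprocess preprocess;
//     ImageType3D::Pointer volume = preprocess.SliceTo3D(slice);
//     // 'volume' now has size [sx, sy, 1], with the slice copied into index z = 0.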
bool Initialisation::computeInitialParameters(float startFactor)
{
    ImageType::SizeType desiredSize = inputImage_->GetLargestPossibleRegion().GetSize();

    // The spinal cord detection is performed on a set of axial slices. The choice of which slices
    // will be analyzed depends on startFactor. The default is the middle axial slice. The parameter
    // startFactor must be either a slice number or a number between 0 and 1 representing the
    // relative position in the image.
    // For example, startFactor=0.5 means the detection will start on the middle axial slice.
    float startZ;
    if (startFactor != -1.0) startSlice_ = startFactor;
    if (startSlice_ == -1.0)
    {
        startZ = desiredSize[1]/2;
        startSlice_ = startZ;
    }
    else if (startSlice_ < 1.0)
    {
        startZ = desiredSize[1]*startSlice_;
        startSlice_ = startZ;
    }
    else startZ = startSlice_;

    // Adapt the radius to the image spacing to provide a radius in pixels - use the average
    // in-plane spacing of an axial slice.
    ImageType::SpacingType spacing = inputImage_->GetSpacing();
    mean_resolution_ = (spacing[0]+spacing[2])/2;

    // Adapt the gap between detection axial slices to the spacing.
    if (round(spacing[1]) != 0 && (int)gap_ % (int)round(spacing[1]) != 0)
    {
        gap_ = spacing[1];
    }

    // Adapt the number of axial slices used for the detection to the spacing and the image dimensions.
    if (startZ-((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1]) < 0 || startZ+((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1]) >= desiredSize[1])
    {
        numberOfSlices_ = numberOfSlices_-2;
        //gap_ = 1;
        if (verbose_)
        {
            cout << "WARNING: The number of slices and the gap between slices are not adapted to the image dimensions for the initialization. Default parameters will be used." << endl;
            cout << "New parameters:" << endl << "Gap between slices = " << gap_ << endl << "Number of slices = " << numberOfSlices_ << endl;
        }
    }

    // Initialisation of the parameters for the spinal cord detection.
    ImageType::IndexType desiredStart;
    desiredStart[0] = 0;
    desiredStart[1] = startZ;
    desiredStart[2] = 0;
    desiredSize[1] = 0;

    // First extraction of the axial slice to check that the image contains information (is not constant).
    ImageType::RegionType desiredRegionImage(desiredStart, desiredSize);
    typedef itk::ExtractImageFilter< ImageType, ImageType2D > Crop2DFilterType;
    Crop2DFilterType::Pointer cropFilter = Crop2DFilterType::New();
    cropFilter->SetExtractionRegion(desiredRegionImage);
    cropFilter->SetInput(inputImage_);
#if ITK_VERSION_MAJOR >= 4
    cropFilter->SetDirectionCollapseToIdentity(); // This is required.
#endif
    try {
        cropFilter->Update();
    } catch( itk::ExceptionObject & e ) {
        std::cerr << "Exception caught while updating cropFilter " << std::endl;
        std::cerr << e << std::endl;
    }
    ImageType2D::Pointer image_test_minmax = cropFilter->GetOutput();

    MinMaxCalculatorType::Pointer minMaxCalculator = MinMaxCalculatorType::New();
    minMaxCalculator->SetImage(image_test_minmax);
    minMaxCalculator->ComputeMaximum();
    minMaxCalculator->ComputeMinimum();
    ImageType2D::PixelType maxIm = minMaxCalculator->GetMaximum(), minIm = minMaxCalculator->GetMinimum();
    if (maxIm == minIm)
    {
        cerr << "WARNING: The principal axial slice where the spinal cord detection will be performed (slice " << startZ << ") contains only a constant value (" << maxIm << "). You can change it using the -init parameter." << endl;
    }

    // Starting the spinal cord detection process.
    if (verbose_) cout << "Initialization" << endl;

    // Creation of a matrix of potential spinal cord centers.
    vector<vector <vector <Node*> > > centers;

    // Start of the detection of circles and ellipses. For each axial slice, a Hough transform is
    // performed to detect circles. Each axial image is stretched in the antero-posterior direction
    // in order to detect the spinal cord as an ellipse as well as a circle.
    for (int i=round(-((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1])); i<=round(((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1])); i+=round(gap_/spacing[1]))
    {
        // Cropping of the image.
        if (verbose_) cout << "Slice num " << i << endl;
        desiredStart[1] = startZ+i;
        ImageType::RegionType desiredRegion(desiredStart, desiredSize);
        FilterType::Pointer filter = FilterType::New();
        filter->SetExtractionRegion(desiredRegion);
        filter->SetInput(inputImage_);
#if ITK_VERSION_MAJOR >= 4
        filter->SetDirectionCollapseToIdentity(); // This is required.
#endif
        try {
            filter->Update();
        } catch( itk::ExceptionObject & e ) {
            std::cerr << "Exception caught while updating the extraction filter " << std::endl;
            std::cerr << e << std::endl;
            cout << inputImage_->GetLargestPossibleRegion().GetSize() << endl;
            cout << desiredRegion << endl;
        }

        // The image is duplicated to allow multiple processing steps on the same slice.
        ImageType2D::Pointer im = filter->GetOutput();
        DuplicatorType::Pointer duplicator = DuplicatorType::New();
        duplicator->SetInputImage(im);
        duplicator->Update();
        ImageType2D::Pointer clonedImage = duplicator->GetOutput();
        ImageType::DirectionType imageDirection = inputImage_->GetDirection();
        ImageType2D::DirectionType clonedImageDirection;
        clonedImageDirection[0][0] = imageDirection[0][0];
        clonedImageDirection[0][1] = imageDirection[0][2];
        clonedImageDirection[1][0] = imageDirection[1][0];
        clonedImageDirection[1][1] = imageDirection[1][2];
        clonedImage->SetDirection(clonedImageDirection);

        // Initialization of the resulting spinal cord center list.
        vector<vector <Node*> > vecNode;

        // Initialization of the stretching parameters.
        // A stretchingFactor equal to 1 leaves the image unchanged.
        double stretchingFactor = 1.0, step = 0.25;
        while (stretchingFactor <= 2.0)
        {
            if (verbose_) cout << "Stretching factor " << stretchingFactor << endl;
            // Stretching the image in the antero-posterior direction. This direction is chosen because
            // a potentially elliptical spinal cord is transformed into a circle and will be detected by
            // the Hough transform. The resulting circles are then scaled back in the other direction.
            if (stretchingFactor != 1.0)
            {
                ImageType2D::SizeType inputSize = clonedImage->GetLargestPossibleRegion().GetSize(), outputSize;
                outputSize[0] = inputSize[0]*stretchingFactor;
                outputSize[1] = inputSize[1];
                ImageType2D::SpacingType outputSpacing;
                outputSpacing[0] = static_cast<double>(clonedImage->GetSpacing()[0] * inputSize[0] / outputSize[0]);
                outputSpacing[1] = static_cast<double>(clonedImage->GetSpacing()[1] * inputSize[1] / outputSize[1]);

                ResampleImageFilterType::Pointer resample = ResampleImageFilterType::New();
                resample->SetInput(clonedImage);
                resample->SetSize(outputSize);
                resample->SetOutputDirection(clonedImage->GetDirection());
                resample->SetOutputOrigin(clonedImage->GetOrigin());
                resample->SetOutputSpacing(outputSpacing);
                resample->SetTransform(TransformType::New());
                resample->Update();
                im = resample->GetOutput();
            }

            // Search for circles in the image using a circular Hough transform, adapted from ITK.
            // The lists of radii and accumulator values are then extracted for analysis.
            vector<CVector3> vecCenter;
            vector<double> vecRadii, vecAccumulator;
            searchCenters(im, vecCenter, vecRadii, vecAccumulator, startZ+i);

            // Reformatting of the detected circles: each detected circle is pushed into a Node with all its information.
            // The radii are converted to mm using the mean axial resolution.
            vector<Node*> vecNodeTemp;
            for (unsigned int k=0; k<vecCenter.size(); k++)
            {
                if (vecRadii[k] != 0.0)
                {
                    CVector3 center = vecCenter[k];
                    center[0] /= stretchingFactor;
                    vecNodeTemp.push_back(new Node(center, mean_resolution_*vecRadii[k]/stretchingFactor, vecAccumulator[k], vecCenter[k], mean_resolution_*vecRadii[k], stretchingFactor));
                }
            }
            vecNode.push_back(vecNodeTemp);

            // Preparing the next iteration of the spinal cord detection.
            stretchingFactor += step;
        }
        // Saving the detected centers.
        centers.push_back(vecNode);
    }

    // All centers are ordered by slice.
    // First step -> discard points without a neighbour.
    double limitDistance = sqrt(2.0*gap_*gap_); // in mm
    list<Node*> listPoints;
    for (unsigned int k=0; k<numberOfSlices_; k++)
    {
        // For every point in a slice, we search the next and previous slices for neighbours.
        // Potential neighbours are circles that have a similar radius (less than 20% difference).
        for (unsigned int i=0; i<centers[k].size(); i++)
        {
            for (unsigned int m=0; m<centers[k][i].size(); m++)
            {
                bool hasNeighbor = false;
                double radius = centers[k][i][m]->getRadius();
                if (k != 0) // search down
                {
                    // All the points are sorted by distance.
                    map<double,Node*> listNeighbors;
                    for (unsigned int j=0; j<centers[k-1][i].size(); j++)
                    {
                        // Compute the distance between two adjacent centers (in mm).
                        // If this distance is less than or equal to the limit distance, the two centers are attached to each other.
                        double currentDistance = mean_resolution_*sqrt(pow(centers[k][i][m]->getPosition()[0]-centers[k-1][i][j]->getPosition()[0],2)
                                                                       + pow(centers[k][i][m]->getPosition()[1]-centers[k-1][i][j]->getPosition()[1],2)
                                                                       + pow(centers[k][i][m]->getPosition()[2]-centers[k-1][i][j]->getPosition()[2],2));
                        if (currentDistance <= limitDistance) listNeighbors[currentDistance] = centers[k-1][i][j];
                    }
                    while (!listNeighbors.empty())
                    {
                        double radiusCurrent = listNeighbors.begin()->second->getRadius();
                        if (radiusCurrent >= radius*0.8 && radiusCurrent <= radius*1.2)
                        {
                            hasNeighbor = true;
                            centers[k][i][m]->addPrevious(listNeighbors.begin()->second);
                            break;
                        }
                        listNeighbors.erase(listNeighbors.begin());
                    }
                }
                if (k != numberOfSlices_-1) // search up
                {
                    map<double,Node*> listNeighbors;
                    for (unsigned int j=0; j<centers[k+1][i].size(); j++)
                    {
                        double currentDistance = mean_resolution_*sqrt(pow(centers[k][i][m]->getPosition()[0]-centers[k+1][i][j]->getPosition()[0],2)
                                                                       + pow(centers[k][i][m]->getPosition()[1]-centers[k+1][i][j]->getPosition()[1],2)
                                                                       + pow(centers[k][i][m]->getPosition()[2]-centers[k+1][i][j]->getPosition()[2],2));
                        if (currentDistance <= limitDistance) listNeighbors[currentDistance] = centers[k+1][i][j];
                    }
                    while (!listNeighbors.empty())
                    {
                        double radiusCurrent = listNeighbors.begin()->second->getRadius();
                        if (radiusCurrent >= radius*0.8 && radiusCurrent <= radius*1.2)
                        {
                            hasNeighbor = true;
                            centers[k][i][m]->addNext(listNeighbors.begin()->second);
                            break;
                        }
                        listNeighbors.erase(listNeighbors.begin());
                    }
                }
                if (hasNeighbor) // if the point has at least one neighbour, we keep it
                    listPoints.push_back(centers[k][i][m]);
            }
        }
    }

    // Second step -> assembling points into chains.
    vector<vector <Node*> > chains;
    while (listPoints.size() != 0)
    {
        vector<Node*> temp;
        Node* current = listPoints.front();
        temp.push_back(current);
        listPoints.pop_front();
        while (current->hasNext())
        {
            current = current->getNext();
            temp.push_back(current);
        }
        chains.push_back(temp);
    }

    // Then search for the longest chain with the largest accumulator value and a small angle between normals.
    unsigned int maxLenght = 0, max = 0;
    double maxAccumulator = 0.0,
           angleMax = 15.0;
    for (unsigned int j=0; j<chains.size(); j++)
    {
        unsigned int length = chains[j].size();
        double angle = 0.0;
        if (length >= 3)
        {
            CVector3 vector1 = chains[j][0]->getPosition()-chains[j][length/2]->getPosition(),
                     vector2 = (chains[j][length/2]->getPosition()-chains[j][length-1]->getPosition());
            // Angle between the two halves of the chain, converted from radians to degrees.
            angle = 360.0*acos((vector1*vector2)/(vector1.Norm()*vector2.Norm()))/(2.0*M_PI);
        }
        if (length > maxLenght && angle <= angleMax)
        {
            maxLenght = chains[j].size();
            max = j;
            maxAccumulator = 0.0;
            for (unsigned int k=0; k<length; k++) maxAccumulator += chains[j][k]->getAccumulator();
        }
        else if (length == maxLenght && angle <= angleMax)
        {
            double accumulator = 0.0;
            for (unsigned int k=0; k<length; k++) accumulator += chains[j][k]->getAccumulator();
            if (accumulator > maxAccumulator)
            {
                maxLenght = chains[j].size();
                max = j;
                maxAccumulator = accumulator;
            }
        }
    }
    if (chains.size() > 1)
    {
        unsigned int sizeMaxChain = chains[max].size();
        //cout << "Results : " << endl;
        points_.clear();
        for (unsigned int j=0; j<sizeMaxChain; j++)
        {
            points_.push_back(chains[max][j]->getPosition());
            //cout << chains[max][j]->getPosition() << " " << chains[max][j]->getRadius() << endl;
        }
        if (verbose_) cout << "Stretching factor of circle found = " << chains[max][0]->getStretchingFactor() << endl;
        if (sizeMaxChain < numberOfSlices_)
        {
            if (verbose_) cout << "Warning: The number of centers found on the slices (" << sizeMaxChain << ") doesn't correspond to the number of analyzed slices. An error may occur. To improve the results, you can increase the number of analyzed slices (option -n must be odd)." << endl;

            // We have to transform pixel points to physical points.
            CVector3 finalPoint, initPointT = chains[max][0]->getPosition(), finalPointT = chains[max][sizeMaxChain-1]->getPosition();
            ContinuousIndex initPointIndex, finalPointIndex;
            initPointIndex[0] = initPointT[0]; initPointIndex[1] = initPointT[1]; initPointIndex[2] = initPointT[2];
            finalPointIndex[0] = finalPointT[0]; finalPointIndex[1] = finalPointT[1]; finalPointIndex[2] = finalPointT[2];
            PointType initPoint, finPoint;
            inputImage_->TransformContinuousIndexToPhysicalPoint(initPointIndex, initPoint);
            inputImage_->TransformContinuousIndexToPhysicalPoint(finalPointIndex, finPoint);
            initialPoint_ = CVector3(initPoint[0], initPoint[1], initPoint[2]);
            finalPoint = CVector3(finPoint[0], finPoint[1], finPoint[2]);
            initialNormal1_ = (finalPoint-initialPoint_).Normalize();
            initialRadius_ = 0.0;
            for (unsigned int j=0; j<sizeMaxChain; j++) initialRadius_ += chains[max][j]->getRadiusStretch();
            initialRadius_ /= sizeMaxChain;
            stretchingFactor_ = chains[max][0]->getStretchingFactor();
        }
        else
        {
            // We have to transform pixel points to physical points.
            CVector3 finalPoint1, finalPoint2, initPointT = chains[max][(int)(sizeMaxChain/2)]->getPosition(),
                     finalPointT1 = chains[max][0]->getPosition(), finalPointT2 = chains[max][sizeMaxChain-1]->getPosition();
            ContinuousIndex initPointIndex, finalPoint1Index, finalPoint2Index;
            initPointIndex[0] = initPointT[0]; initPointIndex[1] = initPointT[1]; initPointIndex[2] = initPointT[2];
            finalPoint1Index[0] = finalPointT1[0]; finalPoint1Index[1] = finalPointT1[1]; finalPoint1Index[2] = finalPointT1[2];
            finalPoint2Index[0] = finalPointT2[0]; finalPoint2Index[1] = finalPointT2[1]; finalPoint2Index[2] = finalPointT2[2];
            PointType initPoint, finPoint1, finPoint2;
            inputImage_->TransformContinuousIndexToPhysicalPoint(initPointIndex, initPoint);
            inputImage_->TransformContinuousIndexToPhysicalPoint(finalPoint1Index, finPoint1);
            inputImage_->TransformContinuousIndexToPhysicalPoint(finalPoint2Index, finPoint2);
            initialPoint_ = CVector3(initPoint[0], initPoint[1], initPoint[2]);
            finalPoint1 = CVector3(finPoint1[0], finPoint1[1], finPoint1[2]);
            finalPoint2 = CVector3(finPoint2[0], finPoint2[1], finPoint2[2]);
            initialNormal1_ = (finalPoint1-initialPoint_).Normalize();
            initialNormal2_ = (finalPoint2-initialPoint_).Normalize();
            initialRadius_ = 0.0;
            for (unsigned int j=0; j<sizeMaxChain; j++) initialRadius_ += chains[max][j]->getRadiusStretch();
            initialRadius_ /= sizeMaxChain;
            stretchingFactor_ = chains[max][0]->getStretchingFactor();
        }
        return true;
    }
    else
    {
        cout << "Error: No point detected..." << endl;
        return false;
    }
}
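// Hypothetical driver sketch (illustration only; the class API beyond computeInitialParameters()
// is not shown in this file, so the setter below is a placeholder, not the actual method name):
//
//     Initialisation init;
//     init.setInputImage(inputImage);            // placeholder setter that fills inputImage_
//     if (init.computeInitialParameters(0.5))    // 0.5 -> start the detection at the middle axial slice
//     {
//         // initialPoint_, initialNormal1_/initialNormal2_, initialRadius_ and stretchingFactor_
//         // now hold the initial spinal cord position, orientation and radius computed above.
//     }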