template< class TOutputScalarType >
DwiPhantomGenerationFilter< TOutputScalarType >
::DwiPhantomGenerationFilter()
    : m_BValue(1000)
    , m_SignalScale(1000)
    , m_BaselineImages(0)
    , m_MaxBaseline(0)
    , m_MeanBaseline(0)
    , m_NoiseVariance(0.004)
    , m_GreyMatterAdc(0.01)
    , m_SimulateBaseline(true)
    , m_DefaultBaseline(1000)
{
    this->SetNumberOfRequiredOutputs (1);
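    // Default phantom geometry: 10x10x10 voxels, isotropic spacing of 2.5, identity direction matrix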
    m_Spacing.Fill(2.5);
    m_Origin.Fill(0.0);
    m_DirectionMatrix.SetIdentity();
    m_ImageRegion.SetSize(0, 10);
    m_ImageRegion.SetSize(1, 10);
    m_ImageRegion.SetSize(2, 10);

    typename OutputImageType::Pointer outImage = OutputImageType::New();
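    // Pre-allocate the output vector image (QBALL_ODFSIZE components per voxel)
    // so that GetOutput() returns a valid image right after construction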

    outImage->SetSpacing( m_Spacing );   // Set the image spacing
    outImage->SetOrigin( m_Origin );     // Set the image origin
    outImage->SetDirection( m_DirectionMatrix );  // Set the image direction
    outImage->SetLargestPossibleRegion( m_ImageRegion );
    outImage->SetBufferedRegion( m_ImageRegion );
    outImage->SetRequestedRegion( m_ImageRegion );
    outImage->SetVectorLength(QBALL_ODFSIZE);
    outImage->Allocate();
    outImage->FillBuffer(0);

    this->SetNthOutput (0, outImage);

}
template< class TPixelType, int TDimension >
void DicomDiffusionImageReader<TPixelType, TDimension>
::GenerateOutputInformation(void)
{
  typename OutputImageType::Pointer output = this->GetOutput();
  typedef itk::ImageSeriesReader<InputImageType> ReaderType;

  // Read the first volume and use its geometry and size for the output.
  if (m_Headers.size() > 0)
  {
    typename ReaderType::Pointer reader = ReaderType::New();

    try
    {
      // Read the image
      reader->SetFileNames (m_Headers[0]->m_DicomFilenames);
      reader->UpdateOutputInformation();

      output->SetSpacing( reader->GetOutput()->GetSpacing() );   // Set the image spacing
      output->SetOrigin( reader->GetOutput()->GetOrigin() );     // Set the image origin
      output->SetDirection( reader->GetOutput()->GetDirection() );  // Set the image direction
      output->SetLargestPossibleRegion( reader->GetOutput()->GetLargestPossibleRegion() );
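      // One vector component per DWI volume, i.e. one entry per DICOM header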
      output->SetVectorLength( m_Headers.size() );
    }
    catch (itk::ExceptionObject &)
    {
      // rethrow without copying to preserve the exception's dynamic type
      throw;
    }
  }
  else
  {
    itkExceptionMacro(<< "At least one filename is required." );
  }
}
template< class OutputImageType >
void FieldmapGeneratorFilter< OutputImageType >::BeforeThreadedGenerateData()
{
    typename OutputImageType::Pointer outImage = OutputImageType::New();
    outImage->SetSpacing( m_Spacing );
    outImage->SetOrigin( m_Origin );
    outImage->SetDirection( m_DirectionMatrix );
    outImage->SetLargestPossibleRegion( m_ImageRegion );
    outImage->SetBufferedRegion( m_ImageRegion );
    outImage->SetRequestedRegion( m_ImageRegion );
    outImage->Allocate();
    outImage->FillBuffer(0);
    this->SetNthOutput(0, outImage);
}
  template< class TInputImage, class TOutputPixelType >
  void TractsToFiberEndingsImageFilter< TInputImage, TOutputPixelType >
      ::GenerateData()
  {
    MITK_INFO << "Generating 2D fiber endings image";
    if( typeid(TOutputPixelType) != typeid(unsigned char) )
    {
      MITK_INFO << "Only 'unsigned char' and 'itk::RGBAPixel<unsigned char>' are supported as OutputPixelType";
      return;
    }
    mitk::Geometry3D::Pointer geometry = m_FiberBundle->GetGeometry();

    typename OutputImageType::Pointer outImage =
        static_cast< OutputImageType * >(this->ProcessObject::GetOutput(0));

    outImage->SetSpacing( geometry->GetSpacing()/m_UpsamplingFactor );   // Set the image spacing

    mitk::Point3D origin = geometry->GetOrigin();
    mitk::Point3D indexOrigin;
    geometry->WorldToIndex(origin, indexOrigin);
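    // Shift the origin by half of the voxel-size difference so the corner of the first
    // upsampled voxel coincides with the corner of the original voxel grid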
    indexOrigin[0] = indexOrigin[0] - .5 * (1.0-1.0/m_UpsamplingFactor);
    indexOrigin[1] = indexOrigin[1] - .5 * (1.0-1.0/m_UpsamplingFactor);
    indexOrigin[2] = indexOrigin[2] - .5 * (1.0-1.0/m_UpsamplingFactor);
    mitk::Point3D newOrigin;
    geometry->IndexToWorld(indexOrigin, newOrigin);

    outImage->SetOrigin( newOrigin );     // Set the image origin

    itk::Matrix<double, 3, 3> matrix;
    for (int i=0; i<3; i++)
      for (int j=0; j<3; j++)
        matrix[j][i] = geometry->GetMatrixColumn(i)[j]/geometry->GetSpacing().GetElement(i);
    outImage->SetDirection( matrix );  // Set the image direction

    float* bounds = m_FiberBundle->GetBounds();
    ImageRegion<3> upsampledRegion;
    upsampledRegion.SetSize(0, bounds[0]);
    upsampledRegion.SetSize(1, bounds[1]);
    upsampledRegion.SetSize(2, bounds[2]);

    typename InputImageType::RegionType::SizeType upsampledSize = upsampledRegion.GetSize();
    for (unsigned int n = 0; n < 3; n++)
    {
      upsampledSize[n] = upsampledSize[n] * m_UpsamplingFactor;
    }
    upsampledRegion.SetSize( upsampledSize );
    outImage->SetRegions( upsampledRegion );

    outImage->Allocate();

    int w = upsampledSize[0];
    int h = upsampledSize[1];
    int d = upsampledSize[2];


    unsigned char* accuout;
    accuout = reinterpret_cast<unsigned char*>(outImage->GetBufferPointer());
    for (int i=0; i<w*h*d; i++) accuout[i] = 0;
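    // From here on, voxel (x,y,z) is addressed in the flat output buffer at offset x + w*(y + h*z)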

    typedef mitk::FiberBundle::ContainerTractType   ContainerTractType;
    typedef mitk::FiberBundle::ContainerType        ContainerType;
    typedef mitk::FiberBundle::ContainerPointType   ContainerPointType;
    ContainerType::Pointer tractContainer = m_FiberBundle->GetTractContainer();

    for (int i=0; i<tractContainer->Size(); i++)
    {
      ContainerTractType::Pointer tract = tractContainer->GetElement(i);
      int tractsize = tract->Size();

      if (tractsize>1)
      {
        ContainerPointType start = tract->GetElement(0);
        ContainerPointType end = tract->GetElement(tractsize-1);

        start[0] = (start[0]+0.5) * m_UpsamplingFactor;
        start[1] = (start[1]+0.5) * m_UpsamplingFactor;
        start[2] = (start[2]+0.5) * m_UpsamplingFactor;

        // int coordinates inside image?
        int px = (int) (start[0]);
        if (px < 0 || px >= w)
          continue;
        int py = (int) (start[1]);
        if (py < 0 || py >= h)
          continue;
        int pz = (int) (start[2]);
        if (pz < 0 || pz >= d)
          continue;

        accuout[( px   + w*(py  + h*pz  ))] += 1;


        end[0] = (end[0]+0.5) * m_UpsamplingFactor;
        end[1] = (end[1]+0.5) * m_UpsamplingFactor;
        end[2] = (end[2]+0.5) * m_UpsamplingFactor;

        // int coordinates inside image?
        px = (int) (end[0]);
        if (px < 0 || px >= w)
          continue;
        py = (int) (end[1]);
        if (py < 0 || py >= h)
          continue;
        pz = (int) (end[2]);
        if (pz < 0 || pz >= d)
          continue;

        accuout[( px   + w*(py  + h*pz  ))] += 1;
      }
    }

    MITK_INFO << "2D fiber endings image generated";
  }
template< class OutputImageType >
void TractDensityImageFilter< OutputImageType >::GenerateData()
{
    // generate upsampled image
    mitk::Geometry3D::Pointer geometry = m_FiberBundle->GetGeometry();
    typename OutputImageType::Pointer outImage = this->GetOutput();

    // calculate new image parameters
    mitk::Vector3D newSpacing;
    mitk::Point3D newOrigin;
    itk::Matrix<double, 3, 3> newDirection;
    ImageRegion<3> upsampledRegion;
    if (m_UseImageGeometry && !m_InputImage.IsNull())
    {
        MITK_INFO << "TractDensityImageFilter: using image geometry";
        newSpacing = m_InputImage->GetSpacing()/m_UpsamplingFactor;
        upsampledRegion = m_InputImage->GetLargestPossibleRegion();
        newOrigin = m_InputImage->GetOrigin();
        typename OutputImageType::RegionType::SizeType size = upsampledRegion.GetSize();
        size[0] *= m_UpsamplingFactor;
        size[1] *= m_UpsamplingFactor;
        size[2] *= m_UpsamplingFactor;
        upsampledRegion.SetSize(size);
        newDirection = m_InputImage->GetDirection();
    }
    else
    {
        MITK_INFO << "TractDensityImageFilter: using fiber bundle geometry";
        newSpacing = geometry->GetSpacing()/m_UpsamplingFactor;
        newOrigin = geometry->GetOrigin();
        mitk::Geometry3D::BoundsArrayType bounds = geometry->GetBounds();
        newOrigin[0] += bounds.GetElement(0);
        newOrigin[1] += bounds.GetElement(2);
        newOrigin[2] += bounds.GetElement(4);

        for (int i=0; i<3; i++)
            for (int j=0; j<3; j++)
                newDirection[j][i] = geometry->GetMatrixColumn(i)[j];
        upsampledRegion.SetSize(0, geometry->GetExtent(0)*m_UpsamplingFactor);
        upsampledRegion.SetSize(1, geometry->GetExtent(1)*m_UpsamplingFactor);
        upsampledRegion.SetSize(2, geometry->GetExtent(2)*m_UpsamplingFactor);
    }
    typename OutputImageType::RegionType::SizeType upsampledSize = upsampledRegion.GetSize();

    // apply new image parameters
    outImage->SetSpacing( newSpacing );
    outImage->SetOrigin( newOrigin );
    outImage->SetDirection( newDirection );
    outImage->SetRegions( upsampledRegion );
    outImage->Allocate();
    outImage->FillBuffer(0.0);

    int w = upsampledSize[0];
    int h = upsampledSize[1];
    int d = upsampledSize[2];

    // set/initialize output
    OutPixelType* outImageBufferPointer = (OutPixelType*)outImage->GetBufferPointer();

    // resample fiber bundle
    float minSpacing = 1;
    if(newSpacing[0]<newSpacing[1] && newSpacing[0]<newSpacing[2])
        minSpacing = newSpacing[0];
    else if (newSpacing[1] < newSpacing[2])
        minSpacing = newSpacing[1];
    else
        minSpacing = newSpacing[2];

    MITK_INFO << "TractDensityImageFilter: resampling fibers to ensure sufficient voxel coverage";
    m_FiberBundle = m_FiberBundle->GetDeepCopy();
    m_FiberBundle->ResampleFibers(minSpacing);

    MITK_INFO << "TractDensityImageFilter: starting image generation";
    vtkSmartPointer<vtkPolyData> fiberPolyData = m_FiberBundle->GetFiberPolyData();
    vtkSmartPointer<vtkCellArray> vLines = fiberPolyData->GetLines();
    vLines->InitTraversal();
    int numFibers = m_FiberBundle->GetNumFibers();
    boost::progress_display disp(numFibers);
    for( int i=0; i<numFibers; i++ )
    {
        ++disp;
        vtkIdType   numPoints(0);
        vtkIdType*  points(NULL);
        vLines->GetNextCell ( numPoints, points );

        // fill output image
        for( int j=0; j<numPoints; j++)
        {
            itk::Point<float, 3> vertex = GetItkPoint(fiberPolyData->GetPoint(points[j]));
            itk::Index<3> index;
            itk::ContinuousIndex<float, 3> contIndex;
            outImage->TransformPhysicalPointToIndex(vertex, index);
            outImage->TransformPhysicalPointToContinuousIndex(vertex, contIndex);

            float frac_x = contIndex[0] - index[0];
            float frac_y = contIndex[1] - index[1];
            float frac_z = contIndex[2] - index[2];

            if (frac_x<0)
            {
                index[0] -= 1;
                frac_x += 1;
            }
            if (frac_y<0)
            {
                index[1] -= 1;
                frac_y += 1;
            }
            if (frac_z<0)
            {
                index[2] -= 1;
                frac_z += 1;
            }

            frac_x = 1-frac_x;
            frac_y = 1-frac_y;
            frac_z = 1-frac_z;
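            // after flipping, frac_* holds the trilinear weight of the lower neighboring voxel along each axis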

            // int coordinates inside image?
            if (index[0] < 0 || index[0] >= w-1)
                continue;
            if (index[1] < 0 || index[1] >= h-1)
                continue;
            if (index[2] < 0 || index[2] >= d-1)
                continue;

            if (m_BinaryOutput)
            {
                outImageBufferPointer[( index[0]   + w*(index[1]  + h*index[2]  ))] = 1;
                outImageBufferPointer[( index[0]   + w*(index[1]+1+ h*index[2]  ))] = 1;
                outImageBufferPointer[( index[0]   + w*(index[1]  + h*index[2]+h))] = 1;
                outImageBufferPointer[( index[0]   + w*(index[1]+1+ h*index[2]+h))] = 1;
                outImageBufferPointer[( index[0]+1 + w*(index[1]  + h*index[2]  ))] = 1;
                outImageBufferPointer[( index[0]+1 + w*(index[1]  + h*index[2]+h))] = 1;
                outImageBufferPointer[( index[0]+1 + w*(index[1]+1+ h*index[2]  ))] = 1;
                outImageBufferPointer[( index[0]+1 + w*(index[1]+1+ h*index[2]+h))] = 1;
            }
            else
            {
                outImageBufferPointer[( index[0]   + w*(index[1]  + h*index[2]  ))] += (  frac_x)*(  frac_y)*(  frac_z);
                outImageBufferPointer[( index[0]   + w*(index[1]+1+ h*index[2]  ))] += (  frac_x)*(1-frac_y)*(  frac_z);
                outImageBufferPointer[( index[0]   + w*(index[1]  + h*index[2]+h))] += (  frac_x)*(  frac_y)*(1-frac_z);
                outImageBufferPointer[( index[0]   + w*(index[1]+1+ h*index[2]+h))] += (  frac_x)*(1-frac_y)*(1-frac_z);
                outImageBufferPointer[( index[0]+1 + w*(index[1]  + h*index[2]  ))] += (1-frac_x)*(  frac_y)*(  frac_z);
                outImageBufferPointer[( index[0]+1 + w*(index[1]  + h*index[2]+h))] += (1-frac_x)*(  frac_y)*(1-frac_z);
                outImageBufferPointer[( index[0]+1 + w*(index[1]+1+ h*index[2]  ))] += (1-frac_x)*(1-frac_y)*(  frac_z);
                outImageBufferPointer[( index[0]+1 + w*(index[1]+1+ h*index[2]+h))] += (1-frac_x)*(1-frac_y)*(1-frac_z);
            }
        }
    }

    if (!m_OutputAbsoluteValues && !m_BinaryOutput)
    {
        MITK_INFO << "TractDensityImageFilter: max-normalizing output image";
        OutPixelType max = 0;
        for (int i=0; i<w*h*d; i++)
            if (max < outImageBufferPointer[i])
                max = outImageBufferPointer[i];
        if (max>0)
            for (int i=0; i<w*h*d; i++)
                outImageBufferPointer[i] /= max;
    }
    if (m_InvertImage)
    {
        MITK_INFO << "TractDensityImageFilter: inverting image";
        for (int i=0; i<w*h*d; i++)
            outImageBufferPointer[i] = 1-outImageBufferPointer[i];
    }
    MITK_INFO << "TractDensityImageFilter: finished processing";
}
  template< class TInputImage, class TOutputPixelType >
  void TractsToProbabilityImageFilter< TInputImage, TOutputPixelType >
      ::GenerateData()
  {
    bool isRgba = false;
    if( typeid(TOutputPixelType) == typeid(itk::RGBAPixel<unsigned char>) )
    {
      isRgba = true;
    }
    else if( typeid(TOutputPixelType) != typeid(unsigned char) )
    {
      MITK_INFO << "Only 'unsigned char' and 'itk::RGBAPixel<unsigned char>' are supported as OutputPixelType";
      return;
    }

    mitk::Geometry3D::Pointer geometry = m_FiberBundle->GetGeometry();

    typename OutputImageType::Pointer outImage =
        static_cast< OutputImageType * >(this->ProcessObject::GetOutput(0));

    outImage->SetSpacing( geometry->GetSpacing()/m_UpsamplingFactor );   // Set the image spacing

    mitk::Point3D origin = geometry->GetOrigin();
    mitk::Point3D indexOrigin;
    geometry->WorldToIndex(origin, indexOrigin);
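    // Shift the origin by half of the voxel-size difference so the corner of the first
    // upsampled voxel coincides with the corner of the original voxel grid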
    indexOrigin[0] = indexOrigin[0] - .5 * (1.0-1.0/m_UpsamplingFactor);
    indexOrigin[1] = indexOrigin[1] - .5 * (1.0-1.0/m_UpsamplingFactor);
    indexOrigin[2] = indexOrigin[2] - .5 * (1.0-1.0/m_UpsamplingFactor);
    mitk::Point3D newOrigin;
    geometry->IndexToWorld(indexOrigin, newOrigin);

    outImage->SetOrigin( newOrigin );     // Set the image origin
    itk::Matrix<double, 3, 3> matrix;
    for (int i=0; i<3; i++)
      for (int j=0; j<3; j++)
        matrix[j][i] = geometry->GetMatrixColumn(i)[j]/geometry->GetSpacing().GetElement(i);
    outImage->SetDirection( matrix );  // Set the image direction

    float* bounds = m_FiberBundle->GetBounds();
    ImageRegion<3> upsampledRegion;
    upsampledRegion.SetSize(0, bounds[0]);
    upsampledRegion.SetSize(1, bounds[1]);
    upsampledRegion.SetSize(2, bounds[2]);

    typename InputImageType::RegionType::SizeType upsampledSize = upsampledRegion.GetSize();
    for (unsigned int n = 0; n < 3; n++)
    {
      upsampledSize[n] = upsampledSize[n] * m_UpsamplingFactor;
    }
    upsampledRegion.SetSize( upsampledSize );
    outImage->SetRegions( upsampledRegion );

    outImage->Allocate();
    //    itk::RGBAPixel<unsigned char> pix;
    //    pix.Set(0,0,0,0);
    //    outImage->FillBuffer(pix);

    int w = upsampledSize[0];
    int h = upsampledSize[1];
    int d = upsampledSize[2];


    unsigned char* accuout;
    float* accu;

    accuout = reinterpret_cast<unsigned char*>(outImage->GetBufferPointer());

    if(isRgba)
    {
//      accuout = static_cast<unsigned char*>( outImage->GetBufferPointer()[0].GetDataPointer());
      accu = new float[w*h*d*4];
      for (int i=0; i<w*h*d*4; i++) accu[i] = 0;
    }
    else
    {
      accu = new float[w*h*d];
      for (int i=0; i<w*h*d; i++) accu[i] = 0;
    }

    // for each tract
    int numTracts = m_FiberBundle->GetNumTracts();
    for( int i=0; i<numTracts; i++ )
    {
      ////////////////////
      // upsampling
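      // (each fiber segment is subdivided into m_UpsamplingFactor linearly interpolated points)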
      std::vector< itk::Point<float, 3> > vertices;

      // for each vertex
      int numVertices = m_FiberBundle->GetNumPoints(i);
      for( int j=0; j<numVertices-1; j++)
      {
        itk::Point<float, 3> point = m_FiberBundle->GetPoint(i,j);
        itk::Point<float, 3> nextPoint = m_FiberBundle->GetPoint(i,j+1);
        point[0] += 0.5 - 0.5/m_UpsamplingFactor;
        point[1] += 0.5 - 0.5/m_UpsamplingFactor;
        point[2] += 0.5 - 0.5/m_UpsamplingFactor;
        nextPoint[0] += 0.5 - 0.5/m_UpsamplingFactor;
        nextPoint[1] += 0.5 - 0.5/m_UpsamplingFactor;
        nextPoint[2] += 0.5 - 0.5/m_UpsamplingFactor;

        for(int k=1; k<=m_UpsamplingFactor; k++)
        {
          itk::Point<float, 3> newPoint;
          newPoint[0] = point[0] + ((double)k/(double)m_UpsamplingFactor)*(nextPoint[0]-point[0]);
          newPoint[1] = point[1] + ((double)k/(double)m_UpsamplingFactor)*(nextPoint[1]-point[1]);
          newPoint[2] = point[2] + ((double)k/(double)m_UpsamplingFactor)*(nextPoint[2]-point[2]);
          vertices.push_back(newPoint);
        }
      }

      ////////////////////
      // calc directions (which are used as weights)
      std::list< itk::Point<float, 3> > rgbweights;
      std::list<float> intensities;

      // for each vertex
      numVertices = vertices.size();
      for( int j=0; j<numVertices-1; j++)
      {

        itk::Point<float, 3> vertex = vertices.at(j);
        itk::Point<float, 3> vertexPost = vertices.at(j+1);

        itk::Point<float, 3> dir;
        dir[0] = fabs((vertexPost[0] - vertex[0]) * outImage->GetSpacing()[0]);
        dir[1] = fabs((vertexPost[1] - vertex[1]) * outImage->GetSpacing()[1]);
        dir[2] = fabs((vertexPost[2] - vertex[2]) * outImage->GetSpacing()[2]);

        if(isRgba)
        {
          rgbweights.push_back(dir);
        }

        float intensity = sqrt(dir[0]*dir[0]+dir[1]*dir[1]+dir[2]*dir[2]);
        intensities.push_back(intensity);

        // last point gets same as previous one
        if(j==numVertices-2)
        {
          if(isRgba)
          {
            rgbweights.push_back(dir);
          }
          intensities.push_back(intensity);
        }
      }


      ////////////////////
      // fill output image

      // for each vertex
      for( int j=0; j<numVertices; j++)
      {
        itk::Point<float, 3> vertex = vertices.at(j);
        itk::Point<float, 3> rgbweight;
        if(isRgba)
        {
          rgbweight = rgbweights.front();
          rgbweights.pop_front();
        }
        float intweight = intensities.front();
        intensities.pop_front();

        // scaling coordinates (index coords scale with upsampling)
        vertex[0] = vertex[0] * m_UpsamplingFactor;
        vertex[1] = vertex[1] * m_UpsamplingFactor;
        vertex[2] = vertex[2] * m_UpsamplingFactor;

        // int coordinates inside image?
        int px = (int) (vertex[0]);
        if (px < 0 || px >= w-1)
          continue;
        int py = (int) (vertex[1]);
        if (py < 0 || py >= h-1)
          continue;
        int pz = (int) (vertex[2]);
        if (pz < 0 || pz >= d-1)
          continue;

        // float fraction of coordinates
        float frac_x = vertex[0] - px;
        float frac_y = vertex[1] - py;
        float frac_z = vertex[2] - pz;

        float scale = 100 * pow((float)m_UpsamplingFactor,3);

        if(isRgba)
        {
          // add to r-channel in output image
          accu[0+4*( px   + w*(py  + h*pz  ))] += (1-frac_x)*(1-frac_y)*(1-frac_z) * rgbweight[0] * scale;
          accu[0+4*( px   + w*(py+1+ h*pz  ))] += (1-frac_x)*(  frac_y)*(1-frac_z) * rgbweight[0] * scale;
          accu[0+4*( px   + w*(py  + h*pz+h))] += (1-frac_x)*(1-frac_y)*(  frac_z) * rgbweight[0] * scale;
          accu[0+4*( px   + w*(py+1+ h*pz+h))] += (1-frac_x)*(  frac_y)*(  frac_z) * rgbweight[0] * scale;
          accu[0+4*( px+1 + w*(py  + h*pz  ))] += (  frac_x)*(1-frac_y)*(1-frac_z) * rgbweight[0] * scale;
          accu[0+4*( px+1 + w*(py  + h*pz+h))] += (  frac_x)*(1-frac_y)*(  frac_z) * rgbweight[0] * scale;
          accu[0+4*( px+1 + w*(py+1+ h*pz  ))] += (  frac_x)*(  frac_y)*(1-frac_z) * rgbweight[0] * scale;
          accu[0+4*( px+1 + w*(py+1+ h*pz+h))] += (  frac_x)*(  frac_y)*(  frac_z) * rgbweight[0] * scale;

          // add to g-channel in output image
          accu[1+4*( px   + w*(py  + h*pz  ))] += (1-frac_x)*(1-frac_y)*(1-frac_z) * rgbweight[1] * scale;
          accu[1+4*( px   + w*(py+1+ h*pz  ))] += (1-frac_x)*(  frac_y)*(1-frac_z) * rgbweight[1] * scale;
          accu[1+4*( px   + w*(py  + h*pz+h))] += (1-frac_x)*(1-frac_y)*(  frac_z) * rgbweight[1] * scale;
          accu[1+4*( px   + w*(py+1+ h*pz+h))] += (1-frac_x)*(  frac_y)*(  frac_z) * rgbweight[1] * scale;
          accu[1+4*( px+1 + w*(py  + h*pz  ))] += (  frac_x)*(1-frac_y)*(1-frac_z) * rgbweight[1] * scale;
          accu[1+4*( px+1 + w*(py  + h*pz+h))] += (  frac_x)*(1-frac_y)*(  frac_z) * rgbweight[1] * scale;
          accu[1+4*( px+1 + w*(py+1+ h*pz  ))] += (  frac_x)*(  frac_y)*(1-frac_z) * rgbweight[1] * scale;
          accu[1+4*( px+1 + w*(py+1+ h*pz+h))] += (  frac_x)*(  frac_y)*(  frac_z) * rgbweight[1] * scale;

          // add to b-channel in output image
          accu[2+4*( px   + w*(py  + h*pz  ))] += (1-frac_x)*(1-frac_y)*(1-frac_z) * rgbweight[2] * scale;
          accu[2+4*( px   + w*(py+1+ h*pz  ))] += (1-frac_x)*(  frac_y)*(1-frac_z) * rgbweight[2] * scale;
          accu[2+4*( px   + w*(py  + h*pz+h))] += (1-frac_x)*(1-frac_y)*(  frac_z) * rgbweight[2] * scale;
          accu[2+4*( px   + w*(py+1+ h*pz+h))] += (1-frac_x)*(  frac_y)*(  frac_z) * rgbweight[2] * scale;
          accu[2+4*( px+1 + w*(py  + h*pz  ))] += (  frac_x)*(1-frac_y)*(1-frac_z) * rgbweight[2] * scale;
          accu[2+4*( px+1 + w*(py  + h*pz+h))] += (  frac_x)*(1-frac_y)*(  frac_z) * rgbweight[2] * scale;
          accu[2+4*( px+1 + w*(py+1+ h*pz  ))] += (  frac_x)*(  frac_y)*(1-frac_z) * rgbweight[2] * scale;
          accu[2+4*( px+1 + w*(py+1+ h*pz+h))] += (  frac_x)*(  frac_y)*(  frac_z) * rgbweight[2] * scale;

          // add to a-channel in output image
          accu[3+4*( px   + w*(py  + h*pz  ))] += (1-frac_x)*(1-frac_y)*(1-frac_z) * intweight * scale;
          accu[3+4*( px   + w*(py+1+ h*pz  ))] += (1-frac_x)*(  frac_y)*(1-frac_z) * intweight * scale;
          accu[3+4*( px   + w*(py  + h*pz+h))] += (1-frac_x)*(1-frac_y)*(  frac_z) * intweight * scale;
          accu[3+4*( px   + w*(py+1+ h*pz+h))] += (1-frac_x)*(  frac_y)*(  frac_z) * intweight * scale;
          accu[3+4*( px+1 + w*(py  + h*pz  ))] += (  frac_x)*(1-frac_y)*(1-frac_z) * intweight * scale;
          accu[3+4*( px+1 + w*(py  + h*pz+h))] += (  frac_x)*(1-frac_y)*(  frac_z) * intweight * scale;
          accu[3+4*( px+1 + w*(py+1+ h*pz  ))] += (  frac_x)*(  frac_y)*(1-frac_z) * intweight * scale;
          accu[3+4*( px+1 + w*(py+1+ h*pz+h))] += (  frac_x)*(  frac_y)*(  frac_z) * intweight * scale;
        }
        else if (m_BinaryEnvelope)
        {
          accu[( px   + w*(py  + h*pz  ))] = 1;
          accu[( px   + w*(py+1+ h*pz  ))] = 1;
          accu[( px   + w*(py  + h*pz+h))] = 1;
          accu[( px   + w*(py+1+ h*pz+h))] = 1;
          accu[( px+1 + w*(py  + h*pz  ))] = 1;
          accu[( px+1 + w*(py  + h*pz+h))] = 1;
          accu[( px+1 + w*(py+1+ h*pz  ))] = 1;
          accu[( px+1 + w*(py+1+ h*pz+h))] = 1;
        }
        else
        {
          accu[( px   + w*(py  + h*pz  ))] += (1-frac_x)*(1-frac_y)*(1-frac_z) * intweight * scale;
          accu[( px   + w*(py+1+ h*pz  ))] += (1-frac_x)*(  frac_y)*(1-frac_z) * intweight * scale;
          accu[( px   + w*(py  + h*pz+h))] += (1-frac_x)*(1-frac_y)*(  frac_z) * intweight * scale;
          accu[( px   + w*(py+1+ h*pz+h))] += (1-frac_x)*(  frac_y)*(  frac_z) * intweight * scale;
          accu[( px+1 + w*(py  + h*pz  ))] += (  frac_x)*(1-frac_y)*(1-frac_z) * intweight * scale;
          accu[( px+1 + w*(py  + h*pz+h))] += (  frac_x)*(1-frac_y)*(  frac_z) * intweight * scale;
          accu[( px+1 + w*(py+1+ h*pz  ))] += (  frac_x)*(  frac_y)*(1-frac_z) * intweight * scale;
          accu[( px+1 + w*(py+1+ h*pz+h))] += (  frac_x)*(  frac_y)*(  frac_z) * intweight * scale;
        }

      }
    }

    float maxRgb = 0.000000001;
    float maxInt = 0.000000001;
    int numPix;

    if(isRgba)
    {
      numPix = w*h*d*4;

      // calc maxima
      for(int i=0; i<numPix; i++)
      {
        if((i-3)%4 != 0)
        {
          if(accu[i] > maxRgb)
          {
            maxRgb = accu[i];
          }
        }
        else
        {
          if(accu[i] > maxInt)
          {
            maxInt = accu[i];
          }
        }
      }

      // write output, normalized uchar 0..255
      for(int i=0; i<numPix; i++)
      {
        if((i-3)%4 != 0)
        {
          accuout[i] = (unsigned char) (255.0 * accu[i] / maxRgb);
        }
        else
        {
          accuout[i] = (unsigned char) (255.0 * accu[i] / maxInt);
        }
      }
    }
    else if (m_BinaryEnvelope)
    {
      numPix = w*h*d;

      // write output, normalized uchar 0..255
      for(int i=0; i<numPix; i++)
      {
        if(m_InvertImage)
        {
          accuout[i] = (unsigned char) ((int)(accu[i]+1)%2);
        }
        else
        {
          accuout[i] = (unsigned char) accu[i];
        }
      }
    }
    else
    {
      numPix = w*h*d;

      // calc maxima
      for(int i=0; i<numPix; i++)
      {
        if(accu[i] > maxInt)
        {
          maxInt = accu[i];
        }
      }

      // write output, normalized uchar 0..255
      for(int i=0; i<numPix; i++)
      {
          accuout[i] = (unsigned char) (255.0 * accu[i] / maxInt);
      }
    }

    delete[] accu;
  }
  template< class ReferenceImageType, class OutputImageType >
  void ExtractChannelFromRgbaImageFilter< ReferenceImageType, OutputImageType >::GenerateData()
  {

    typename InputImageType::Pointer rgbaImage = static_cast< InputImageType * >( this->ProcessObject::GetInput(0) );

    typename OutputImageType::Pointer outputImage =
        static_cast< OutputImageType * >(this->ProcessObject::GetOutput(0));

    typename InputImageType::RegionType region = rgbaImage->GetLargestPossibleRegion();
    outputImage->SetSpacing( m_ReferenceImage->GetSpacing() );   // Set the image spacing
    outputImage->SetOrigin( m_ReferenceImage->GetOrigin() );     // Set the image origin
    outputImage->SetDirection( m_ReferenceImage->GetDirection() );  // Set the image direction
    outputImage->SetRegions( m_ReferenceImage->GetLargestPossibleRegion());
    outputImage->Allocate();
    outputImage->FillBuffer(0);
    float* outImageBufferPointer = outputImage->GetBufferPointer();

    itk::Image< short, 3 >::Pointer counterImage = itk::Image< short, 3 >::New();
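    // the counter image records how many RGBA samples contribute to each output voxel
    // so that the accumulated values can be averaged at the end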
    counterImage->SetSpacing( m_ReferenceImage->GetSpacing() );   // Set the image spacing
    counterImage->SetOrigin( m_ReferenceImage->GetOrigin() );     // Set the image origin
    counterImage->SetDirection( m_ReferenceImage->GetDirection() );  // Set the image direction
    counterImage->SetRegions( m_ReferenceImage->GetLargestPossibleRegion());
    counterImage->Allocate();
    counterImage->FillBuffer(0);
    short* counterImageBufferPointer = counterImage->GetBufferPointer();

    int w = m_ReferenceImage->GetLargestPossibleRegion().GetSize().GetElement(0);
    int h = m_ReferenceImage->GetLargestPossibleRegion().GetSize().GetElement(1);
    int d = m_ReferenceImage->GetLargestPossibleRegion().GetSize().GetElement(2);

    typedef ImageRegionConstIterator< InputImageType > InImageIteratorType;
    InImageIteratorType rgbaIt(rgbaImage, region);
    rgbaIt.GoToBegin();
    while(!rgbaIt.IsAtEnd()){

      InPixelType x = rgbaIt.Get();
      itk::Index<3> index = rgbaIt.GetIndex(); // take value and index from the same pixel before advancing
      ++rgbaIt;

      itk::Point<float, 3> vertex;

      rgbaImage->TransformIndexToPhysicalPoint(index, vertex);
      outputImage->TransformPhysicalPointToIndex(vertex, index);

      itk::ContinuousIndex<float, 3> contIndex;
      outputImage->TransformPhysicalPointToContinuousIndex(vertex, contIndex);

      float frac_x = contIndex[0] - index[0];
      float frac_y = contIndex[1] - index[1];
      float frac_z = contIndex[2] - index[2];
      int px = index[0];
      if (frac_x<0)
      {
        px -= 1;
        frac_x += 1;
      }
      int py = index[1];
      if (frac_y<0)
      {
        py -= 1;
        frac_y += 1;
      }
      int pz = index[2];
      if (frac_z<0)
      {
        pz -= 1;
        frac_z += 1;
      }
      frac_x = 1-frac_x;
      frac_y = 1-frac_y;
      frac_z = 1-frac_z;

      // int coordinates inside image?
      if (px < 0 || px >= w-1)
        continue;
      if (py < 0 || py >= h-1)
        continue;
      if (pz < 0 || pz >= d-1)
        continue;

      OutPixelType out;
      switch (m_Channel)
      {
      case RED:
        out = (float)x.GetRed()/255;
        break;
      case GREEN:
        out = (float)x.GetGreen()/255;
        break;
      case BLUE:
        out = (float)x.GetBlue()/255;
        break;
      case ALPHA:
        out = (float)x.GetAlpha()/255;
        break;
      }

      outImageBufferPointer[( px   + w*(py  + h*pz  ))] += out*(  frac_x)*(  frac_y)*(  frac_z);
      outImageBufferPointer[( px   + w*(py+1+ h*pz  ))] += out*(  frac_x)*(1-frac_y)*(  frac_z);
      outImageBufferPointer[( px   + w*(py  + h*pz+h))] += out*(  frac_x)*(  frac_y)*(1-frac_z);
      outImageBufferPointer[( px   + w*(py+1+ h*pz+h))] += out*(  frac_x)*(1-frac_y)*(1-frac_z);
      outImageBufferPointer[( px+1 + w*(py  + h*pz  ))] += out*(1-frac_x)*(  frac_y)*(  frac_z);
      outImageBufferPointer[( px+1 + w*(py  + h*pz+h))] += out*(1-frac_x)*(  frac_y)*(1-frac_z);
      outImageBufferPointer[( px+1 + w*(py+1+ h*pz  ))] += out*(1-frac_x)*(1-frac_y)*(  frac_z);
      outImageBufferPointer[( px+1 + w*(py+1+ h*pz+h))] += out*(1-frac_x)*(1-frac_y)*(1-frac_z);

      counterImageBufferPointer[( px   + w*(py  + h*pz  ))] += 1;
      counterImageBufferPointer[( px   + w*(py+1+ h*pz  ))] += 1;
      counterImageBufferPointer[( px   + w*(py  + h*pz+h))] += 1;
      counterImageBufferPointer[( px   + w*(py+1+ h*pz+h))] += 1;
      counterImageBufferPointer[( px+1 + w*(py  + h*pz  ))] += 1;
      counterImageBufferPointer[( px+1 + w*(py  + h*pz+h))] += 1;
      counterImageBufferPointer[( px+1 + w*(py+1+ h*pz  ))] += 1;
      counterImageBufferPointer[( px+1 + w*(py+1+ h*pz+h))] += 1;

    }

    typedef ImageRegionIterator< OutputImageType > OutImageIteratorType;
    OutImageIteratorType outIt(outputImage, outputImage->GetLargestPossibleRegion());
    outIt.GoToBegin();
    typedef ImageRegionConstIterator< itk::Image< short, 3 > > CountImageIteratorType;
    CountImageIteratorType counterIt(counterImage, counterImage->GetLargestPossibleRegion());
    counterIt.GoToBegin();

    while(!outIt.IsAtEnd() && !counterIt.IsAtEnd()){
      if (counterIt.Value()>0)
        outIt.Set(outIt.Value()/counterIt.Value());
      ++outIt;
      ++counterIt;
    }
  }
template< class TOutputScalarType >
void DwiPhantomGenerationFilter< TOutputScalarType >
::GenerateData()
{
    if (m_NoiseVariance < 0)
        m_NoiseVariance = 0.001;

    if (!m_SimulateBaseline)
    {
        MITK_INFO << "Baseline image values are set to default. Noise variance value is treated as SNR!";
        if (m_NoiseVariance <= 0)
            m_NoiseVariance = 0.0001;
        if (m_NoiseVariance>99)
            m_NoiseVariance = 0;
        else
        {
            m_NoiseVariance = m_DefaultBaseline/(m_NoiseVariance*m_SignalScale);
            m_NoiseVariance *= m_NoiseVariance;
        }
    }

    m_RandGen = Statistics::MersenneTwisterRandomVariateGenerator::New();
    m_RandGen->SetSeed();

    typename OutputImageType::Pointer outImage = OutputImageType::New();
    outImage->SetSpacing( m_Spacing );
    outImage->SetOrigin( m_Origin );
    outImage->SetDirection( m_DirectionMatrix );
    outImage->SetLargestPossibleRegion( m_ImageRegion );
    outImage->SetBufferedRegion( m_ImageRegion );
    outImage->SetRequestedRegion( m_ImageRegion );
    outImage->SetVectorLength(m_GradientList.size());
    outImage->Allocate();
    typename OutputImageType::PixelType pix;
    pix.SetSize(m_GradientList.size());
    pix.Fill(0.0);
    outImage->FillBuffer(pix);
    this->SetNthOutput (0, outImage);

    double minSpacing = m_Spacing[0];
    if (m_Spacing[1]<minSpacing)
        minSpacing = m_Spacing[1];
    if (m_Spacing[2]<minSpacing)
        minSpacing = m_Spacing[2];

    m_DirectionImageContainer = ItkDirectionImageContainer::New();
    for (int i=0; i<m_SignalRegions.size(); i++)
    {
        itk::Vector< float, 3 > nullVec; nullVec.Fill(0.0);
        ItkDirectionImage::Pointer img = ItkDirectionImage::New();
        img->SetSpacing( m_Spacing );
        img->SetOrigin( m_Origin );
        img->SetDirection( m_DirectionMatrix );
        img->SetRegions( m_ImageRegion );
        img->Allocate();
        img->FillBuffer(nullVec);
        m_DirectionImageContainer->InsertElement(m_DirectionImageContainer->Size(), img);
    }
    m_NumDirectionsImage = ItkUcharImgType::New();
    m_NumDirectionsImage->SetSpacing( m_Spacing );
    m_NumDirectionsImage->SetOrigin( m_Origin );
    m_NumDirectionsImage->SetDirection( m_DirectionMatrix );
    m_NumDirectionsImage->SetRegions( m_ImageRegion );
    m_NumDirectionsImage->Allocate();
    m_NumDirectionsImage->FillBuffer(0);

    m_SNRImage = ItkFloatImgType::New();
    m_SNRImage->SetSpacing( m_Spacing );
    m_SNRImage->SetOrigin( m_Origin );
    m_SNRImage->SetDirection( m_DirectionMatrix );
    m_SNRImage->SetRegions( m_ImageRegion );
    m_SNRImage->Allocate();
    m_SNRImage->FillBuffer(0);

    vtkSmartPointer<vtkCellArray> m_VtkCellArray = vtkSmartPointer<vtkCellArray>::New();
    vtkSmartPointer<vtkPoints>    m_VtkPoints = vtkSmartPointer<vtkPoints>::New();

    m_BaselineImages = 0;
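    // gradients with (near-)zero norm correspond to unweighted b=0 baseline volumes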
    for( unsigned int i=0; i<m_GradientList.size(); i++)
        if (m_GradientList[i].GetNorm()<=0.0001)
            m_BaselineImages++;

    typedef ImageRegionIterator<OutputImageType>      IteratorOutputType;
    IteratorOutputType it (outImage, m_ImageRegion);

    // isotropic tensor
    itk::DiffusionTensor3D<float> isoTensor;
    isoTensor.Fill(0);
    float e1 = m_GreyMatterAdc;
    float e2 = m_GreyMatterAdc;
    float e3 = m_GreyMatterAdc;
    isoTensor.SetElement(0,e1);
    isoTensor.SetElement(3,e2);
    isoTensor.SetElement(5,e3);
    m_MaxBaseline = GetTensorL2Norm(isoTensor);

    GenerateTensors();

    // simulate measurement
    m_MeanBaseline = 0;
    double noiseStdev = sqrt(m_NoiseVariance);
    while(!it.IsAtEnd())
    {
        pix = it.Get();
        typename OutputImageType::IndexType index = it.GetIndex();

        int numDirs = 0;
        for (int i=0; i<m_SignalRegions.size(); i++)
        {
            ItkUcharImgType::Pointer region = m_SignalRegions.at(i);

            if (region->GetPixel(index)!=0)
            {
                numDirs++;
                pix += SimulateMeasurement(m_TensorList[i], m_TensorWeight[i]);

                // set direction image pixel
                ItkDirectionImage::Pointer img = m_DirectionImageContainer->GetElement(i);
                itk::Vector< float, 3 > pixel = img->GetPixel(index);
                vnl_vector_fixed<double, 3> dir = m_TensorDirection.at(i);
                dir.normalize();
                dir *= m_TensorWeight.at(i);
                pixel.SetElement(0, dir[0]);
                pixel.SetElement(1, dir[1]);
                pixel.SetElement(2, dir[2]);
                img->SetPixel(index, pixel);

                vtkSmartPointer<vtkPolyLine> container = vtkSmartPointer<vtkPolyLine>::New();
                itk::ContinuousIndex<double, 3> center;
                center[0] = index[0];
                center[1] = index[1];
                center[2] = index[2];
                itk::Point<double> worldCenter;
                outImage->TransformContinuousIndexToPhysicalPoint( center, worldCenter );
                itk::Point<double> worldStart;
                worldStart[0] = worldCenter[0]-dir[0]/2 * minSpacing;
                worldStart[1] = worldCenter[1]-dir[1]/2 * minSpacing;
                worldStart[2] = worldCenter[2]-dir[2]/2 * minSpacing;
                vtkIdType id = m_VtkPoints->InsertNextPoint(worldStart.GetDataPointer());
                container->GetPointIds()->InsertNextId(id);
                itk::Point<double> worldEnd;
                worldEnd[0] = worldCenter[0]+dir[0]/2 * minSpacing;
                worldEnd[1] = worldCenter[1]+dir[1]/2 * minSpacing;
                worldEnd[2] = worldCenter[2]+dir[2]/2 * minSpacing;
                id = m_VtkPoints->InsertNextPoint(worldEnd.GetDataPointer());
                container->GetPointIds()->InsertNextId(id);
                m_VtkCellArray->InsertNextCell(container);
            }
        }

        if (numDirs>1)
        {
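            // voxel covered by several signal regions: average the individual simulated signals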
            for (int i=0; i<m_GradientList.size(); i++)
                pix[i] /= numDirs;
        }
        else if (numDirs==0)
        {
            if (m_SimulateBaseline)
                pix = SimulateMeasurement(isoTensor, 1.0);
            else
                pix.Fill(0.0);
        }

        m_MeanBaseline += pix[0];
        it.Set(pix);
        m_NumDirectionsImage->SetPixel(index, numDirs);
        if (m_NoiseVariance>0)
            m_SNRImage->SetPixel(index, pix[0]/(noiseStdev*m_SignalScale));
        ++it;
    }
    m_MeanBaseline /= m_ImageRegion.GetNumberOfPixels();
    if (m_NoiseVariance>0)
        MITK_INFO << "Mean SNR: " << m_MeanBaseline/(noiseStdev*m_SignalScale);
    else
        MITK_INFO << "No noise added";

    // add rician noise
    it.GoToBegin();
    while(!it.IsAtEnd())
    {
        pix = it.Get();
        AddNoise(pix);
        it.Set(pix);
        ++it;
    }

    // generate fiber bundle
    vtkSmartPointer<vtkPolyData> directionsPolyData = vtkSmartPointer<vtkPolyData>::New();
    directionsPolyData->SetPoints(m_VtkPoints);
    directionsPolyData->SetLines(m_VtkCellArray);
    m_OutputFiberBundle = mitk::FiberBundleX::New(directionsPolyData);
}
template <class inputType, unsigned int Dimension>
medAbstractJob::medJobExitStatus medItkBiasCorrectionProcess::N4BiasCorrectionCore()
{
    medJobExitStatus eRes = medAbstractJob::MED_JOB_EXIT_SUCCESS;

    typedef itk::Image<inputType, Dimension > ImageType;
    typedef itk::Image <float, Dimension> OutputImageType;
    typedef itk::Image<unsigned char, Dimension> MaskImageType;
    typedef itk::N4BiasFieldCorrectionImageFilter<OutputImageType, MaskImageType, OutputImageType> BiasFilter;
    typedef itk::ConstantPadImageFilter<OutputImageType, OutputImageType> PadderType;
    typedef itk::ConstantPadImageFilter<MaskImageType, MaskImageType> MaskPadderType;
    typedef itk::ShrinkImageFilter<OutputImageType, OutputImageType> ShrinkerType;
    typedef itk::ShrinkImageFilter<MaskImageType, MaskImageType> MaskShrinkerType;
    typedef itk::BSplineControlPointImageFilter<typename BiasFilter::BiasFieldControlPointLatticeType, typename BiasFilter::ScalarImageType> BSplinerType;
    typedef itk::ExpImageFilter<OutputImageType, OutputImageType> ExpFilterType;
    typedef itk::DivideImageFilter<OutputImageType, OutputImageType, OutputImageType> DividerType;
    typedef itk::ExtractImageFilter<OutputImageType, OutputImageType> CropperType;

    unsigned int uiThreadNb = static_cast<unsigned int>(m_poUIThreadNb->value());
    unsigned int uiShrinkFactors = static_cast<unsigned int>(m_poUIShrinkFactors->value());
    unsigned int uiSplineOrder = static_cast<unsigned int>(m_poUISplineOrder->value());
    float fWienerFilterNoise = static_cast<float>(m_poFWienerFilterNoise->value());
    float fbfFWHM = static_cast<float>(m_poFbfFWHM->value());
    float fConvergenceThreshold = static_cast<float>(m_poFConvergenceThreshold->value());
    float fSplineDistance = static_cast<float>(m_poFSplineDistance->value());

    float fProgression = 0;

    QStringList oListValue = m_poSMaxIterations->value().split("x");

    std::vector<unsigned int> oMaxNumbersIterationsVector(oListValue.size());
    std::vector<float> oInitialMeshResolutionVect(Dimension);
    for (int i=0; i<oMaxNumbersIterationsVector.size(); ++i)
    {
       oMaxNumbersIterationsVector[i] = (unsigned int)oListValue[i].toInt();
    }
    oInitialMeshResolutionVect[0] = static_cast<float>(m_poFInitialMeshResolutionVect1->value());
    oInitialMeshResolutionVect[1] = static_cast<float>(m_poFInitialMeshResolutionVect2->value());
    oInitialMeshResolutionVect[2] = static_cast<float>(m_poFInitialMeshResolutionVect3->value());

    typename ImageType::Pointer image = dynamic_cast<ImageType *>((itk::Object*)(this->input()->data()));
    typedef itk::CastImageFilter <ImageType, OutputImageType> CastFilterType;
    typename CastFilterType::Pointer castFilter = CastFilterType::New();
    castFilter->SetInput(image);

    /********************************************************************************/
    /***************************** PREPARING STARTING *******************************/
    /********************************************************************************/

    /*** 0 ******************* Create filter and accessories ******************/
    ABORT_CHECKING(m_bAborting);
    typename BiasFilter::Pointer filter = BiasFilter::New();
    typename BiasFilter::ArrayType oNumberOfControlPointsArray;
    m_filter = filter;

    /*** 1 ******************* Read input image *******************************/
    ABORT_CHECKING(m_bAborting);
    fProgression = 1;
    updateProgression(fProgression);

    /*** 2 ******************* Creating Otsu mask *****************************/
    ABORT_CHECKING(m_bAborting);
    itk::TimeProbe timer;
    timer.Start();
    typename MaskImageType::Pointer maskImage = ITK_NULLPTR;
    typedef itk::OtsuThresholdImageFilter<OutputImageType, MaskImageType> ThresholderType;
    typename ThresholderType::Pointer otsu = ThresholderType::New();
    m_filter = otsu;
    otsu->SetInput(castFilter->GetOutput());
    otsu->SetNumberOfHistogramBins(200);
    otsu->SetInsideValue(0);
    otsu->SetOutsideValue(1);

    otsu->SetNumberOfThreads(uiThreadNb);
    otsu->Update();
    updateProgression(fProgression);
    maskImage = otsu->GetOutput();


    /*** 3A *************** Set Maximum number of Iterations for the filter ***/
    ABORT_CHECKING(m_bAborting);
    typename BiasFilter::VariableSizeArrayType itkTabMaximumIterations;
    itkTabMaximumIterations.SetSize(oMaxNumbersIterationsVector.size());
    for (int i = 0; i < oMaxNumbersIterationsVector.size(); ++i)
    {
        itkTabMaximumIterations[i] = oMaxNumbersIterationsVector[i];
    }
    filter->SetMaximumNumberOfIterations(itkTabMaximumIterations);

    /*** 3B *************** Set Fitting Levels for the filter *****************/
    typename BiasFilter::ArrayType oFittingLevelsTab;
    oFittingLevelsTab.Fill(oMaxNumbersIterationsVector.size());
    filter->SetNumberOfFittingLevels(oFittingLevelsTab);

    updateProgression(fProgression);

    /*** 4 ******************* Save the image's index, size, and origin *******/
    ABORT_CHECKING(m_bAborting);
    typename ImageType::IndexType oImageIndex = image->GetLargestPossibleRegion().GetIndex();
    typename ImageType::SizeType oImageSize = image->GetLargestPossibleRegion().GetSize();
    typename ImageType::PointType newOrigin = image->GetOrigin();

    typename OutputImageType::Pointer outImage = castFilter->GetOutput();

    if (fSplineDistance > 0)
    {
        /*** 5 ******************* Compute number of control points  **************/
        ABORT_CHECKING(m_bAborting);
        itk::SizeValueType lowerBound[3];
        itk::SizeValueType upperBound[3];

        for (unsigned int i = 0; i < 3; i++)
        {
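            // pad this dimension so that an integer number of spline spans of length fSplineDistance covers the image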
            float domain = static_cast<float>(image->GetLargestPossibleRegion().GetSize()[i] - 1) * image->GetSpacing()[i];
            unsigned int numberOfSpans = static_cast<unsigned int>(std::ceil(domain / fSplineDistance));
            unsigned long extraPadding = static_cast<unsigned long>((numberOfSpans * fSplineDistance - domain) / image->GetSpacing()[i] + 0.5);
            lowerBound[i] = static_cast<unsigned long>(0.5 * extraPadding);
            upperBound[i] = extraPadding - lowerBound[i];
            newOrigin[i] -= (static_cast<float>(lowerBound[i]) * image->GetSpacing()[i]);
            oNumberOfControlPointsArray[i] = numberOfSpans + filter->GetSplineOrder();
        }
        updateProgression(fProgression);

        /*** 6 ******************* Padder  ****************************************/
        ABORT_CHECKING(m_bAborting);
        typename PadderType::Pointer imagePadder = PadderType::New();
        m_filter = imagePadder;
        imagePadder->SetInput(castFilter->GetOutput());
        imagePadder->SetPadLowerBound(lowerBound);
        imagePadder->SetPadUpperBound(upperBound);
        imagePadder->SetConstant(0);
        imagePadder->SetNumberOfThreads(uiThreadNb);
        imagePadder->Update();
        updateProgression(fProgression);

        outImage = imagePadder->GetOutput();

        /*** 7 ******************** Handle the mask image *************************/
        ABORT_CHECKING(m_bAborting);
        typename MaskPadderType::Pointer maskPadder = MaskPadderType::New();
        m_filter = maskPadder;
        maskPadder->SetInput(maskImage);
        maskPadder->SetPadLowerBound(lowerBound);
        maskPadder->SetPadUpperBound(upperBound);
        maskPadder->SetConstant(0);
        maskPadder->SetNumberOfThreads(uiThreadNb);
        maskPadder->Update();
        updateProgression(fProgression);

        maskImage = maskPadder->GetOutput();

        /*** 8 ******************** SetNumber Of Control Points *******************/
        ABORT_CHECKING(m_bAborting);
        filter->SetNumberOfControlPoints(oNumberOfControlPointsArray);
    }
    else if (oInitialMeshResolutionVect.size() == 3)
    {
        /*** 9 ******************** SetNumber Of Control Points alternative *******/
        ABORT_CHECKING(m_bAborting);
        for (unsigned i = 0; i < 3; i++)
        {
            oNumberOfControlPointsArray[i] = static_cast<unsigned int>(oInitialMeshResolutionVect[i]) + filter->GetSplineOrder();
        }
        filter->SetNumberOfControlPoints(oNumberOfControlPointsArray);

        updateProgression(fProgression, 3);
    }
    else
    {
        fProgression = 0;
        updateProgression(fProgression);
        std::cout << "No BSpline distance and Mesh Resolution is ignored because not 3 dimensions" << std::endl;
    }

    /*** 10 ******************* Shrinker image ********************************/
    ABORT_CHECKING(m_bAborting);
    typename ShrinkerType::Pointer imageShrinker = ShrinkerType::New();
    m_filter = imageShrinker;
    imageShrinker->SetInput(outImage);

    /*** 11 ******************* Shrinker mask *********************************/
    ABORT_CHECKING(m_bAborting);
    typename MaskShrinkerType::Pointer maskShrinker = MaskShrinkerType::New();
    m_filter = maskShrinker;
    maskShrinker->SetInput(maskImage);

    /*** 12 ******************* Shrink mask and image *************************/
    ABORT_CHECKING(m_bAborting);
    imageShrinker->SetShrinkFactors(uiShrinkFactors);
    maskShrinker->SetShrinkFactors(uiShrinkFactors);
    imageShrinker->SetNumberOfThreads(uiThreadNb);
    maskShrinker->SetNumberOfThreads(uiThreadNb);
    imageShrinker->Update();
    updateProgression(fProgression);
    maskShrinker->Update();
    updateProgression(fProgression);

    /*** 13 ******************* Filter settings *******************************/
    ABORT_CHECKING(m_bAborting);
    filter->SetSplineOrder(uiSplineOrder);
    filter->SetWienerFilterNoise(fWienerFilterNoise);
    filter->SetBiasFieldFullWidthAtHalfMaximum(fbfFWHM);
    filter->SetConvergenceThreshold(fConvergenceThreshold);
    filter->SetInput(imageShrinker->GetOutput());
    filter->SetMaskImage(maskShrinker->GetOutput());

    /*** 14 ******************* Apply filter **********************************/
    ABORT_CHECKING(m_bAborting);
    try
    {
        filter->SetNumberOfThreads(uiThreadNb);
        filter->Update();
        updateProgression(fProgression, 5);
    }
    catch (itk::ExceptionObject & err)
    {
        std::cerr << "ExceptionObject caught !" << std::endl;
        std::cerr << err << std::endl;
        eRes = medAbstractJob::MED_JOB_EXIT_FAILURE;
        return eRes;
    }


    /**
    * Reconstruct the bias field at full image resolution.  Divide
    * the original input image by the bias field to get the final
    * corrected image.
    */
    ABORT_CHECKING(m_bAborting);
    typename BSplinerType::Pointer bspliner = BSplinerType::New();
    m_filter = bspliner;
    bspliner->SetInput(filter->GetLogBiasFieldControlPointLattice());
    bspliner->SetSplineOrder(filter->GetSplineOrder());
    bspliner->SetSize(image->GetLargestPossibleRegion().GetSize());
    bspliner->SetOrigin(newOrigin);
    bspliner->SetDirection(image->GetDirection());
    bspliner->SetSpacing(image->GetSpacing());
    bspliner->SetNumberOfThreads(uiThreadNb);
    bspliner->Update();
    updateProgression(fProgression);


    /*********************** Logarithm phase ***************************/
    ABORT_CHECKING(m_bAborting);
    typename OutputImageType::Pointer logField = OutputImageType::New();
    logField->SetOrigin(image->GetOrigin());
    logField->SetSpacing(image->GetSpacing());
    logField->SetRegions(image->GetLargestPossibleRegion());
    logField->SetDirection(image->GetDirection());
    logField->Allocate();

    itk::ImageRegionIterator<typename BiasFilter::ScalarImageType> IB(bspliner->GetOutput(), bspliner->GetOutput()->GetLargestPossibleRegion());

    itk::ImageRegionIterator<OutputImageType> IF(logField, logField->GetLargestPossibleRegion());
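    // copy the first (scalar) component of the reconstructed log bias field into logField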

    for (IB.GoToBegin(), IF.GoToBegin(); !IB.IsAtEnd(); ++IB, ++IF)
    {
        IF.Set(IB.Get()[0]);
    }


    /*********************** Exponential phase *************************/
    ABORT_CHECKING(m_bAborting);
    typename ExpFilterType::Pointer expFilter = ExpFilterType::New();
    m_filter = expFilter;
    expFilter->SetInput(logField);
    expFilter->SetNumberOfThreads(uiThreadNb);
    expFilter->Update();
    updateProgression(fProgression);

    /************************ Dividing phase ***************************/
    ABORT_CHECKING(m_bAborting);
    typename DividerType::Pointer divider = DividerType::New();
    m_filter = divider;
    divider->SetInput1(castFilter->GetOutput());
    divider->SetInput2(expFilter->GetOutput());
    divider->SetNumberOfThreads(uiThreadNb);
    divider->Update();
    updateProgression(fProgression);


    /******************** Prepare cropping phase ***********************/
    ABORT_CHECKING(m_bAborting);
    typename ImageType::RegionType inputRegion;
    inputRegion.SetIndex(oImageIndex);
    inputRegion.SetSize(oImageSize);

    /************************ Cropping phase ***************************/
    ABORT_CHECKING(m_bAborting);
    typename CropperType::Pointer cropper = CropperType::New();
    m_filter = cropper;
    cropper->SetInput(divider->GetOutput());
    cropper->SetExtractionRegion(inputRegion);
    cropper->SetDirectionCollapseToSubmatrix();
    cropper->SetNumberOfThreads(uiThreadNb);
    cropper->Update();
    updateProgression(fProgression);

    /********************** Write output image *************************/
    ABORT_CHECKING(m_bAborting);
    medAbstractImageData *out = qobject_cast<medAbstractImageData *>(medAbstractDataFactory::instance()->create("itkDataImageFloat3"));
    out->setData(cropper->GetOutput());
    this->setOutput(out);

    m_filter = 0;
    
    return eRes;
}
  template< class OutputImageType >
  void TractsToFiberEndingsImageFilter< OutputImageType >::GenerateData()
  {
    // generate upsampled image
    mitk::Geometry3D::Pointer geometry = m_FiberBundle->GetGeometry();
    typename OutputImageType::Pointer outImage = this->GetOutput();

    // calculate new image parameters
    mitk::Vector3D newSpacing;
    mitk::Point3D newOrigin;
    itk::Matrix<double, 3, 3> newDirection;
    ImageRegion<3> upsampledRegion;
    if (m_UseImageGeometry && !m_InputImage.IsNull())
    {
      newSpacing = m_InputImage->GetSpacing()/m_UpsamplingFactor;
      upsampledRegion = m_InputImage->GetLargestPossibleRegion();
      newOrigin = m_InputImage->GetOrigin();
      typename OutputImageType::RegionType::SizeType size = upsampledRegion.GetSize();
      size[0] *= m_UpsamplingFactor;
      size[1] *= m_UpsamplingFactor;
      size[2] *= m_UpsamplingFactor;
      upsampledRegion.SetSize(size);
      newDirection = m_InputImage->GetDirection();
    }
    else
    {
      newSpacing = geometry->GetSpacing()/m_UpsamplingFactor;
      newOrigin = geometry->GetOrigin();
      mitk::Geometry3D::BoundsArrayType bounds = geometry->GetBounds();
      newOrigin[0] += bounds.GetElement(0);
      newOrigin[1] += bounds.GetElement(2);
      newOrigin[2] += bounds.GetElement(4);

      for (int i=0; i<3; i++)
        for (int j=0; j<3; j++)
          newDirection[j][i] = geometry->GetMatrixColumn(i)[j];
      upsampledRegion.SetSize(0, geometry->GetExtent(0)*m_UpsamplingFactor);
      upsampledRegion.SetSize(1, geometry->GetExtent(1)*m_UpsamplingFactor);
      upsampledRegion.SetSize(2, geometry->GetExtent(2)*m_UpsamplingFactor);
    }
    typename OutputImageType::RegionType::SizeType upsampledSize = upsampledRegion.GetSize();

    // apply new image parameters
    outImage->SetSpacing( newSpacing );
    outImage->SetOrigin( newOrigin );
    outImage->SetDirection( newDirection );
    outImage->SetRegions( upsampledRegion );
    outImage->Allocate();

    int w = upsampledSize[0];
    int h = upsampledSize[1];
    int d = upsampledSize[2];

    // set/initialize output
    OutPixelType* outImageBufferPointer = (OutPixelType*)outImage->GetBufferPointer();
    for (int i=0; i<w*h*d; i++)
      outImageBufferPointer[i] = 0;

    // resample fiber bundle
    float minSpacing = 1;
    if(newSpacing[0]<newSpacing[1] && newSpacing[0]<newSpacing[2])
        minSpacing = newSpacing[0];
    else if (newSpacing[1] < newSpacing[2])
        minSpacing = newSpacing[1];
    else
        minSpacing = newSpacing[2];

    vtkSmartPointer<vtkPolyData> fiberPolyData = m_FiberBundle->GetFiberPolyData();
    vtkSmartPointer<vtkCellArray> vLines = fiberPolyData->GetLines();
    vLines->InitTraversal();

    int numFibers = m_FiberBundle->GetNumFibers();
    boost::progress_display disp(numFibers);
    for( int i=0; i<numFibers; i++ )
    {
      ++disp;
      vtkIdType   numPoints(0);
      vtkIdType*  points(NULL);
      vLines->GetNextCell ( numPoints, points );

      // fill output image
      if (numPoints>0)
      {
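        // first point of the fiber (start endpoint)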
        itk::Point<float, 3> vertex = GetItkPoint(fiberPolyData->GetPoint(points[0]));
        itk::Index<3> index;
        outImage->TransformPhysicalPointToIndex(vertex, index);
        if (m_BinaryOutput)
            outImage->SetPixel(index, 1);
        else
            outImage->SetPixel(index, outImage->GetPixel(index)+1);
      }

      if (numPoints>2)
      {
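        // last point of the fiber (end endpoint)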
        itk::Point<float, 3> vertex = GetItkPoint(fiberPolyData->GetPoint(points[numPoints-1]));
        itk::Index<3> index;
        outImage->TransformPhysicalPointToIndex(vertex, index);
        if (m_BinaryOutput)
            outImage->SetPixel(index, 1);
        else
            outImage->SetPixel(index, outImage->GetPixel(index)+1);
      }
    }

    if (m_InvertImage)
      for (int i=0; i<w*h*d; i++)
        outImageBufferPointer[i] = 1-outImageBufferPointer[i];
  }