Example #1
void jacobian_transpose_nllsq_impl(Function f, GradFunction fill_jac, InputVector& x, const OutputVector& y,
                                   unsigned int max_iter, LimitFunction impose_limits,
                                   typename vect_traits<InputVector>::value_type abs_tol = typename vect_traits<InputVector>::value_type(1e-6),
                                   typename vect_traits<InputVector>::value_type abs_grad_tol = typename vect_traits<InputVector>::value_type(1e-6)) {
  typedef typename vect_traits<InputVector>::value_type ValueType;
  using std::sqrt; using std::fabs;
  
  OutputVector y_approx = y;
  OutputVector r = y;
  mat<ValueType, mat_structure::rectangular> J(y.size(), x.size());
  InputVector e = x; e -= x;   // step vector, zero-initialized with the dimensions of x
  OutputVector Je = y;         // holds J * e, allocated with the dimensions of y
  unsigned int iter = 0;
  do {
    if(++iter > max_iter)
      throw maximum_iteration(max_iter);
    x += e;
    y_approx = f(x);
    r = y; r -= y_approx;        // residual r = y - f(x)
    fill_jac(J,x,y_approx);
    e = r * J;                   // steepest-descent direction e = J^T r
    Je = J * e;
    ValueType alpha = Je * Je;
    if(alpha < abs_grad_tol)
      return;                    // gradient has (nearly) vanished
    alpha = (r * Je) / alpha;    // line-search factor alpha = (r . Je) / ||Je||^2
    e *= alpha;
    impose_limits(x,e);          // clip the step to the feasible region
  } while(norm_2(e) > abs_tol);
};
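This is a Jacobian-transpose (steepest-descent) update: each pass moves x along e = J^T r and scales that direction by alpha = (r . Je) / ||Je||^2. Below is a minimal, self-contained sketch of the same update for a straight-line fit, using plain std::vector instead of the library's vector/matrix types; the data, model and tolerances are made up for illustration.

#include <array>
#include <cmath>
#include <cstdio>
#include <vector>

// Fit y = a*t + b by repeating the Jacobian-transpose step used above:
// e = J^T r, alpha = (r . J e) / ||J e||^2, p += alpha * e.
int main() {
    const std::vector<double> t = {0.0, 1.0, 2.0, 3.0};
    const std::vector<double> y = {1.0, 3.1, 4.9, 7.2};   // roughly y = 2t + 1
    std::array<double, 2> p = {0.0, 0.0};                 // p = {a, b}

    for (int iter = 0; iter < 200; ++iter) {
        // residual r = y - f(p) and direction e = J^T r, with J(i,:) = {t[i], 1}
        std::array<double, 2> e = {0.0, 0.0};
        std::vector<double> r(t.size());
        for (size_t i = 0; i < t.size(); ++i) {
            r[i] = y[i] - (p[0] * t[i] + p[1]);
            e[0] += t[i] * r[i];
            e[1] += r[i];
        }
        // line search along e: alpha = (r . J e) / ||J e||^2
        double rJe = 0.0, JeJe = 0.0;
        for (size_t i = 0; i < t.size(); ++i) {
            const double Je = t[i] * e[0] + e[1];
            rJe += r[i] * Je;
            JeJe += Je * Je;
        }
        if (JeJe < 1e-12) break;                          // gradient vanished
        const double alpha = rJe / JeJe;
        p[0] += alpha * e[0];
        p[1] += alpha * e[1];
        if (std::sqrt(e[0] * e[0] + e[1] * e[1]) * std::fabs(alpha) < 1e-9) break;
    }
    std::printf("a = %f, b = %f\n", p[0], p[1]);
    return 0;
}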
Example #2
vtkSmartPointer<vtkPolyData> MatricesToVtk::parsePoints(
    const InputVector & inputData, size_t firstColumn,
    int vtk_dataType /*= VTK_FLOAT*/)
{
    assert(inputData.size() > firstColumn + 2);   // three coordinate columns are read below

    auto points = vtkSmartPointer<vtkPoints>::New();
    points->SetDataType(vtk_dataType);

    size_t nbRows = inputData.at(firstColumn).size();
    std::vector<vtkIdType> pointIds(nbRows);

    // copy the coordinate columns (x, y, z) into the VTK point list
    for (size_t row = 0; row < nbRows; ++row)
    {
        pointIds.at(row) = points->InsertNextPoint(
            inputData[firstColumn][row],
            inputData[firstColumn + 1][row],
            inputData[firstColumn + 2][row]);
    }

    // Create the topology of the point (a vertex)
    auto vertices = vtkSmartPointer<vtkCellArray>::New();
    vertices->InsertNextCell(static_cast<vtkIdType>(nbRows), pointIds.data());

    auto resultPolyData = vtkSmartPointer<vtkPolyData>::New();
    resultPolyData->SetPoints(points);
    resultPolyData->SetVerts(vertices);

    return resultPolyData;
}
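A short usage sketch for the function above, assuming parsePoints is a static member and that InputVector is a vector of coordinate columns (e.g. std::vector<std::vector<double>>); the file name and column layout are made up for illustration. The returned vtkPolyData can be handed straight to a VTK writer or mapper.

#include <vtkSmartPointer.h>
#include <vtkXMLPolyDataWriter.h>

void writePointCloud(const InputVector & columns)
{
    // columns 0..2 hold x, y and z
    auto polyData = MatricesToVtk::parsePoints(columns, 0);

    auto writer = vtkSmartPointer<vtkXMLPolyDataWriter>::New();
    writer->SetFileName("points.vtp");
    writer->SetInputData(polyData);
    writer->Write();
}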
Example #3
std::unique_ptr<DataObject> MatricesToVtk::readRawFile(const QString & fileName)
{
    InputVector inputVectors;
    auto && result = TextFileReader(fileName).read(inputVectors);

    if (!result.testFlag(TextFileReader::successful))
    {
        return nullptr;
    }

    int numColumns = static_cast<int>(inputVectors.size());
    if (numColumns == 0)
    {
        return nullptr;
    }
    int numRows = static_cast<int>(inputVectors.at(0).size());
    if (numRows == 0)
    {
        return nullptr;
    }

    // if the file has more columns than rows, interpret it as transposed
    bool switchRowsColumns = numColumns > numRows;
    if (switchRowsColumns)
    {
        std::swap(numColumns, numRows);
    }

    auto dataArray = vtkSmartPointer<vtkFloatArray>::New();
    dataArray->SetNumberOfComponents(numColumns);
    dataArray->SetNumberOfTuples(numRows);

    if (!switchRowsColumns)
    {
        for (vtkIdType component = 0; component < numColumns; ++component)
        {
            for (vtkIdType cellId = 0; cellId < numRows; ++cellId)
            {
                dataArray->SetValue(cellId * numColumns + component,
                    static_cast<float>(inputVectors.at(component).at(cellId)));
            }
        }
    }
    else
    {
        for (vtkIdType component = 0; component < numColumns; ++component)
        {
            for (vtkIdType cellId = 0; cellId < numRows; ++cellId)
            {
                dataArray->SetValue(cellId * numColumns + component,
                    static_cast<float>(inputVectors.at(cellId).at(component)));
            }
        }
    }

    QFileInfo fInfo(fileName);

    return std::make_unique<RawVectorData>(fInfo.baseName(), *dataArray);
}
Example #4
void BarrelTermPositionIteratorTestCase::testSkipToPositionRandomly()
{
    FX_TRACE("Begin testSkipToPositionRandomly");

    const static size_t TEST_TIMES = 10000;
    for (size_t dataSize = 1; dataSize < TEST_TIMES; dataSize += 123)
    {
        m_nTestDataSize = dataSize;

        FX_TRACE("Test data size: %u", (uint32_t)m_nTestDataSize);
        setUpTestData();
        
        InputVector input;
        generateInputData(input);

        BarrelTermPositionIterator iterator;
        iterator.init(m_pDecoder, NULL);

        docid_t docId = 0;
        for (size_t i = 0; i < input.size(); ++i)
        {
            size_t testDoc = input[i].first;
            docId = m_answer[testDoc].first - 1;
            if (docId < 0)
            {
                docId = 0;
            }
            docId = iterator.skipTo(docId);
            //CPPUNIT_ASSERT_EQUAL(m_answer[testDoc].first, docId);
            assert(m_answer[testDoc].first ==  docId);
            CPPUNIT_ASSERT_EQUAL((tf_t)m_answer[testDoc].second.size(), iterator.freq());

            ctf_t ctf = 0;
            getCTF(m_answer, 0, testDoc, ctf);
            FX_TRACE("ctf: %lld", ctf);
            
            for (size_t k = 0; k < input[i].second.size(); ++k)
            {
                size_t testPos = input[i].second[k];
                loc_t t = m_answer[testDoc].second[testPos] - (k % 2);
                loc_t pos = iterator.skipToPosition(t);
                CPPUNIT_ASSERT_EQUAL(m_answer[testDoc].second[testPos], pos);
//                assert(m_answer[testDoc].second[p] == pos);
            }
        }

        docId = m_answer[m_nTestDataSize - 1].first + 3;
        docId = iterator.skipTo(docId);
        CPPUNIT_ASSERT_EQUAL(INVALID_DOCID, docId);

        tearDownTestData();
    }
}
Example #5
vtkSmartPointer<vtkDataArray> MatricesToVtk::parseFloatVector(
    const InputVector & inputData, const QString & arrayName, size_t firstColumn, size_t lastColumn,
    int vtk_dataType /*= VTK_FLOAT*/)
{
    assert(firstColumn <= lastColumn);
    assert(inputData.size() > lastColumn);
    int numComponents = static_cast<int>(lastColumn - firstColumn + 1);
    vtkIdType numTuples = inputData.at(lastColumn).size();

    DEBUG_ONLY(for (auto && ax : inputData)
    {
        assert(ax.size() == size_t(numTuples));
    })
Example #6
void OperatorItem::initialize()
{
    typedef std::vector<const stromx::runtime::Input*> InputVector;
    typedef std::vector<const stromx::runtime::Output*> OutputVector;
    
    InputVector inputs = m_model->op()->info().inputs();
    qreal firstInputYPos = computeFirstYPos(inputs.size());
    qreal yOffset = ConnectorItem::SIZE + CONNECTOR_OFFSET;
    qreal inputXPos = -SIZE/2 - ConnectorItem::SIZE/2;
    
    unsigned int i = 0;
    for(InputVector::iterator iter = inputs.begin();
        iter != inputs.end();
        ++iter, ++i)
    {
        ConnectorItem* inputItem = new ConnectorItem(this->m_model, (*iter)->id(), ConnectorItem::INPUT, this);
        inputItem->setPos(inputXPos, firstInputYPos + i*yOffset);
        
        m_inputs[(*iter)->id()] = inputItem;
    }
    
    OutputVector outputs = m_model->op()->info().outputs();
    qreal firstOutputYPos = computeFirstYPos(outputs.size());
    qreal outputXPos = SIZE/2 + ConnectorItem::SIZE/2;
    
    i = 0;
    for(OutputVector::iterator iter = outputs.begin();
        iter != outputs.end();
        ++iter, ++i)
    {
        ConnectorItem* outputItem = new ConnectorItem(this->m_model, (*iter)->id(), ConnectorItem::OUTPUT, this);
        outputItem->setPos(outputXPos, firstOutputYPos + i*yOffset);
        
        m_outputs[(*iter)->id()] = outputItem;
    }
}
Example #7
int main(int argc, char **argv) {
	ParametersVectorType initialParameters;
	std::string outPrefix, bmImageName, initFile, initMeansFile, mrfModelFile;
	std::string s_minimization = DEFAULT_MRF_ALG;
	float lambda1, lambda2;
	ProbabilityPixelType atlas_th, mask_th;
	unsigned int nClasses, emIterations, mrfIterations;
	std::vector<std::string> channels, priors, geometricTerm;
	std::string s_mode = "km+em";
	std::string outExt = "nii.gz";

	std::vector<double> dataOffsets;
	std::vector<double> dataFactors;

	bool useFile = false;
	bool skipMRF = false;
	bool usePVE = false;
	bool doOutputStats,doSegmentsOutput, doOutputMRFMaps, doOutputMRFIterations,
	     doOutputSteps, useExplicitPVE, skipNormalization, skipBias, doBiasOutput, doCorrectedOutput,
	     channelsAreMasked = false;
	unsigned int userBucketSize = 0;

	boost::unordered_map<std::string, INIT_MODES> modes_map;

	modes_map["none"] = NONE;
	modes_map["km"] = KMEANS;
	modes_map["em"] = EM;
	modes_map["km+em"] = KMEANS_EM;

	boost::unordered_map<std::string, typename MRFFilter::GCOptimizerType::MINIMIZATION_MODES > minimiz_map;
	minimiz_map["expansion"] = MRFFilter::GCOptimizerType::EXPANSION;
	minimiz_map["swap"] = MRFFilter::GCOptimizerType::SWAP;


	bpo::options_description general("General options");
	general.add_options()
		("help", "show help message")
		("out,o",bpo::value < std::string > (&outPrefix), "prefix for output files")
		("out-ext", bpo::value< std::string >( &outExt ), "output format, if supported by ITK")
		("brain-mask,x",bpo::value < std::string > (&bmImageName),"brain extracted mask")
		("channels-masked,M", bpo::bool_switch(&channelsAreMasked), "set this flag if channels are already masked")
		("channels,C", bpo::value< std::vector< std::string > >(&channels)->multitoken()->required(),"list of channels for multivariate segmentation")
		("class,n",	bpo::value<unsigned int>(&nClasses)->default_value(DEFAULT_NCLASSES), "number of tissue-type classes")
		("init,I", bpo::value< std::string >(&s_mode), "operation mode for initialization [none,km,em,km+em]")
		("init-file,f",bpo::value < std::string > (&initFile),"use file for initialization of tissue-type means & variances")
		("init-means",bpo::value < std::string > (&initMeansFile),"use file for initialization of tissue-type means")
		("segments,g", bpo::bool_switch(&doSegmentsOutput),"Output a posteriori probability for each class")
		//("pv", bpo::bool_switch(&useExplicitPVE), "Use explicit PVE Gaussian Model between tissues")
		("output-stats", bpo::bool_switch(&doOutputStats),"output classes statistics to CSV file")
		("output-steps", bpo::bool_switch(&doOutputSteps), "Output intermediate segmentation steps")
		("normalize-off,N", bpo::bool_switch(&skipNormalization), "Do not normalize input's intensity range")
		("brain-mask-th,t",bpo::value < ProbabilityPixelType > (&mask_th)->default_value(0.0), "mask probability threshold")
		("version,v", "show tool version");

	bpo::options_description kmeans_desc("Kmeans options");
	kmeans_desc.add_options()
		("bucket-size", bpo::value< unsigned int >(&userBucketSize), "bucket size for K-means operation");

	bpo::options_description em_desc("Expectation-Maximization options");
	em_desc.add_options()
		("priors,P", bpo::value< std::vector< std::string > >(&priors)->multitoken(),"add prior to list of atlases for initializing segmentation")
		("atlas-threshold",bpo::value < ProbabilityPixelType > (&atlas_th)->default_value(DEFAULT_ATLAS_TH), "threshold for atlases")
		("em-iterations", bpo::value<unsigned int>(&emIterations)->default_value(DEFAULT_MAX_ITER), "maximum number of iterations");

	bpo::options_description bias_desc("Bias estimator options");
	bias_desc.add_options()
		("bias-skip", bpo::bool_switch(&skipBias), "Do not estimate Bias field")
		("bias-output", bpo::bool_switch(&doBiasOutput), "Output estimated Bias field")
		("bias-corrected-output", bpo::bool_switch(&doCorrectedOutput), "Output corrected input images with estimated Bias field");

	bpo::options_description mrf_desc( "MRF Segmentation options" );
	mrf_desc.add_options()
		("mrf-lambda,l", bpo::value<float>(&lambda1)->default_value(DEFAULT_LAMBDA),"Regularization weighting. The higher this value is, the higher smoothing. You may use a value around 0.3-1.0 for typical brain images")
		("mrf-minimization,m", bpo::value < std::string > (&s_minimization)->default_value(DEFAULT_MRF_ALG), "Minimization algorithm")
		("mrf-energy-model,e",bpo::value < std::string > (&mrfModelFile), "Energy Model matrix, basic Pott's Model used if missing" )
		("mrf-iterations", bpo::value<unsigned int>(&mrfIterations)->default_value(DEFAULT_MRF_ITERATIONS),"MRF re-estimation iterations")
		("mrf-skip,S", bpo::bool_switch(&skipMRF),"Skip MRF step")
		("mrf-pve", bpo::bool_switch(&usePVE), "Compute PVE classes")
		("mrf-external-lambda", bpo::value<float>(&lambda2)->default_value(1.0),"External field energy weighting. The higher this value is, the higher impact from the external field")
		("mrf-external-field,E", bpo::value< std::vector< std::string > >(&geometricTerm)->multitoken(),"External field maps that manipulate the dataterm in MRF energy function")
		("mrf-output-maps", bpo::bool_switch(&doOutputMRFMaps), "Output GC probabilities and energy maps")
		("mrf-output-it", bpo::bool_switch(&doOutputMRFIterations), "Output intermediate segmentation steps");


	bpo::options_description all("Usage");
	all.add(general).add(kmeans_desc).add(bias_desc).add(em_desc).add(mrf_desc);

	bpo::variables_map vmap;
	bpo::store(bpo::command_line_parser(argc, argv).options(all).run(), vmap);


	if ( vmap.count("version")) {
			std::cout << "MBIS Segmentation tool. Version " << MBIS_VERSION_MAJOR << "." << MBIS_VERSION_MINOR << "-" << MBIS_RELEASE << "." << std::endl;
			std::cout << "--------------------------------------------------------"<< std::endl;
			std::cout << "Copyright (c) 2012, [email protected] (Oscar Esteban)"<< std::endl; 
			std::cout << "with Signal Processing Lab 5, EPFL (LTS5-EPFL)"<< std::endl;
			std::cout << "and Biomedical Image Technology, UPM (BIT-UPM)"<< std::endl;
			std::cout << "All rights reserved."<< std::endl;

			return 0;
	}

	if ( vmap.empty() || vmap.count("help")) {
		std::cout << all << std::endl;
		return 0;
	}

	try {
		bpo::notify(vmap);
	} catch (boost::exception_detail::clone_impl<
			boost::exception_detail::error_info_injector<
					boost::program_options::required_option> > &err) {
		std::cout << "Error parsing options:" << err.what()
				<< std::endl;
		std::cout << std::endl << all << std::endl;
		return EXIT_FAILURE;
	}

	std::cout << std::endl << "Set-up step --------------------------------------------" << std::endl;

	ProbabilityImagePointer bm;
	InputVector input;
	PriorsVector atlas;
	ClassifiedImageType::Pointer solution;
	bool useFileMask = vmap.count("brain-mask") && bfs::exists(bmImageName);
	bool useImplicitMasks = channelsAreMasked && !useFileMask;

	if( useFileMask ) {
		ProbabilityImageReader::Pointer r = ProbabilityImageReader::New();
		r->SetFileName(bmImageName);
		r->Update();

		try {
			bm = r->GetOutput();
		} catch (...) {
			std::cout << "Error reading brain mask" << std::endl;
			return EXIT_FAILURE;
		}

		StatisticsImageFilterType::Pointer calc = StatisticsImageFilterType::New();
		calc->SetInput(bm);
		calc->Update();
		ProbabilityPixelType max = calc->GetMaximum();

		if (max > 1.0 ){
			ProbabilityPixelType min = calc->GetMinimum();

			IntensityWindowingImageFilterType::Pointer intensityFilter = IntensityWindowingImageFilterType::New();
			intensityFilter->SetInput( bm );
			intensityFilter->SetWindowMaximum( max );
			intensityFilter->SetWindowMinimum( min );
			intensityFilter->SetOutputMaximum( 1.0 );
			intensityFilter->SetOutputMinimum( 0.0 );
			intensityFilter->Update();
			bm = intensityFilter->GetOutput();
		}

		std::cout << "\t* Mask: read from file " << bmImageName << ".";

		if ( mask_th != 0.0 ) {
			ThresholdImageFilterType::Pointer th = ThresholdImageFilterType::New();
			th->SetInput( bm );
			th->ThresholdBelow( mask_th );
			th->Update();
			bm = th->GetOutput();
			std::cout << " Mask Threshold = " << mask_th << ".";
		}

		std::cout << std::endl;

	} else {
		if ( useImplicitMasks ) {
			std::cout << "\t* Mask: channels are masked." << std::endl;
		} else {
			std::cout << "\t* Mask: not requested." << std::endl;
		}
	}

	std::cout << "\t* Inputs normalization is " << ((!skipNormalization)?"ON":"OFF") << std::endl;

	for (vector<string>::iterator it = channels.begin(); it != channels.end(); it++) {
		ChannelReader::Pointer r = ChannelReader::New();
		r->SetFileName( *it );
		ChannelPointer p = r->GetOutput();
		r->Update();

		ChannelPixelType max = itk::NumericTraits< ChannelPixelType >::max();
		ChannelPixelType min = 0.0;
		ChannelPixelType absMin = 0.0;

		if ( bm.IsNotNull() ) {
			ProbabilityPixelType* mBuff = bm->GetBufferPointer();
			ChannelPixelType* cBuff = r->GetOutput()->GetBufferPointer();
			size_t nPix = bm->GetLargestPossibleRegion().GetNumberOfPixels();
			std::vector< ChannelPixelType > sample;

			for( size_t i = 0; i<nPix; i++ ) {
				if ( *(mBuff+i) > 0 ) {
					sample.push_back( *(cBuff+i) );
				}
			}
			std::sort( sample.begin(), sample.end() );
			max = sample[ (size_t) ((sample.size()-1)*0.98) ];
			min = sample[ (size_t) ((sample.size()-1)*0.02) ];
			absMin = sample[0];
		} else {
			StatisticsChannelFilterType::Pointer calc = StatisticsChannelFilterType::New();
			calc->SetInput(p);
			calc->Update();
			max = calc->GetMaximum();
			min = calc->GetMinimum();
			absMin = min;
		}

		if( !skipNormalization ) {
			double factor = NORM_MAX_INTENSITY / (max - min);
			double constant = - factor * absMin;

			if ( factor!= 1 ) {
				typename MultiplyFilter::Pointer multiplier = MultiplyFilter::New();
				multiplier->SetInput( p );
				multiplier->SetConstant( factor );
				multiplier->Update();
				p = multiplier->GetOutput();
			}

			if ( constant!= 0 ) {
				typename AddConstantFilter::Pointer adder = AddConstantFilter::New();
				adder->SetInput( p );
				adder->SetConstant( - constant );
				adder->Update();
				p = adder->GetOutput();
			}

			dataOffsets.push_back( constant );
			dataFactors.push_back( factor );

			if ( bm.IsNotNull() ) {
				ChannelImageType::DirectionType dir = bm->GetDirection();
				p->SetDirection( dir );
				p->SetOrigin( bm->GetOrigin() );

				MaskChannelFilterType::Pointer m = MaskChannelFilterType::New();
				m->SetInput( p );
				m->SetMaskImage( bm );
				m->Update();
				p = m->GetOutput();
			}


			if( doOutputSteps ) {
				itk::ImageFileWriter< ChannelImageType >::Pointer wc = itk::ImageFileWriter< ChannelImageType >::New();
				wc->SetInput( p );
				std::stringstream ss;
				ss << outPrefix << "_normin_" << input.size() << "." << outExt;
				wc->SetFileName( ss.str() );
				wc->Update();
			}
		}

		InputComponentConstPointer im;

		typename ChannelToComponentCaster::Pointer c = ChannelToComponentCaster::New();
		c->SetInput(p);
		c->Update();
		im = c->GetOutput();

		input.push_back( im );

		std::cout << "\t* Channel [" << std::setw(2) << input.size() << "] read: " << (*it) << std::endl;
	}

	if ( useImplicitMasks ) {
		bm = ProbabilityImageType::New();
		bm->SetRegions( input[0]->GetLargestPossibleRegion() );
		bm->CopyInformation( input[0] );
		bm->Allocate();
		bm->FillBuffer( 1.0 );
		bm->Update();

		ProbabilityPixelType* buffer = bm->GetBufferPointer();
		size_t numberOfPixels = bm->GetLargestPossibleRegion().GetNumberOfPixels();

		for ( size_t i = 0; i < input.size(); i++) {
			const PixelType* buffer2 = input[i]->GetBufferPointer();

			for( size_t offset = 0; offset < numberOfPixels; offset++ ) {
				if( *( buffer + offset ) > 0.0 ) {
					*( buffer + offset ) = PixelType ( *( buffer2 + offset ) > 1e-5);
				}
			}
		}


		if( doOutputSteps ) {
			itk::ImageFileWriter< ProbabilityImageType >::Pointer wc = itk::ImageFileWriter< ProbabilityImageType >::New();
			wc->SetInput( bm );
			std::stringstream ss;
			ss << outPrefix << "_mask." << outExt;
			wc->SetFileName( ss.str() );
			wc->Update();
		}
	}


	unsigned int init_mode = modes_map[s_mode];
	unsigned int nComponents = input.size();
	unsigned int nElements = nComponents * (1+nComponents);

	if( priors.size() > 0 ) {
		if (init_mode== KMEANS ) init_mode = MANUAL;
		if (init_mode == KMEANS_EM ) init_mode = EM;

		for (vector<string>::iterator it = priors.begin(); it != priors.end(); it++) {
			ProbabilityImageReader::Pointer r = ProbabilityImageReader::New();
			r->SetFileName( *it );
			ProbabilityImageType::ConstPointer p = r->GetOutput();
			r->Update();

			StatisticsImageFilterType::Pointer calc = StatisticsImageFilterType::New();
			calc->SetInput(p);
			calc->Update();
			ProbabilityPixelType max = calc->GetMaximum();

			if (max > 1.0 ){
				ProbabilityPixelType min = calc->GetMinimum();

				IntensityWindowingImageFilterType::Pointer intensityFilter = IntensityWindowingImageFilterType::New();
				intensityFilter->SetInput( p );
				intensityFilter->SetWindowMaximum( max );
				intensityFilter->SetWindowMinimum( min );
				intensityFilter->SetOutputMaximum( 1.0 );
				intensityFilter->SetOutputMinimum( 0.0 );
				intensityFilter->Update();
				p = intensityFilter->GetOutput();
			}
			atlas.push_back(p);

			std::cout << "\t* Prior [" << std::setw(2) << atlas.size() << "] read: " << (*it) << std::endl;

			ParameterEstimator::Pointer est = ParameterEstimator::New();
			est->SetInputVector( input );
			est->SetPrior( p );
			if ( bm.IsNotNull() ) {
				est->SetMaskImage( bm );
			}

			est->Update();
			initialParameters.push_back( est->GetOutputParameters() );
		}
	}

	if ( bfs::exists(initFile)) {
		if (init_mode== KMEANS ) init_mode = MANUAL;
		if (init_mode == KMEANS_EM ) init_mode = EM;

		std::cout << "\t* Parsing tissue parameters file: " << initFile << std::endl;
		initialParameters = ReadParametersFile(initFile);

		for ( ParametersVectorType::iterator it = initialParameters.begin(); it!=initialParameters.end(); it++) {
			size_t n = (*it).size();
			if ( n != nElements ) {
				std::cerr << "Parameters file is incorrect or badly interpreted" << std::endl;
			}

			if ( !skipNormalization ) {
				for( size_t i = 0; i<nComponents; i++ ) {
					(*it)[i] *= dataFactors[i];
					(*it)[i] += dataOffsets[i];
				}

				for( size_t i = nComponents; i<n; i++ ) {
					(*it)[i] = dataFactors[i%nComponents] * (*it)[i];
				}
			}
		}

		useFile = true;

		std::cout << "\t* Manual Parameters: " << std::endl;
		for (size_t i = 0; i<initialParameters.size(); i++)
			std::cout << "\t\t" << initialParameters[i] << std::endl;
	}

	if ( bfs::exists(initMeansFile)) {
		std::cout << "\t* Parsing tissue means file: " << initMeansFile << std::endl;

		initialParameters = ReadParametersFile( initMeansFile );

		for ( ParametersVectorType::iterator it = initialParameters.begin(); it!=initialParameters.end(); it++) {
			size_t n = (*it).size();

			if ( n != nComponents ) {
				std::cerr << "Means file is incorrect or badly interpreted" << std::endl;
			}


			if ( !skipNormalization ) {
				for( size_t i = 0; i<n; i++ ) {
					(*it)[i] *= dataFactors[i];
					(*it)[i] += dataOffsets[i];
				}
			}
		}

		useFile = true;

		std::cout << "\t* Manual Parameters: " << std::endl;
		for (size_t i = 0; i<initialParameters.size(); i++)
			std::cout << "\t\t" << initialParameters[i] << std::endl;
	}

	EMFilter::Pointer em_filter;
	KMeansFilter::Pointer kmeans;


	if (init_mode == KMEANS || init_mode == KMEANS_EM ) {
		std::cout << std::endl << "Kmeans step --------------------------------------------" << std::endl;
		kmeans = KMeansFilter::New();
		kmeans->SetInputVector( input );
		if ( bm.IsNotNull() ) kmeans->SetMaskImage( bm );
		kmeans->SetNumberOfClasses(nClasses);
		if(vmap.count("bucket-size")) {
			kmeans->SetUserBucketSize(userBucketSize);
		}

		if( useFile ) {
			kmeans->SetInitialParameters( initialParameters );
		}

		kmeans->Compute();
		initialParameters = kmeans->GetOutputParameters();

		if (doOutputStats) {
			stringstream s;
			s << outPrefix << "_stats_kmeans.csv";
			std::ofstream outputCSVfile ( s.str().c_str() );
			for ( unsigned int i = 0; i < initialParameters.size(); i++)
				outputCSVfile << initialParameters[i] << "\n";
			outputCSVfile.close();
		}
		kmeans->Update();
	}

	// Check for sanity initial parameters
	if( initialParameters.size()!=nClasses ) {
		std::cerr << "Error: initial parameters size (" << initialParameters.size() << ") doesn't match number of classes (" << (int) nClasses << std::endl;
	} else if ( initialParameters[0].size() != nElements ) {
		typename MLClassifierFilter::Pointer ml_classifier = MLClassifierFilter::New();
		ml_classifier->SetInputVector( input );
		if ( bm.IsNotNull() ) ml_classifier->SetMaskImage(bm);
		ml_classifier->SetParameters( initialParameters );
		ml_classifier->Update();
		solution = ml_classifier->GetOutput();

		if ( doOutputSteps ) {
			ClassifiedImageWriter::Pointer w = ClassifiedImageWriter::New();
			w->SetInput( solution );
			w->SetFileName(outPrefix + "_means." + outExt );
			w->Update();
		}

		ProbabilityImagePointer unoImage = ProbabilityImageType::New();
		unoImage->SetRegions( input[0]->GetLargestPossibleRegion() );
		unoImage->CopyInformation( input[0] );
		unoImage->Allocate();
		unoImage->FillBuffer( 1.0 );
		unoImage->Update();

		// Initialize Covariance matrix --------------------------------------
		ParameterEstimator::Pointer est = ParameterEstimator::New();
		est->SetInputVector( input );

		typedef itk::LabelImageToLabelMapFilter< ClassifiedImageType > ClassifiedToLabelMapFilter;
		typedef typename ClassifiedToLabelMapFilter::OutputImageType LabelMap;
		typedef itk::LabelMapMaskImageFilter< LabelMap, ProbabilityImageType > LabelMapMaskFilter;

		unsigned int maskOffset = (unsigned int) bm.IsNotNull();

	    for ( typename ClassifiedImageType::PixelType i = 0 ; i < nClasses ; i++ ) {
			typename ClassifiedToLabelMapFilter::Pointer lfilter = ClassifiedToLabelMapFilter::New();
			lfilter->SetInput( solution );
			lfilter->Update();

			typename LabelMapMaskFilter::Pointer lmask = LabelMapMaskFilter::New();
			lmask->SetInput1( lfilter->GetOutput() );
			lmask->SetInput2( unoImage );
			lmask->SetLabel( i + maskOffset );
			lmask->Update();

			est->SetPrior( lmask->GetOutput());
			est->Update();
			initialParameters[i] = est->GetOutputParameters();
	    }
		// End initialize covariance matrix -------------------------------------------
	}

	if (init_mode == EM  || init_mode == KMEANS_EM ) {
		std::cout << std::endl << "E-M Step -----------------------------------------------" << std::endl;
		em_filter = EMFilter::New();
		em_filter->SetMaskImage( bm );
		em_filter->SetNumberOfClasses( nClasses );
		em_filter->SetMaximumIteration( emIterations );
		em_filter->SetMaxBiasEstimationIterations( 5 );
		em_filter->SetInputVector( input );
		em_filter->SetInitialParameters( initialParameters );
		em_filter->SetUseExplicitPVModel( useExplicitPVE );
		em_filter->SetUseBiasCorrection( !skipBias );

		if ( atlas.size() != 0 ) {
			em_filter->SetPriors( atlas );
			em_filter->SetUsePriorProbabilityImages( true );
		}

		em_filter->Update();

		// TODO change to GetModelParameters()
		initialParameters = em_filter->GetInitialParameters();
		solution = em_filter->GetOutput();

		if ( doOutputSteps || skipMRF ) {
			ClassifiedImageWriter::Pointer w = ClassifiedImageWriter::New();
			w->SetInput( solution );
			w->SetFileName(outPrefix + "_em." + outExt );
			w->Update();
		}

		//
		// Output file with initial stats if switch is true
		//
		if (doOutputStats) {
			stringstream s;
			s << outPrefix << "_stats_em.csv";
			std::ofstream outputCSVfile ( s.str().c_str() );
			for ( unsigned int i = 0; i < initialParameters.size(); i++)
				outputCSVfile << initialParameters[i] << "\n";
			outputCSVfile.close();
		}

		if( em_filter->GetUseBiasCorrection() ) {
			typedef typename EMFilter::EstimatorType::InputVectorImageType  VectorImage;
			typedef typename itk::ImageFileWriter< ChannelImageType > ChannelWriter;
			typedef itk::VectorIndexSelectionCastImageFilter< VectorImage, ChannelImageType> SelectorType;
			typename VectorImage::ConstPointer vec = em_filter->GetEstimator()->GetCorrectedInput();
			for( int channel = 0; channel< input.size(); channel++ ) {
				typename SelectorType::Pointer channelSelector = SelectorType::New();
				channelSelector->SetInput( vec );
				channelSelector->SetIndex( channel );
				channelSelector->Update();
				channelSelector->GetOutput()->SetRegions( vec->GetRequestedRegion() );
				input[channel] = channelSelector->GetOutput();

				if ( doCorrectedOutput ) {
					char name[50];
					sprintf(name, "_corrected_ch%02d.%s" , channel, outExt.c_str() );
					typename ChannelWriter::Pointer chW = ChannelWriter::New();
					chW->SetInput(input[channel]);
					chW->SetFileName( outPrefix + name );
					chW->Update();
				}
			}

			if( doBiasOutput ) {
				typedef typename EMFilter::EstimatorType::InputVectorImageType  VectorImage;
				typedef typename itk::ImageFileWriter< ChannelImageType > ChannelWriter;
				typename VectorImage::ConstPointer bias = em_filter->GetEstimator()->GetCurrentBias();
				for( int channel = 0; channel< input.size(); channel++ ) {
					typename SelectorType::Pointer channelSelector = SelectorType::New();
					channelSelector->SetInput( bias );
					channelSelector->SetIndex( channel );
					channelSelector->Update();
					channelSelector->GetOutput()->SetRegions( bias->GetRequestedRegion() );

					char name[50];
					sprintf(name, "_bias_ch%02d.%s" , channel, outExt.c_str() );
					typename ChannelWriter::Pointer chW = ChannelWriter::New();
					chW->SetInput(channelSelector->GetOutput());
					chW->SetFileName( outPrefix + name );
					chW->Update();
				}
			}

		}
	}

	//
	// MRF Segmentation
	//

	if( !skipMRF ) {
		std::cout << std::endl << "MRF Step -----------------------------------------------" << std::endl;
		MRFFilter::Pointer mrf = MRFFilter::New();
		mrf->SetUseExplicitPVModel( useExplicitPVE );
		mrf->SetNumberOfClasses( nClasses );
		mrf->SetMaskImage( bm );
		mrf->SetInputVector( input );
		mrf->SetLambda( lambda1 );
		mrf->SetExternalLambda( lambda2 );
		mrf->SetUseOutputPriors( doSegmentsOutput );
		mrf->SetInitialParameters( initialParameters );
		mrf->SetMRFIterations( mrfIterations );
		mrf->SetMinimizationMode( minimiz_map[s_minimization] );
		mrf->SetOutputPrefix( outPrefix );

		if( doOutputMRFIterations ) {
			typedef itk::SimpleMemberCommand< MRFFilter >  ObserverType;
			ObserverType::Pointer obs = ObserverType::New();
			obs->SetCallbackFunction( mrf, &MRFFilter::WriteIteration );
			mrf->AddObserver( itk::IterationEvent(), obs );
		}


		if( doOutputMRFMaps ) {
			typedef itk::SimpleMemberCommand< MRFFilter >  ObserverType;
			ObserverType::Pointer obs = ObserverType::New();
			obs->SetCallbackFunction( mrf, &MRFFilter::WriteMaps );
			mrf->AddObserver( itk::IterationEvent(), obs );
		}

		if ( doOutputSteps && init_mode == NONE ) {
			EMFilter::Pointer em_classifier = EMFilter::New();
			em_classifier->SetMaskImage( bm );
			em_classifier->SetNumberOfClasses( nClasses );
			em_classifier->SetInputVector( input );
			em_classifier->SetInitialParameters( initialParameters );
			em_classifier->SetUseExplicitPVModel( useExplicitPVE );
			em_classifier->SetUseOnlyClassify(true);
			em_classifier->Update();
			ClassifiedImageWriter::Pointer w = ClassifiedImageWriter::New();
			w->SetInput( em_classifier->GetOutput() );
			w->SetFileName(outPrefix + "_mrf_initialization." + outExt );
			w->Update();

		}

		if( bfs::exists( mrfModelFile ) ) {
			std::cout << "\t* Loading Energy Model Matrix: " << mrfModelFile << std::endl;
			mrf->SetMatrixFile( mrfModelFile );
		}

		for (vector<string>::iterator it = geometricTerm.begin(); it != geometricTerm.end(); it++) {
			if( bfs::exists(*it) ) {
				size_t label = (it - geometricTerm.begin()) + 1;

				ProbabilityImageReader::Pointer r = ProbabilityImageReader::New();
				r->SetFileName( *it );
				ProbabilityImageType::Pointer p = r->GetOutput();
				r->Update();

				mrf->SetSpatialEnergyMap(label, p );


				std::cout << "\t* Geometrical constraint [" << std::setw(2) << label << "] read: " << (*it) << std::endl;
			}
		}

		mrf->Update();
		solution = mrf->GetOutput();

		ClassifiedImageWriter::Pointer w2 = ClassifiedImageWriter::New();
		w2->SetInput( solution );
		w2->SetFileName(outPrefix + "_mrf." + outExt );
		w2->Update();

		if ( doSegmentsOutput ) {
			for ( unsigned int i = 1; i<=nClasses; i++ ) {
				ProbabilityImageWriter::Pointer wProb = ProbabilityImageWriter::New();
				wProb->SetInput( mrf->GetPosteriorProbabilityImage(i) );
				char name[50];
				sprintf(name, "_mrf_seg%02d.%s" , i, outExt.c_str() );
				wProb->SetFileName( outPrefix + name );
				wProb->Update();
			}

		}

		if (doOutputStats) {
			initialParameters = mrf->GetInitialParameters();
			stringstream s;
			s << outPrefix << "_stats_final.csv";
			std::ofstream outputCSVfile ( s.str().c_str() );
			for ( unsigned int i = 0; i < initialParameters.size(); i++)
				outputCSVfile << initialParameters[i] << "\n";
			outputCSVfile.close();
		}
	}


	return EXIT_SUCCESS;
}
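The command-line handling above follows the standard boost::program_options pattern: build options_description groups, store the parsed command line, answer --help before notify(), and let notify() throw required_option when a mandatory flag is missing. A stripped-down, standalone sketch of that pattern (option names shortened; not the tool's full interface):

#include <boost/program_options.hpp>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

namespace bpo = boost::program_options;

int main(int argc, char ** argv) {
    std::string outPrefix;
    std::vector<std::string> channels;

    bpo::options_description general("General options");
    general.add_options()
        ("help", "show help message")
        ("out,o", bpo::value<std::string>(&outPrefix), "prefix for output files")
        ("channels,C", bpo::value<std::vector<std::string>>(&channels)->multitoken()->required(),
         "list of channels");

    bpo::variables_map vmap;
    bpo::store(bpo::command_line_parser(argc, argv).options(general).run(), vmap);

    // Handle --help before notify() so a missing required option does not mask it.
    if (vmap.empty() || vmap.count("help")) {
        std::cout << general << std::endl;
        return 0;
    }

    try {
        bpo::notify(vmap);   // throws bpo::required_option if --channels is absent
    } catch (const bpo::required_option & err) {
        std::cerr << "Error parsing options: " << err.what() << std::endl;
        return EXIT_FAILURE;
    }

    std::cout << "out prefix: " << outPrefix << ", channels: " << channels.size() << std::endl;
    return EXIT_SUCCESS;
}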
Example #8
int levenberg_marquardt_nllsq_impl(Function f, JacobianFunction fill_jac, 
                                    InputVector& x, const OutputVector& y, 
                                    LinearSolver lin_solve, LimitFunction impose_limits, unsigned int max_iter, 
                                    T tau, T epsj, T epsx, T epsy)
{
  typedef typename vect_traits<InputVector>::value_type ValueType;
  typedef typename vect_traits<InputVector>::size_type SizeType;
  
  /* Check if the problem is defined properly */
  if (y.size() < x.size())
    throw improper_problem("Levenberg-Marquardt requires M > N!");
  
  mat<ValueType,mat_structure::rectangular> J(y.size(),x.size());
  mat<ValueType,mat_structure::square> JtJ(x.size());
  mat<ValueType,mat_structure::diagonal> diag_JtJ(x.size());
  mat<ValueType,mat_structure::scalar> mu(x.size(),0.0);
  InputVector Jte = x;
  InputVector Dp = x; Dp -= x;
  mat_vect_adaptor<InputVector> Dp_mat(Dp);
  InputVector pDp = x;
  impose_limits(x,Dp); // make sure the initial solution is feasible.
  x += Dp;
  
  if(tau <= 0.0) 
    tau = 1E-03;
  if(epsj <= 0.0) 
    epsj = 1E-17;
  if(epsx <= 0.0) 
    epsx = 1E-17;
  ValueType epsx_sq = epsx * epsx;
  if(epsy <= 0.0) 
    epsy = 1E-17;
  if(max_iter <= 1)
    max_iter = 2;

  /* compute e=x - f(p) and its L2 norm */
  OutputVector y_approx = f(x);
  OutputVector e = y; e -= y_approx;
  OutputVector e_tmp = e;
  ValueType p_eL2 = e * e;
  
  unsigned int nu = 2;
  for(unsigned int k = 0; k < max_iter; ++k) {
    
    if(p_eL2 < epsy)
      return 1;  //residual is too small.

    fill_jac(J,x,y_approx);

    /* J^T J, J^T e */
    for(SizeType i = 0; i < J.get_col_count(); ++i) {
      for(SizeType j = i; j < J.get_col_count(); ++j) {
        ValueType tmp(0.0);
        for(SizeType l = 0; l < J.get_row_count(); ++l)
          tmp += J(l,i) * J(l,j);
        JtJ(i,j) = JtJ(j,i) = tmp;
      };
    };
    Jte = e * J;
    
    ValueType p_L2 = x * x;

    /* check for convergence */
    if( norm_inf(mat_vect_adaptor<InputVector>(Jte)) < epsj) 
      return 2;  //Jacobian is too small.

    /* compute initial damping factor */
    if( k == 0 ) {
      ValueType tmp = std::numeric_limits<ValueType>::min();
      for(SizeType i=0; i < JtJ.get_row_count(); ++i)
        if(JtJ(i,i) > tmp) 
          tmp = JtJ(i,i); /* find max diagonal element */
      mu = mat<ValueType,mat_structure::scalar>(x.size(), tau * tmp);
    };

    /* determine increment using adaptive damping */
    while(true) {

      /* solve augmented equations */
      try {
        lin_solve(make_damped_matrix(JtJ,mu),Dp_mat,mat_vect_adaptor<InputVector>(Jte),epsj);
        
        impose_limits(x,Dp);
        ValueType Dp_L2 = Dp * Dp;
        pDp = x; pDp += Dp;

        if(Dp_L2 < epsx_sq * p_L2) /* relative change in p is small, stop */
          return 3;  //steps are too small.

        if( Dp_L2 >= (p_L2 + epsx) / ( std::numeric_limits<ValueType>::epsilon() * std::numeric_limits<ValueType>::epsilon() ) ) 
          throw 42; //signal to throw a singularity-error (see below).
        
        e_tmp = y; e_tmp -= f(pDp);
        ValueType pDp_eL2 = e_tmp * e_tmp;
        ValueType dL = mu(0,0) * Dp_L2 + Dp * Jte;

        ValueType dF = p_eL2 - pDp_eL2;
        
        if( (dL < 0.0) || (dF < 0.0) ) 
          throw singularity_error("reject inc.");

        // reduction in error, increment is accepted
        ValueType tmp = ( ValueType(2.0) * dF / dL - ValueType(1.0));
        tmp = 1.0 - tmp * tmp * tmp;
        mu *= ( ( tmp >= ValueType(1.0 / 3.0) ) ? tmp : ValueType(1.0 / 3.0) );
        nu = 2;

        x = pDp;
        y_approx = y; y_approx -= e_tmp;
        e = e_tmp;
        p_eL2 = pDp_eL2;
        break; //the step is accepted and the loop is broken.
      } catch(singularity_error&) {
        //the increment must be rejected (either by singularity in damped matrix or no-redux by the step.
        mu *= ValueType(nu);
        nu <<= 1; // 2*nu;
        if( nu == 0 ) /* nu has overflown. */
          throw infeasible_problem("Levenberg-Marquardt method cannot reduce the function further, matrix damping has overflown!");
      } catch(int i) {
        if(i == 42)
          throw singularity_error("Levenberg-Marquardt method has detected a near-singularity in the Jacobian matrix!");
        else
          throw i;  //just in case there might be another integer thrown (very unlikely).
      };

    }; /* inner loop */
  };

  //if this point is reached, it means we have reached the maximum iterations.
  throw maximum_iteration(max_iter);

};
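The core of the routine above is the damped normal-equation solve (J^T J + mu*I) * Dp = J^T e, followed by the gain-ratio test that relaxes mu on an accepted step and multiplies it by nu (doubling nu) on a rejected one. Below is a self-contained sketch of that loop for a one-parameter exponential fit, with the 1x1 "solve" written inline; the data, starting value and tolerances are made up for illustration.

#include <cmath>
#include <cstdio>
#include <vector>

// Minimal 1-parameter Levenberg-Marquardt loop fitting y = exp(a * t),
// mirroring the damping / gain-ratio logic of the function above.
int main() {
    const std::vector<double> t = {0.0, 0.5, 1.0, 1.5, 2.0};
    std::vector<double> y(t.size());
    for (size_t i = 0; i < t.size(); ++i) y[i] = std::exp(0.7 * t[i]);  // ground truth a = 0.7

    double a = 0.0, mu = -1.0;
    unsigned int nu = 2;

    auto errSq = [&](double a_) {
        double s = 0.0;
        for (size_t i = 0; i < t.size(); ++i) {
            const double e = y[i] - std::exp(a_ * t[i]);
            s += e * e;
        }
        return s;
    };

    double eL2 = errSq(a);
    for (int k = 0; k < 100; ++k) {
        // J^T J and J^T e for the scalar parameter (J_i = d f_i / d a = t_i * exp(a * t_i)).
        double JtJ = 0.0, Jte = 0.0;
        for (size_t i = 0; i < t.size(); ++i) {
            const double fi = std::exp(a * t[i]);
            const double Ji = t[i] * fi;
            JtJ += Ji * Ji;
            Jte += Ji * (y[i] - fi);
        }
        if (mu < 0.0) mu = 1e-3 * JtJ;                       // initial damping from the diagonal
        if (std::fabs(Jte) < 1e-12) break;                   // gradient small enough

        while (true) {
            const double dp = Jte / (JtJ + mu);              // damped normal equation, 1x1 case
            const double newErr = errSq(a + dp);
            const double dL = mu * dp * dp + dp * Jte;       // predicted reduction
            const double dF = eL2 - newErr;                  // actual reduction
            if (dL > 0.0 && dF > 0.0) {                      // accept: relax damping
                double tmp = 2.0 * dF / dL - 1.0;
                tmp = 1.0 - tmp * tmp * tmp;
                mu *= (tmp > 1.0 / 3.0) ? tmp : 1.0 / 3.0;
                nu = 2;
                a += dp;
                eL2 = newErr;
                break;
            }
            mu *= nu;                                        // reject: raise damping and retry
            nu <<= 1;
            if (nu == 0) { std::fprintf(stderr, "damping overflow\n"); return 1; }
        }
        if (eL2 < 1e-16) break;
    }
    std::printf("a = %f\n", a);
    return 0;
}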