value_type iterate( matrix_type const& initial_matrix, matrix_type& result_matrix )
{
    triple_homotopy_fitting<value_type> thf{ ug_size };

    size_type const tilt_number = diag_matrix.row();

    matrix_type intensity{ intensity_matrix.col(), 1 };

    for ( size_type index = 0; index != tilt_number; ++index )
    {
        std::copy( intensity_matrix.row_begin(index), intensity_matrix.row_end(index), intensity.col_begin(0) );

        //TODO -- optimization here
        thf.register_entry( ar,
                            //C1 approximation
                            alpha(progress_ratio),
                            make_coefficient_matrix( thickness, diag_matrix.row_begin(index), diag_matrix.row_end(index), column_index ),
                            //C/2 * C/2 approximation
                            beta(progress_ratio),
                            make_coefficient_matrix( thickness/2.0, diag_matrix.row_begin(index), diag_matrix.row_end(index) ),
                            expm( make_structure_matrix( ar, initial_matrix, diag_matrix.row_begin(index), diag_matrix.row_end(index) ), thickness/2.0, column_index ),
                            //standard expm
                            gamma(progress_ratio),
                            make_scattering_matrix( ar, initial_matrix, diag_matrix.row_begin(index), diag_matrix.row_end(index), thickness, column_index ),
                            intensity,
                            column_index );
    }

    result_matrix.resize( ug_size, 1 );

    value_type const residual = thf.output( result_matrix.begin() );

    /*
    std::cout << "\n current residual is " << residual << "\n";
    std::cout << "\n current ug is \n" << result_matrix.transpose() << "\n";
    */

    return residual;
}
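// A reading of the blend registered above (a sketch inferred from the call itself, not from
// any accompanying documentation): for every tilt the tracked beam amplitude is modelled as
// a weighted mix of three approximations to the dynamical scattering matrix exp( i t A ),
//
//   Phi_r  ~=  alpha(p) * [ C1(t) ]_r  +  beta(p) * [ C(t/2) * C(t/2) ]_r  +  gamma(p) * [ exp( i t A ) ]_r ,
//
// where p is progress_ratio and alpha, beta, gamma are the weighting functors asserted
// non-null in fit() further down; the homotopy presumably shifts the weights between the
// cheap first-order C1 expansion and the exact matrix exponential as the fit progresses.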
const complex_matrix_type make_ug( const matrix_type& G, const matrix_type& A, const matrix_type& D ) const
{
    assert( G.col() == 3 );
    assert( A.col() == 3 );
    assert( D.col() == 1 );
    assert( A.row() == D.row() );

    auto const M = make_matrix();
    auto const S = G * ( M.inverse() );

    matrix_type s( 1, S.row() );
    for ( size_type i = 0; i < S.row(); ++i )
    {
        s[0][i] = value_type( 0.5 ) * std::sqrt( std::inner_product( S.row_begin( i ), S.row_end( i ), S.row_begin( i ), value_type( 0 ) ) );
    }

    // pi times the unit-cell volume (triple product of the cell vectors)
    auto const piomega = 3.14159265358979 * feng::inner_product( array_type( M[0][0], M[1][0], M[2][0] ),
                                                                 feng::cross_product( array_type( M[0][1], M[1][1], M[2][1] ),
                                                                                      array_type( M[0][2], M[1][2], M[2][2] ) ) );
    auto const atomcellfacte = make_gaussian_electron( s, v0 );

    const complex_matrix_type dwss = D * feng::pow( s, value_type( 2 ) );
    const complex_matrix_type piag = A * G.transpose();

    auto fact = feng::exp( - dwss - piag * complex_type( 0, 6.2831853071796 ) );
    std::transform( fact.begin(), fact.end(), atomcellfacte.begin(), fact.begin(),
                    [piomega]( const complex_type f, const value_type a ) { return f * a / piomega; } );

    complex_matrix_type Ug( fact.col(), 1 );
    for ( size_type i = 0; i < fact.col(); ++i )
    {
        Ug[i][0] = std::accumulate( fact.col_begin( i ), fact.col_end( i ), complex_type() );
        //if ( std::abs(Ug[i][0].real()) < 1.0e-8 ) Ug[i][0].real(0);
        //if ( std::abs(Ug[i][0].imag()) < 1.0e-8 ) Ug[i][0].imag(0);
    }

    return Ug;
}
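// The quantity assembled above appears to be the kinematic structure-factor sum (a sketch
// reconstructed from the code itself, not from a cited reference), with s = |g|/2 obtained
// from the reciprocal cell, D holding Debye-Waller factors B_j, A holding fractional atomic
// coordinates r_j, and Omega the unit-cell volume from the triple product of the cell vectors:
//
//   U_g = \frac{1}{\pi \Omega} \sum_j f_j^e(s) \, e^{-B_j s^2} \, e^{-2\pi i \, g \cdot r_j}
//
// where make_gaussian_electron( s, v0 ) is assumed to supply the electron scattering
// factors f_j^e(s) at accelerating voltage v0.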
void register_entry( size_matrix_type const& ar,
                     value_type alpha, complex_matrix_type const& lhs_matrix, complex_matrix_type const& rhs_matrix,
                     value_type beta, complex_matrix_type const& expm_matrix,
                     matrix_type const& intensity, size_type const column_index = 0 )
{
    assert( ar.row() == ar.col() );
    assert( ar.row() == lhs_matrix.row() );
    assert( lhs_matrix.row() == lhs_matrix.col() );
    assert( ar.row() == rhs_matrix.row() );
    assert( ar.row() == intensity.row() );
    assert( 1 == intensity.col() );
    assert( (*(std::max_element(ar.begin(), ar.end()))) < ug_size );
    assert( alpha >= value_type{0} );
    assert( beta >= value_type{0} );
    assert( alpha <= value_type{1} );
    assert( beta <= value_type{1} );
    assert( std::abs(alpha+beta-value_type{1}) < value_type{1.0e-10} );
    //assert( c1_matrix.row() == ar.row() );
    //assert( c1_matrix.col() == 1 );
    assert( expm_matrix.row() == ar.row() );
    assert( expm_matrix.col() == 1 );
    assert( column_index < ar.row() );

    size_type const n = ar.row();
    size_type const m = ug_size;

    matrix_type real_part(m, 1);
    matrix_type imag_part(m, 1);

    value_type norm_factor{0};
    //norm only one column
    //std::for_each( expm_matrix.col_begin( column_index ), expm_matrix.col_end( column_index ), [&norm_factor]( complex_type const& c ){ norm_factor += std::norm(c); } );
    std::for_each( expm_matrix.begin(), expm_matrix.end(), [&norm_factor]( complex_type const& c ){ norm_factor += std::norm(c); } );
    norm_factor /= static_cast<value_type>( expm_matrix.row() );

    for ( size_type r = 0; r != ar.row(); ++r )
    {
        //for \beta C/2 C/2 part
        extract_inner_product_coefficients( m, n, ar.row_begin(r), lhs_matrix.row_begin(r), rhs_matrix.col_begin(column_index), real_part.begin(), imag_part.begin() );
        real_part *= alpha;
        imag_part *= alpha;

        //for \gamma E part
        real_part[0][0] += beta * std::real( expm_matrix[r][column_index] );
        imag_part[0][0] += beta * std::imag( expm_matrix[r][column_index] );
        //real_part[0][0] += beta * std::real( expm_matrix[r][column_index] ) / norm_factor;
        //imag_part[0][0] += beta * std::imag( expm_matrix[r][column_index] ) / norm_factor;

        //needs modifying here
        dsm.register_entry( intensity[r][0], real_part.begin(), imag_part.begin() );
    }

#if 0
    //register lambda, ensuring lambda to be 1
    std::fill( real_part.begin(), real_part.end(), value_type{} );
    value_type const factor = value_type{1.0};
    value_type const weigh = factor * std::sqrt( static_cast<value_type>( intensity.row() ) );
    real_part[0][0] = weigh;
    imag_part[0][0] = weigh;
    dsm.register_entry( value_type{2} * weigh * weigh, real_part.begin(), imag_part.begin() );
#endif
}
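// How these entries feed the accumulator dsm (a sketch inferred from the loop above; the
// exact contracts of extract_inner_product_coefficients and dsm are not shown here): for
// each reflection r the model amplitude is
//
//   A_r(U)  =  alpha * sum_k lhs[r][k] * rhs[k][column_index]  +  beta * expm[r][column_index] ,
//
// whose first term is rewritten through the index matrix ar as linear coefficients on the
// unknown vector U (real and imaginary parts kept separately), while the constant second
// term is added onto the coefficient slot of the first unknown -- which the disabled block
// below suggests is a scale factor intended to stay at 1. Each dsm.register_entry( I_r, ... )
// call then pairs the measured intensity I_r with those coefficients for the later solve.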
void run()
{
    // Read filenames
    vector<std::string> filenames;
    {
        std::string folder = Path::dirname (argument[0]);
        std::ifstream ifs (argument[0].c_str());
        std::string temp;
        while (getline (ifs, temp)) {
            std::string filename (Path::join (folder, temp));
            size_t p = filename.find_last_not_of(" \t");
            if (std::string::npos != p)
                filename.erase(p+1);
            if (filename.size()) {
                if (!MR::Path::exists (filename))
                    throw Exception ("Input connectome file not found: \"" + filename + "\"");
                filenames.push_back (filename);
            }
        }
    }

    const MR::Connectome::matrix_type example_connectome = load_matrix (filenames.front());
    if (example_connectome.rows() != example_connectome.cols())
        throw Exception ("Connectome of first subject is not square (" + str(example_connectome.rows()) + " x " + str(example_connectome.cols()) + ")");
    const MR::Connectome::node_t num_nodes = example_connectome.rows();

    // Initialise enhancement algorithm
    std::shared_ptr<Stats::EnhancerBase> enhancer;
    switch (int(argument[1])) {
        case 0: {
            auto opt = get_options ("threshold");
            if (!opt.size())
                throw Exception ("For NBS algorithm, -threshold option must be provided");
            enhancer.reset (new MR::Connectome::Enhance::NBS (num_nodes, opt[0][0]));
        } break;
        case 1: {
            std::shared_ptr<Stats::TFCE::EnhancerBase> base (new MR::Connectome::Enhance::NBS (num_nodes));
            enhancer.reset (new Stats::TFCE::Wrapper (base));
            load_tfce_parameters (*(dynamic_cast<Stats::TFCE::Wrapper*>(enhancer.get())));
            if (get_options ("threshold").size())
                WARN (std::string (argument[1]) + " is a threshold-free algorithm; -threshold option ignored");
        } break;
        case 2: {
            enhancer.reset (new MR::Connectome::Enhance::PassThrough());
            if (get_options ("threshold").size())
                WARN ("No enhancement algorithm being used; -threshold option ignored");
        } break;
        default:
            throw Exception ("Unknown enhancement algorithm");
    }

    size_t num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS);
    const bool do_nonstationary_adjustment = get_options ("nonstationary").size();
    size_t nperms_nonstationary = get_option_value ("nperms_nonstationarity", DEFAULT_NUMBER_PERMUTATIONS_NONSTATIONARITY);

    // Load design matrix
    const matrix_type design = load_matrix (argument[2]);
    if (size_t(design.rows()) != filenames.size())
        throw Exception ("number of subjects does not match number of rows in design matrix");

    // Load permutations file if supplied
    auto opt = get_options ("permutations");
    vector<vector<size_t> > permutations;
    if (opt.size()) {
        permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]);
        num_perms = permutations.size();
        if (permutations[0].size() != (size_t)design.rows())
            throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix");
    }

    // Load non-stationary correction permutations file if supplied
    opt = get_options ("permutations_nonstationary");
    vector<vector<size_t> > permutations_nonstationary;
    if (opt.size()) {
        permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]);
        nperms_nonstationary = permutations_nonstationary.size();
        if (permutations_nonstationary[0].size() != (size_t)design.rows())
            throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix");
    }

    // Load contrast matrix
    matrix_type contrast = load_matrix (argument[3]);
    if (contrast.cols() > design.cols())
        throw Exception ("too many contrasts for design matrix");
    contrast.conservativeResize (contrast.rows(), design.cols());

    const std::string output_prefix = argument[4];

    // Load input data
    // For compatibility with existing statistics code, symmetric matrix data is adjusted
    // into vector form - one row per edge in the symmetric connectome. The Mat2Vec class
    // deals with the re-ordering of matrix data into this form.
    MR::Connectome::Mat2Vec mat2vec (num_nodes);
    const size_t num_edges = mat2vec.vec_size();
    matrix_type data (num_edges, filenames.size());
    {
        ProgressBar progress ("Loading input connectome data", filenames.size());
        for (size_t subject = 0; subject < filenames.size(); subject++) {
            const std::string& path (filenames[subject]);
            MR::Connectome::matrix_type subject_data;
            try {
                subject_data = load_matrix (path);
            } catch (Exception& e) {
                throw Exception (e, "Error loading connectome data for subject #" + str(subject) + " (file \"" + path + "\")");
            }
            try {
                MR::Connectome::to_upper (subject_data);
                if (size_t(subject_data.rows()) != num_nodes)
                    throw Exception ("Connectome matrix is not the correct size (" + str(subject_data.rows()) + ", should be " + str(num_nodes) + ")");
            } catch (Exception& e) {
                throw Exception (e, "Connectome for subject #" + str(subject) + " (file \"" + path + "\") invalid");
            }
            for (size_t i = 0; i != num_edges; ++i)
                data(i, subject) = subject_data (mat2vec(i).first, mat2vec(i).second);
            ++progress;
        }
    }

    {
        ProgressBar progress ("outputting beta coefficients, effect size and standard deviation...", contrast.cols() + 3);
        const matrix_type betas = Math::Stats::GLM::solve_betas (data, design);
        for (size_t i = 0; i < size_t(contrast.cols()); ++i) {
            save_matrix (mat2vec.V2M (betas.col(i)), output_prefix + "_beta_" + str(i) + ".csv");
            ++progress;
        }
        const matrix_type abs_effects = Math::Stats::GLM::abs_effect_size (data, design, contrast);
        save_matrix (mat2vec.V2M (abs_effects.col(0)), output_prefix + "_abs_effect.csv");
        ++progress;
        const matrix_type std_effects = Math::Stats::GLM::std_effect_size (data, design, contrast);
        matrix_type first_std_effect = mat2vec.V2M (std_effects.col (0));
        for (MR::Connectome::node_t i = 0; i != num_nodes; ++i) {
            for (MR::Connectome::node_t j = 0; j != num_nodes; ++j) {
                if (!std::isfinite (first_std_effect (i, j)))
                    first_std_effect (i, j) = 0.0;
            }
        }
        save_matrix (first_std_effect, output_prefix + "_std_effect.csv");
        ++progress;
        const matrix_type stdevs = Math::Stats::GLM::stdev (data, design);
        save_vector (stdevs.col(0), output_prefix + "_std_dev.csv");
    }

    Math::Stats::GLMTTest glm_ttest (data, design, contrast);

    // If performing non-stationarity adjustment we need to pre-compute the empirical statistic
    vector_type empirical_statistic;
    if (do_nonstationary_adjustment) {
        empirical_statistic = vector_type::Zero (num_edges);
        if (permutations_nonstationary.size()) {
            Stats::PermTest::PermutationStack perm_stack (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment...");
            Stats::PermTest::precompute_empirical_stat (glm_ttest, enhancer, perm_stack, empirical_statistic);
        } else {
            Stats::PermTest::PermutationStack perm_stack (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment...", true);
            Stats::PermTest::precompute_empirical_stat (glm_ttest, enhancer, perm_stack, empirical_statistic);
        }
        save_matrix (mat2vec.V2M (empirical_statistic), output_prefix + "_empirical.csv");
    }

    // Precompute default statistic and enhanced statistic
    vector_type tvalue_output (num_edges);
    vector_type enhanced_output (num_edges);
    Stats::PermTest::precompute_default_permutation (glm_ttest, enhancer, empirical_statistic, enhanced_output, std::shared_ptr<vector_type>(), tvalue_output);
    save_matrix (mat2vec.V2M (tvalue_output), output_prefix + "_tvalue.csv");
    save_matrix (mat2vec.V2M (enhanced_output), output_prefix + "_enhanced.csv");

    // Perform permutation testing
    if (!get_options ("notest").size()) {
        // FIXME Getting NANs in the null distribution
        // Check: was result of pre-nulled subject data
        vector_type null_distribution (num_perms);
        vector_type uncorrected_pvalues (num_edges);
        if (permutations.size()) {
            Stats::PermTest::run_permutations (permutations, glm_ttest, enhancer, empirical_statistic,
                                               enhanced_output, std::shared_ptr<vector_type>(),
                                               null_distribution, std::shared_ptr<vector_type>(),
                                               uncorrected_pvalues, std::shared_ptr<vector_type>());
        } else {
            Stats::PermTest::run_permutations (num_perms, glm_ttest, enhancer, empirical_statistic,
                                               enhanced_output, std::shared_ptr<vector_type>(),
                                               null_distribution, std::shared_ptr<vector_type>(),
                                               uncorrected_pvalues, std::shared_ptr<vector_type>());
        }
        save_vector (null_distribution, output_prefix + "_null_dist.txt");
        vector_type pvalue_output (num_edges);
        Math::Stats::Permutation::statistic2pvalue (null_distribution, enhanced_output, pvalue_output);
        save_matrix (mat2vec.V2M (pvalue_output), output_prefix + "_fwe_pvalue.csv");
        save_matrix (mat2vec.V2M (uncorrected_pvalues), output_prefix + "_uncorrected_pvalue.csv");
    }
}
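// A hypothetical invocation of this command (illustrative only; the binary and file names
// are assumptions, while the argument order and option names follow the code above: a text
// file listing one connectome matrix file per line, the enhancement algorithm index
// (0 = NBS, 1 = TFCE, 2 = pass-through), the design matrix, the contrast matrix, and the
// output prefix):
//
//   connectomestats subjects.txt 0 design.txt contrast.txt cohort_ -threshold 3.0 -nperms 5000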
void fit()
{
    std::cerr << "\nbefore the fit, thickness is set to " << thickness << "\n";

    assert( ug_size );
    assert( ar_dim );
    assert( column_index < ar_dim );
    assert( std::abs(std::real(thickness)) < 1.0e-10 );
    assert( std::imag(thickness) > 1.0e-10 );
    assert( diag_matrix.col() == ar_dim );
    assert( diag_matrix.row() == intensity_matrix.row() );
    assert( intensity_matrix.col() == ar_dim );
    assert( initial_ug.row() == ug_size );
    assert( initial_ug.col() == 1 );
    assert( ar.row() == ar.col() );
    assert( ar_dim == ar.row() );
    assert( progress_ratio >= value_type{0} );
    assert( progress_ratio <= value_type{1} );
    assert( alpha );
    assert( beta );
    assert( gamma );

    new_residual = iterate( initial_ug, new_ug );

    matrix_type second_ug{ initial_ug };
    size_type current_iteration = 0;

    matrix_vector_type vm;
    vector_type vr;
    vm.push_back( new_ug );
    vr.push_back( new_residual );

    value_type best_residual_so_far = new_residual;

    while ( true )
    {
        value_type const second_residual = iterate( new_ug, second_ug );

        bool break_flag = false;

        //??
        if ( best_residual_so_far > max_iteration * second_residual )
            break_flag = true;

        best_residual_so_far = std::min( second_residual, best_residual_so_far );

        if ( ++current_iteration > max_iteration )
            break_flag = true;

        new_ug.swap( second_ug );
        new_residual = second_residual;

        vm.push_back( new_ug );
        vr.push_back( new_residual );

        if ( break_flag )
            break;
    }

    size_type const elite_index = std::distance( vr.begin(), std::min_element( vr.begin(), vr.end() ) );
    std::copy( vm[elite_index].begin(), vm[elite_index].end(), new_ug.begin() );

    //std::cout << "\ncurrent elite residual is " << vr[elite_index] << ", at iteration " << current_iteration << std::endl;
}
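// A minimal usage sketch (the solver type name and the initialisation below are hypothetical;
// only fit(), iterate() and the members they reference appear in the code above):
//
//   homotopy_solver solver{ /* ar, diag_matrix, intensity_matrix, initial_ug, ... */ };
//   solver.thickness      = complex_type{ 0.0, 1.0e2 }; // purely imaginary, as the asserts require
//   solver.progress_ratio = 0.5;                        // blends the C1 / C(t/2)C(t/2) / expm terms
//   solver.max_iteration  = 100;
//   solver.fit();                                       // elite solution is left in solver.new_ug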