int GenericReconCartesianNonLinearSpirit2DTGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD header and fill in defaults for any non-linear SPIRIT
    // parameters the user left unset (zero / below epsilon).
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    // -------------------------------------------------

    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    // -------------------------------------------------
    // check the parameters

    if (this->spirit_nl_iter_max.value() == 0)
    {
        this->spirit_nl_iter_max.value(15);
        // BUG FIX: the log label previously read "spirit_iter_max", which names a
        // different parameter; this line reports spirit_nl_iter_max.
        GDEBUG_STREAM("spirit_nl_iter_max: " << this->spirit_nl_iter_max.value());
    }

    if (this->spirit_nl_iter_thres.value() < FLT_EPSILON)
    {
        this->spirit_nl_iter_thres.value(0.004);
        GDEBUG_STREAM("spirit_nl_iter_thres: " << this->spirit_nl_iter_thres.value());
    }

    if (this->spirit_image_reg_lamda.value() < FLT_EPSILON)
    {
        // stronger default regularization when proximity across channels is used
        if (this->spirit_reg_proximity_across_cha.value())
        {
            this->spirit_image_reg_lamda.value(0.0002);
        }
        else
        {
            this->spirit_image_reg_lamda.value(0.00005);
        }

        GDEBUG_STREAM("spirit_image_reg_lamda: " << this->spirit_image_reg_lamda.value());
    }

    if (this->spirit_reg_N_weighting_ratio.value() < FLT_EPSILON)
    {
        // higher E1 acceleration gets a larger N weighting ratio by default
        if (acceFactorE1_[0] <= 5)
        {
            this->spirit_reg_N_weighting_ratio.value(10.0);
        }
        else
        {
            this->spirit_reg_N_weighting_ratio.value(20.0);
        }

        GDEBUG_STREAM("spirit_reg_N_weighting_ratio: " << this->spirit_reg_N_weighting_ratio.value());
    }

    return GADGET_OK;
}
int GenericReconFieldOfViewAdjustmentGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD header and record, per encoding space, the encoded FOV,
    // the recon FOV and the recon matrix size used later for FOV adjustment.
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    const size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    encoding_FOV_.resize(NE);
    recon_FOV_.resize(NE);
    recon_size_.resize(NE);

    for (size_t e = 0; e < NE; e++)
    {
        const auto& enc_fov = h.encoding[e].encodedSpace.fieldOfView_mm;
        const auto& rec_fov = h.encoding[e].reconSpace.fieldOfView_mm;
        const auto& rec_mat = h.encoding[e].reconSpace.matrixSize;

        encoding_FOV_[e].resize(3, 0);
        encoding_FOV_[e][0] = enc_fov.x;
        encoding_FOV_[e][1] = enc_fov.y;
        encoding_FOV_[e][2] = enc_fov.z;

        recon_FOV_[e].resize(3, 0);
        recon_FOV_[e][0] = rec_fov.x;
        recon_FOV_[e][1] = rec_fov.y;
        recon_FOV_[e][2] = rec_fov.z;

        recon_size_[e].resize(3, 0);
        recon_size_[e][0] = rec_mat.x;
        recon_size_[e][1] = rec_mat.y;
        recon_size_[e][2] = rec_mat.z;

        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - encoding FOV : [" << encoding_FOV_[e][0] << " " << encoding_FOV_[e][1] << " " << encoding_FOV_[e][2] << " ]");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - recon FOV : [" << recon_FOV_[e][0] << " " << recon_FOV_[e][1] << " " << recon_FOV_[e][2] << " ]");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - recon size : [" << recon_size_[e][0] << " " << recon_size_[e][1] << " " << recon_size_[e][2] << " ]");
    }

    return GADGET_OK;
}
int CmrParametricT2MappingGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD header and read the T2 prep times from the protocol's
    // user parameters (one prep time per SET index).
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // BUG FIX: log message previously read "from from protocol"
    GDEBUG_STREAM("Read prep times from protocol : " << this->prep_times_.size() << " [ ");

    // set num_T2prep_ to be number of SET
    this->prep_times_.resize(this->meas_max_idx_.set + 1);

    if (h.userParameters)
    {
        // Scan the double-valued user parameters for T2PrepDuration_<i> entries.
        // NOTE(review): the running index i advances on EVERY parameter, so a
        // T2PrepDuration_k entry is only matched when it sits at list position k
        // -- confirm the protocol always emits them first and in order.
        size_t i = 0;
        if (h.userParameters->userParameterDouble.size() > 0)
        {
            std::vector<ISMRMRD::UserParameterDouble>::const_iterator iter = h.userParameters->userParameterDouble.begin();
            for (; iter != h.userParameters->userParameterDouble.end(); iter++)
            {
                std::string usrParaName = iter->name;
                double usrParaValue = iter->value;

                std::stringstream str;
                str << "T2PrepDuration_" << i;

                if (usrParaName == str.str() && i < this->prep_times_.size())
                {
                    this->prep_times_[i] = (float)usrParaValue;
                    GDEBUG_STREAM("CmrParametricT2MappingGadget, find T2 prep time : " << i << " - " << this->prep_times_[i]);
                }

                i++;
            }
        }
    }

    // -------------------------------------------------

    return GADGET_OK;
}
int GenericReconPartialFourierHandlingGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD header and record the parallel-imaging acceleration
    // factors (E1/E2) for every encoding space; defaults to 1 (no acceleration).
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    acceFactorE1_.resize(NE, 1);
    acceFactorE2_.resize(NE, 1);

    size_t e;
    for (e = 0; e < h.encoding.size(); e++)
    {
        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG_STREAM("Parallel Imaging section not found in header for encoding " << e);
            acceFactorE1_[e] = 1;
            acceFactorE2_[e] = 1;
        }
        else
        {
            // BUG FIX: previously dereferenced h.encoding[0].parallelImaging, so
            // every encoding space got the acceleration factors of the first one
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            acceFactorE1_[e] = p_imaging.accelerationFactor.kspace_encoding_step_1;
            acceFactorE2_[e] = p_imaging.accelerationFactor.kspace_encoding_step_2;

            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << acceFactorE1_[e]);
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << acceFactorE2_[e]);
        }
    }

    return GADGET_OK;
}
int GenericReconFieldOfViewAdjustmentGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdImageArray >* m1)
{
    // Adjusts the field of view of the incoming image array via adjust_FOV(),
    // re-synchronizes the image headers with the (possibly resized) data, and
    // forwards the message to the next gadget. Returns GADGET_OK / GADGET_FAIL.
    if (perform_timing.value()) { gt_timer_.start("GenericReconFieldOfViewAdjustmentGadget::process"); }

    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconFieldOfViewAdjustmentGadget::process(...) starts ... ");

    process_called_times_++;

    IsmrmrdImageArray* recon_res_ = m1->getObjectPtr();

    // print out recon info
    if (verbose.value())
    {
        GDEBUG_STREAM("----> GenericReconFieldOfViewAdjustmentGadget::process(...) has been called " << process_called_times_ << " times ...");
        std::stringstream os;
        recon_res_->data_.print(os);
        GDEBUG_STREAM(os.str());
    }

    // optional debug dump before the adjustment
    if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_res_->data_, debug_folder_full_path_ + "data_before_FOV_adjustment"); }

    // ----------------------------------------------------------
    // FOV adjustment
    // ----------------------------------------------------------
    GADGET_CHECK_RETURN(this->adjust_FOV(*recon_res_) == GADGET_OK, GADGET_FAIL);

    // optional debug dump after the adjustment
    if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_res_->data_, debug_folder_full_path_ + "data_after_FOV_adjustment"); }

    // make sure the image header is consistent with data
    // (adjust_FOV may have changed the first three array dimensions)
    size_t N = recon_res_->headers_.get_number_of_elements();
    for (size_t n = 0; n < N; n++)
    {
        recon_res_->headers_(n).matrix_size[0] = recon_res_->data_.get_size(0);
        recon_res_->headers_(n).matrix_size[1] = recon_res_->data_.get_size(1);
        recon_res_->headers_(n).matrix_size[2] = recon_res_->data_.get_size(2);
    }

    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconFieldOfViewAdjustmentGadget::process(...) ends ... 
");

    // ----------------------------------------------------------
    // send out results
    // ----------------------------------------------------------
    if (this->next()->putq(m1) == -1)
    {
        GERROR("GenericReconFieldOfViewAdjustmentGadget::process, passing data on to next gadget");
        return GADGET_FAIL;
    }

    if (perform_timing.value()) { gt_timer_.stop(); }

    return GADGET_OK;
}
int GenericReconCartesianGrappaGadget::process_config(ACE_Message_Block *mb)
{
    // Parse the ISMRMRD header to learn how many encoding spaces the stream
    // carries and size one GRAPPA recon workspace per encoding space.
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    // -------------------------------------------------

    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    const size_t num_encodings = h.encoding.size();
    num_encoding_spaces_ = num_encodings;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << num_encodings);

    recon_obj_.resize(num_encodings);

    return GADGET_OK;
}
int CmrParametricT2MappingGadget::perform_mapping(IsmrmrdImageArray& data, IsmrmrdImageArray& map, IsmrmrdImageArray& para, IsmrmrdImageArray& map_sd, IsmrmrdImageArray& para_sd) { try { if (perform_timing.value()) { gt_timer_.start("CmrParametricT2MappingGadget::perform_mapping"); } GDEBUG_CONDITION_STREAM(verbose.value(), "CmrParametricT2MappingGadget::perform_mapping(...) starts ... "); size_t RO = data.data_.get_size(0); size_t E1 = data.data_.get_size(1); size_t E2 = data.data_.get_size(2); size_t CHA = data.data_.get_size(3); size_t N = data.data_.get_size(4); size_t S = data.data_.get_size(5); size_t SLC = data.data_.get_size(6); size_t ro, e1, s, slc, p; GADGET_CHECK_RETURN(E2 == 1, GADGET_FAIL); GADGET_CHECK_RETURN(CHA == 1, GADGET_FAIL); GADGET_CHECK_RETURN(this->prep_times_.size() >= N, GADGET_FAIL); hoNDArray<float> mag; Gadgetron::abs(data.data_, mag); if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array(mag, debug_folder_full_path_ + "CmrParametricT2Mapping_data_mag"); } bool need_sd_map = send_sd_map.value(); Gadgetron::GadgetronTimer gt_timer(false); // ------------------------------------------------------------- // set mapping parameters Gadgetron::CmrT2Mapping<float> t2_mapper; t2_mapper.fill_holes_in_maps_ = perform_hole_filling.value(); t2_mapper.max_size_of_holes_ = max_size_hole.value(); t2_mapper.compute_SD_maps_ = need_sd_map; t2_mapper.ti_.resize(N, 0); memcpy(&(t2_mapper.ti_)[0], &this->prep_times_[0], sizeof(float)*N); t2_mapper.data_.create(RO, E1, N, S, SLC, mag.begin()); t2_mapper.max_iter_ = max_iter.value(); t2_mapper.thres_fun_ = thres_func.value(); t2_mapper.max_map_value_ = max_T2.value(); t2_mapper.verbose_ = verbose.value(); t2_mapper.debug_folder_ = debug_folder_full_path_; t2_mapper.perform_timing_ = perform_timing.value(); // ------------------------------------------------------------- // compute mask if needed if (mapping_with_masking.value()) { t2_mapper.mask_for_mapping_.create(RO, E1, SLC); // get the image 
with shortest prep time hoNDArray<float> mag_shortest_TE; mag_shortest_TE.create(RO, E1, SLC); for (slc = 0; slc < SLC; slc++) { size_t ind = 0; float min_te = this->prep_times_[0]; for (size_t n = 1; n < this->prep_times_.size(); n++) { if(this->prep_times_[n]<min_te) { min_te = this->prep_times_[n]; ind = n; } } memcpy(&mag_shortest_TE(0, 0, slc), &mag(0, 0, ind, 0, slc), sizeof(float)*RO*E1); } if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array(mag_shortest_TE, debug_folder_full_path_ + "CmrParametricT2Mapping_mag_shortest_TE"); } double scale_factor = 1.0; if (data.meta_[0].length(GADGETRON_IMAGE_SCALE_RATIO) > 0) { scale_factor = data.meta_[0].as_double(GADGETRON_IMAGE_SCALE_RATIO); } GDEBUG_STREAM("CmrParametricT2MappingGadget, find incoming image has scale factor of " << scale_factor); if (perform_timing.value()) { gt_timer.start("CmrParametricT2MappingGadget::compute_mask_for_mapping"); } this->compute_mask_for_mapping(mag, t2_mapper.mask_for_mapping_, (float)scale_factor); if (perform_timing.value()) { gt_timer.stop(); } if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array(t2_mapper.mask_for_mapping_, debug_folder_full_path_ + "CmrParametricT2Mapping_mask_for_mapping"); } } // ------------------------------------------------------------- // perform mapping if (perform_timing.value()) { gt_timer.start("CmrParametricT2MappingGadget, t2_mapper.perform_parametric_mapping"); } t2_mapper.perform_parametric_mapping(); if (perform_timing.value()) { gt_timer.stop(); } size_t num_para = t2_mapper.get_num_of_paras(); // ------------------------------------------------------------- // get the results map.data_.create(RO, E1, E2, CHA, 1, S, SLC); Gadgetron::clear(map.data_); map.headers_.create(1, S, SLC); map.meta_.resize(S*SLC); para.data_.create(RO, E1, E2, CHA, num_para, S, SLC); Gadgetron::clear(para.data_); para.headers_.create(num_para, S, SLC); para.meta_.resize(num_para*S*SLC); if (need_sd_map) { map_sd.data_.create(RO, E1, E2, CHA, 
1, S, SLC); Gadgetron::clear(map_sd.data_); map_sd.headers_.create(1, S, SLC); map_sd.meta_.resize(S*SLC); para_sd.data_.create(RO, E1, E2, CHA, num_para, S, SLC); Gadgetron::clear(para_sd.data_); para_sd.headers_.create(num_para, S, SLC); para_sd.meta_.resize(num_para*S*SLC); } for (slc = 0; slc < SLC; slc++) { for (s = 0; s < S; s++) { for (e1 = 0; e1 < E1; e1++) { for (ro = 0; ro < RO; ro++) { map.data_(ro, e1, 0, 0, 0, s, slc) = t2_mapper.map_(ro, e1, s, slc); if (need_sd_map) { map_sd.data_(ro, e1, 0, 0, 0, s, slc) = t2_mapper.sd_map_(ro, e1, s, slc); } for (p = 0; p < num_para; p++) { para.data_(ro, e1, 0, 0, p, s, slc) = t2_mapper.para_(ro, e1, p, s, slc); if (need_sd_map) { para_sd.data_(ro, e1, 0, 0, p, s, slc) = t2_mapper.sd_para_(ro, e1, p, s, slc); } } } } size_t slc_ind = data.headers_(0, s, slc).slice; map.headers_(0, s, slc) = data.headers_(0, s, slc); map.headers_(0, s, slc).image_index = 1 + slc_ind; map.headers_(0, s, slc).image_series_index = 11; map.meta_[s+slc*S] = data.meta_[s + slc*S]; map.meta_[s + slc*S].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_T2MAP); map.meta_[s + slc*S].append(GADGETRON_SEQUENCEDESCRIPTION, GADGETRON_IMAGE_T2MAP); map.meta_[s + slc*S].append(GADGETRON_IMAGEPROCESSINGHISTORY, GADGETRON_IMAGE_T2MAP); map_sd.headers_(0, s, slc) = data.headers_(0, s, slc); map_sd.headers_(0, s, slc).image_index = 1 + slc_ind; map_sd.headers_(0, s, slc).image_series_index = 12; map_sd.meta_[s + slc*S] = data.meta_[s + slc*S]; map_sd.meta_[s + slc*S].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_T2SDMAP); map_sd.meta_[s + slc*S].append(GADGETRON_SEQUENCEDESCRIPTION, GADGETRON_IMAGE_T2SDMAP); map_sd.meta_[s + slc*S].append(GADGETRON_IMAGEPROCESSINGHISTORY, GADGETRON_IMAGE_T2SDMAP); if (need_sd_map) { for (p = 0; p < num_para; p++) { para.headers_(p, s, slc) = data.headers_(0, s, slc); para.headers_(p, s, slc).image_index = 1 + p + slc_ind*num_para; para.meta_[p + s*num_para + slc*num_para*S] = data.meta_[s + slc*S]; para_sd.headers_(p, s, slc) = 
data.headers_(0, s, slc); para_sd.headers_(p, s, slc).image_index = 1 + p + slc_ind*num_para; para_sd.meta_[p + s*num_para + slc*num_para*S] = data.meta_[s + slc*S]; } } } } // ------------------------------------------------------------- if (perform_timing.value()) { gt_timer_.stop(); } } catch (...) { GERROR_STREAM("Exceptions happened in CmrParametricT2MappingGadget::perform_mapping(...) ... "); return GADGET_FAIL; } return GADGET_OK; }
int GenericReconPartialFourierHandlingGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdImageArray >* m1)
{
    // Applies partial fourier handling to the incoming image array: detects the
    // sampled kspace range from the image meta, transforms to kspace, runs the
    // subclass handler (perform_partial_fourier_handling), transforms back, and
    // forwards the message. Images flagged to skip, or fully sampled data, are
    // passed through untouched.
    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconPartialFourierHandlingGadget::process(...) starts ... ");

    process_called_times_++;

    IsmrmrdImageArray* recon_res_ = m1->getObjectPtr();

    // print out recon info
    if (verbose.value())
    {
        GDEBUG_STREAM("----> GenericReconPartialFourierHandlingGadget::process(...) has been called " << process_called_times_ << " times ...");
        std::stringstream os;
        recon_res_->data_.print(os);
        GDEBUG_STREAM(os.str());
    }

    // some images do not need partial fourier handling processing
    if (recon_res_->meta_[0].length(skip_processing_meta_field.value().c_str())>0)
    {
        if (this->next()->putq(m1) == -1)
        {
            GERROR("GenericReconPartialFourierHandlingGadget::process, passing incoming image array on to next gadget");
            return GADGET_FAIL;
        }

        return GADGET_OK;
    }

    // call the partial fourier handling for this image's encoding space
    size_t encoding = (size_t)recon_res_->meta_[0].as_long("encoding", 0);
    GADGET_CHECK_RETURN(encoding<num_encoding_spaces_, GADGET_FAIL);

    // read the sampling limits (min/center/max for RO, E1, E2) back from meta
    SamplingLimit sampling_limits[3];

    sampling_limits[0].min_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 0);
    sampling_limits[0].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 1);
    sampling_limits[0].max_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 2);

    sampling_limits[1].min_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 0);
    sampling_limits[1].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 1);
    sampling_limits[1].max_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 2);

    sampling_limits[2].min_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 0);
    sampling_limits[2].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 1);
    sampling_limits[2].max_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 2);

    size_t RO = recon_res_->data_.get_size(0);
    size_t E1 = recon_res_->data_.get_size(1);
    size_t E2 = recon_res_->data_.get_size(2);
    size_t CHA = recon_res_->data_.get_size(3);
    size_t N = recon_res_->data_.get_size(4);
    size_t S = recon_res_->data_.get_size(5);
    size_t SLC = recon_res_->data_.get_size(6);

    // ----------------------------------------------------------
    // pf kspace sampling range
    // ----------------------------------------------------------
    // if image padding is performed, those dimension may not need partial fourier handling
    startRO_ = sampling_limits[0].min_;
    endRO_ = sampling_limits[0].max_;

    startE1_ = 0;
    endE1_ = E1 - 1;

    startE2_ = 0;
    endE2_ = E2 - 1;

    // treat E1 (and E2 for 3D) as partially sampled only when the sampled range
    // is asymmetric about the center by more than the acceleration factor
    if (std::abs((double)(sampling_limits[1].max_ - E1 / 2) - (double)(E1 / 2 - sampling_limits[1].min_)) > acceFactorE1_[encoding])
    {
        startE1_ = sampling_limits[1].min_;
        endE1_ = sampling_limits[1].max_;
    }

    if ((E2>1) && (std::abs((double)(sampling_limits[2].max_ - E2 / 2) - (double)(E2 / 2 - sampling_limits[2].min_)) > acceFactorE2_[encoding]))
    {
        startE2_ = sampling_limits[2].min_;
        endE2_ = sampling_limits[2].max_;
    }

    long lenRO = endRO_ - startRO_ + 1;
    long lenE1 = endE1_ - startE1_ + 1;
    long lenE2 = endE2_ - startE2_ + 1;

    // fully sampled in all dimensions: nothing to do, pass the data through
    if (lenRO == RO && lenE1 == E1 && lenE2 == E2)
    {
        GDEBUG_CONDITION_STREAM(verbose.value(), "lenRO == RO && lenE1 == E1 && lenE2 == E2");
        if (this->next()->putq(m1) == -1)
        {
            GERROR("GenericReconPartialFourierHandlingGadget::process, passing data on to next gadget");
            return GADGET_FAIL;
        }
        return GADGET_OK;
    }

    // ----------------------------------------------------------
    // go to kspace
    // ----------------------------------------------------------
    if (E2 > 1)
    {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->fft3c(recon_res_->data_, kspace_buf_);
    }
    else
    {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->fft2c(recon_res_->data_, kspace_buf_);
    }

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(kspace_buf_, debug_folder_full_path_ + "kspace_before_pf"); }*/

    // ----------------------------------------------------------
    // pf handling
    // ----------------------------------------------------------
    GADGET_CHECK_RETURN(this->perform_partial_fourier_handling() == GADGET_OK, GADGET_FAIL);

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(pf_res_, debug_folder_full_path_ + "kspace_after_pf"); }*/

    // ----------------------------------------------------------
    // go back to image domain
    // ----------------------------------------------------------
    if (E2 > 1)
    {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->ifft3c(pf_res_, recon_res_->data_);
    }
    else
    {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->ifft2c(pf_res_, recon_res_->data_);
    }

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(recon_res_->data_, debug_folder_full_path_ + "data_after_pf"); }*/

    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconPartialFourierHandlingGadget::process(...) ends ... ");

    // ----------------------------------------------------------
    // send out results
    // ----------------------------------------------------------
    if (this->next()->putq(m1) == -1)
    {
        GERROR("GenericReconPartialFourierHandlingGadget::process, passing data on to next gadget");
        return GADGET_FAIL;
    }

    return GADGET_OK;
}
int GenericReconEigenChannelGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD header and record, per encoding space, the calibration
    // mode used for KL-transform channel compression.
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    calib_mode_.resize(NE, ISMRMRD_noacceleration);
    KLT_.resize(NE);

    for (size_t e = 0; e < h.encoding.size(); e++)
    {
        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
        }
        else
        {
            // BUG FIX: previously dereferenced h.encoding[0].parallelImaging, so
            // every encoding space inherited the calibration mode of the first one
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            // calibrationMode is optional in the header; guard before dereferencing
            std::string calib = p_imaging.calibrationMode ? *p_imaging.calibrationMode : "";

            bool separate = (calib.compare("separate") == 0);
            bool embedded = (calib.compare("embedded") == 0);
            bool external = (calib.compare("external") == 0);
            bool interleaved = (calib.compare("interleaved") == 0);
            bool other = (calib.compare("other") == 0);

            // acceleration only counts when at least one factor is > 1
            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (p_imaging.accelerationFactor.kspace_encoding_step_1 > 1 || p_imaging.accelerationFactor.kspace_encoding_step_2 > 1)
            {
                if (interleaved)
                    calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                else if (embedded)
                    calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                else if (separate)
                    calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                else if (external)
                    calib_mode_[e] = Gadgetron::ISMRMRD_external;
                else if (other)
                    calib_mode_[e] = Gadgetron::ISMRMRD_other;
            }
        }
    }

    return GADGET_OK;
}