int GenericReconPartialFourierHandlingGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD XML header and cache per-encoding-space acceleration
    // factors; these are later used to decide the sampled k-space region for
    // partial Fourier handling.
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    // default acceleration factor is 1 (no acceleration)
    acceFactorE1_.resize(NE, 1);
    acceFactorE2_.resize(NE, 1);

    size_t e;
    for (e = 0; e < h.encoding.size(); e++)
    {
        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG("Parallel Imaging section not found in header");
            return GADGET_FAIL;
        }

        // BUG FIX: read the parallelImaging section of encoding space 'e',
        // not always encoding space 0 — otherwise every encoding space would
        // silently get the first space's acceleration factors.
        ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

        acceFactorE1_[e] = p_imaging.accelerationFactor.kspace_encoding_step_1;
        acceFactorE2_[e] = p_imaging.accelerationFactor.kspace_encoding_step_2;
        GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << acceFactorE1_[e]);
        GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << acceFactorE2_[e]);
    }

    // NOTE: removed a long commented-out debug-folder block that duplicated
    // GenericReconBase<T>::process_config.

    return GADGET_OK;
}
int GenericReconFieldOfViewAdjustmentGadget::process_config(ACE_Message_Block* mb)
{
    // Run the shared base-class configuration first.
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    // Parse the ISMRMRD XML header carried in the message block.
    ISMRMRD::IsmrmrdHeader hdr;
    try
    {
        deserialize(mb->rd_ptr(), hdr);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!hdr.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    const size_t NE = hdr.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    // Cache the encoded/recon field-of-view and recon matrix size per
    // encoding space; these drive the FOV adjustment in process().
    encoding_FOV_.resize(NE);
    recon_FOV_.resize(NE);
    recon_size_.resize(NE);

    for (size_t e = 0; e < NE; e++)
    {
        const auto& enc_fov = hdr.encoding[e].encodedSpace.fieldOfView_mm;
        const auto& rec_fov = hdr.encoding[e].reconSpace.fieldOfView_mm;
        const auto& rec_mtx = hdr.encoding[e].reconSpace.matrixSize;

        encoding_FOV_[e].resize(3, 0);
        encoding_FOV_[e][0] = enc_fov.x;
        encoding_FOV_[e][1] = enc_fov.y;
        encoding_FOV_[e][2] = enc_fov.z;

        recon_FOV_[e].resize(3, 0);
        recon_FOV_[e][0] = rec_fov.x;
        recon_FOV_[e][1] = rec_fov.y;
        recon_FOV_[e][2] = rec_fov.z;

        recon_size_[e].resize(3, 0);
        recon_size_[e][0] = rec_mtx.x;
        recon_size_[e][1] = rec_mtx.y;
        recon_size_[e][2] = rec_mtx.z;

        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - encoding FOV : [" << encoding_FOV_[e][0] << " " << encoding_FOV_[e][1] << " " << encoding_FOV_[e][2] << " ]");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - recon    FOV : [" << recon_FOV_[e][0] << " " << recon_FOV_[e][1] << " " << recon_FOV_[e][2] << " ]");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - recon    size : [" << recon_size_[e][0] << " " << recon_size_[e][1] << " " << recon_size_[e][2] << " ]");
    }

    return GADGET_OK;
}
int GenericReconPartialFourierHandlingGadget::process_config(ACE_Message_Block* mb)
{
    // Base-class configuration (debug folder, common options) must succeed first.
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    // Parse the ISMRMRD XML header and cache per-encoding-space acceleration
    // factors used later when deciding the sampled k-space region.
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    // default acceleration factor is 1 (no acceleration)
    acceFactorE1_.resize(NE, 1);
    acceFactorE2_.resize(NE, 1);

    size_t e;
    for (e = 0; e < h.encoding.size(); e++)
    {
        if (!h.encoding[e].parallelImaging)
        {
            // Tolerate a missing parallelImaging section: treat it as unaccelerated.
            GDEBUG_STREAM("Parallel Imaging section not found in header for encoding " << e);
            acceFactorE1_[e] = 1;
            acceFactorE2_[e] = 1;
        }
        else
        {
            // BUG FIX: read the parallelImaging section of encoding space 'e',
            // not always encoding space 0.
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            acceFactorE1_[e] = p_imaging.accelerationFactor.kspace_encoding_step_1;
            acceFactorE2_[e] = p_imaging.accelerationFactor.kspace_encoding_step_2;
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << acceFactorE1_[e]);
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << acceFactorE2_[e]);
        }
    }

    return GADGET_OK;
}
// Pipeline stage: adjusts the field of view of an incoming image array and
// passes the (possibly resized) array on to the next gadget.
int GenericReconFieldOfViewAdjustmentGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdImageArray >* m1)
{
    if (perform_timing.value()) { gt_timer_.start("GenericReconFieldOfViewAdjustmentGadget::process"); }

    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconFieldOfViewAdjustmentGadget::process(...) starts ... ");

    process_called_times_++;

    IsmrmrdImageArray* recon_res_ = m1->getObjectPtr();

    // print out recon info
    if (verbose.value())
    {
        GDEBUG_STREAM("----> GenericReconFieldOfViewAdjustmentGadget::process(...) has been called " << process_called_times_ << " times ...");
        std::stringstream os;
        recon_res_->data_.print(os);
        GDEBUG_STREAM(os.str());
    }

    if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_res_->data_, debug_folder_full_path_ + "data_before_FOV_adjustment"); }

    // ----------------------------------------------------------
    // FOV adjustment (may change the data array's matrix size)
    // ----------------------------------------------------------
    // NOTE(review): on this and the putq failure path below, gt_timer_ started
    // above is never stopped — confirm the timer tolerates an unmatched start.
    GADGET_CHECK_RETURN(this->adjust_FOV(*recon_res_) == GADGET_OK, GADGET_FAIL);

    if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_res_->data_, debug_folder_full_path_ + "data_after_FOV_adjustment"); }

    // make sure the image header is consistent with data:
    // copy the (possibly new) RO/E1/E2 sizes into every image header
    size_t N = recon_res_->headers_.get_number_of_elements();
    for (size_t n = 0; n < N; n++)
    {
        recon_res_->headers_(n).matrix_size[0] = recon_res_->data_.get_size(0);
        recon_res_->headers_(n).matrix_size[1] = recon_res_->data_.get_size(1);
        recon_res_->headers_(n).matrix_size[2] = recon_res_->data_.get_size(2);
    }

    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconFieldOfViewAdjustmentGadget::process(...) ends ... ");

    // ----------------------------------------------------------
    // send out results
    // ----------------------------------------------------------
    if (this->next()->putq(m1) == -1)
    {
        GERROR("GenericReconFieldOfViewAdjustmentGadget::process, passing data on to next gadget");
        return GADGET_FAIL;
    }

    if (perform_timing.value()) { gt_timer_.stop(); }

    return GADGET_OK;
}
int GenericReconBase<T>::process_config(ACE_Message_Block* mb)
{
    // Resolve the debug-dump folder once at configuration time; an empty
    // property leaves debug dumping disabled.
    if (debug_folder.value().empty())
    {
        GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is not set ... ");
    }
    else
    {
        Gadgetron::get_debug_folder_path(debug_folder.value(), debug_folder_full_path_);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is " << debug_folder_full_path_);
    }
    return GADGET_OK;
}
int GenericReconCartesianGrappaGadget::close(unsigned long flags) { GDEBUG_CONDITION_STREAM(true, "GenericReconCartesianGrappaGadget - close(flags) : " << flags); if (BaseClass::close(flags) != GADGET_OK) return GADGET_FAIL; if (flags != 0) { size_t e; for (e = 0; e < recon_obj_.size(); e++) { GDEBUG_STREAM("Clean recon_obj_ for encoding space " << e); if (recon_obj_[e].recon_res_.data_.delete_data_on_destruct()) recon_obj_[e].recon_res_.data_.clear(); if (recon_obj_[e].recon_res_.headers_.delete_data_on_destruct()) recon_obj_[e].recon_res_.headers_.clear(); recon_obj_[e].recon_res_.meta_.clear(); if (recon_obj_[e].gfactor_.delete_data_on_destruct()) recon_obj_[e].gfactor_.clear(); if (recon_obj_[e].ref_calib_.delete_data_on_destruct()) recon_obj_[e].ref_calib_.clear(); if (recon_obj_[e].ref_calib_dst_.delete_data_on_destruct()) recon_obj_[e].ref_calib_dst_.clear(); if (recon_obj_[e].ref_coil_map_.delete_data_on_destruct()) recon_obj_[e].ref_coil_map_.clear(); if (recon_obj_[e].kernel_.delete_data_on_destruct()) recon_obj_[e].kernel_.clear(); if (recon_obj_[e].kernelIm_.delete_data_on_destruct()) recon_obj_[e].kernelIm_.clear(); if (recon_obj_[e].unmixing_coeff_.delete_data_on_destruct()) recon_obj_[e].unmixing_coeff_.clear(); if (recon_obj_[e].coil_map_.delete_data_on_destruct()) recon_obj_[e].coil_map_.clear(); } } return GADGET_OK; }
int GenericReconFieldOfViewAdjustmentGadget::close(unsigned long flags)
{
    // Log every close call, then defer to the base-class shutdown.
    GDEBUG_CONDITION_STREAM(true, "GenericReconFieldOfViewAdjustmentGadget - close(flags) : " << flags);

    if (BaseClass::close(flags) != GADGET_OK)
    {
        return GADGET_FAIL;
    }

    // No gadget-specific resources to release on a final close (flags != 0).
    return GADGET_OK;
}
int GenericReconPartialFourierHandlingGadget::close(unsigned long flags)
{
    // Log every close call, then defer to the base-class shutdown.
    GDEBUG_CONDITION_STREAM(true, "GenericReconPartialFourierHandlingGadget - close(flags) : " << flags);

    if (BaseClass::close(flags) != GADGET_OK)
    {
        return GADGET_FAIL;
    }

    // No gadget-specific resources to release on a final close (flags != 0).
    return GADGET_OK;
}
int CmrParametricT2MappingGadget::close(unsigned long flags)
{
    // Log every close call, then defer to the base-class shutdown.
    GDEBUG_CONDITION_STREAM(true, "CmrParametricT2MappingGadget - close(flags) : " << flags);

    if (BaseClass::close(flags) != GADGET_OK)
    {
        return GADGET_FAIL;
    }

    // No gadget-specific resources to release on a final close (flags != 0).
    return GADGET_OK;
}
void getDebugFolderPath(const std::string& debugFolder, std::string& debugFolderPath, bool verbose) { debugFolderPath = getenv("GADGETRON_DEBUG_FOLDER"); if ( debugFolderPath.empty() ) { #ifdef _WIN32 debugFolderPath = "c:/temp/gadgetron"; #else debugFolderPath = "/tmp/gadgetron"; #endif // _WIN32 } debugFolderPath.append("/"); debugFolderPath.append(debugFolder); debugFolderPath.append("/"); createFolderWithAllPermissions(debugFolderPath); GDEBUG_CONDITION_STREAM(verbose, "Debug folder is " << debugFolderPath); }
int GenericReconCartesianGrappaGadget::process_config(ACE_Message_Block *mb)
{
    // Shared base-class configuration must succeed first.
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    // -------------------------------------------------

    // Parse the ISMRMRD XML header carried in the message block.
    ISMRMRD::IsmrmrdHeader hdr;
    try
    {
        deserialize(mb->rd_ptr(), hdr);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    // Allocate one GRAPPA reconstruction workspace per encoding space.
    const size_t num_spaces = hdr.encoding.size();
    num_encoding_spaces_ = num_spaces;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << num_spaces);

    recon_obj_.resize(num_spaces);

    return GADGET_OK;
}
// Main GRAPPA reconstruction stage: for every encoding space in the incoming
// IsmrmrdReconData, prepares reference data, estimates coil maps, calibrates
// GRAPPA kernels, unwraps the undersampled data, and sends out the image
// array (plus optional g-factor and SNR-map series).
int GenericReconCartesianGrappaGadget::process(Gadgetron::GadgetContainerMessage<IsmrmrdReconData> *m1)
{
    if (perform_timing.value()) { gt_timer_local_.start("GenericReconCartesianGrappaGadget::process"); }

    process_called_times_++;

    IsmrmrdReconData *recon_bit_ = m1->getObjectPtr();
    if (recon_bit_->rbit_.size() > num_encoding_spaces_)
    {
        GWARN_STREAM("Incoming recon_bit has more encoding spaces than the protocol : " << recon_bit_->rbit_.size() << " instead of " << num_encoding_spaces_);
    }

    // optional waveform data chained onto the message; forwarded with the results
    GadgetContainerMessage< std::vector<ISMRMRD::Waveform> > * wav = AsContainerMessage< std::vector<ISMRMRD::Waveform> >(m1->cont());
    if (wav)
    {
        if (verbose.value())
        {
            GDEBUG_STREAM("Incoming recon_bit with " << wav->getObjectPtr()->size() << " wave form samples ");
        }
    }

    // for every encoding space
    for (size_t e = 0; e < recon_bit_->rbit_.size(); e++)
    {
        // suffix used for all debug-dump file names of this call/encoding
        std::stringstream os;
        os << "_encoding_" << e << "_" << process_called_times_;

        GDEBUG_CONDITION_STREAM(verbose.value(), "Calling " << process_called_times_ << " , encoding space : " << e);
        GDEBUG_CONDITION_STREAM(verbose.value(), "======================================================================");

        // ---------------------------------------------------------------
        // export incoming data
        if (!debug_folder_full_path_.empty())
        {
            gt_exporter_.export_array_complex(recon_bit_->rbit_[e].data_.data_, debug_folder_full_path_ + "data" + os.str());
        }

        // NOTE(review): the guard tests data_.trajectory_ but the inner check
        // dereferences ref_->trajectory_ — looks like a copy-paste bug (and
        // ref_ may be boost::none here). Confirm it should be data_.trajectory_.
        if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].data_.trajectory_)
        {
            if (recon_bit_->rbit_[e].ref_->trajectory_->get_number_of_elements() > 0)
            {
                gt_exporter_.export_array(*(recon_bit_->rbit_[e].data_.trajectory_), debug_folder_full_path_ + "data_traj" + os.str());
            }
        }

        // ---------------------------------------------------------------
        // calibration phase: only runs when reference data is present
        if (recon_bit_->rbit_[e].ref_)
        {
            if (!debug_folder_full_path_.empty())
            {
                gt_exporter_.export_array_complex(recon_bit_->rbit_[e].ref_->data_, debug_folder_full_path_ + "ref" + os.str());
            }

            if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].ref_->trajectory_)
            {
                if (recon_bit_->rbit_[e].ref_->trajectory_->get_number_of_elements() > 0)
                {
                    gt_exporter_.export_array(*(recon_bit_->rbit_[e].ref_->trajectory_), debug_folder_full_path_ + "ref_traj" + os.str());
                }
            }

            // ---------------------------------------------------------------
            // after this step, the recon_obj_[e].ref_calib_ and recon_obj_[e].ref_coil_map_ are set
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::make_ref_coil_map"); }
            this->make_ref_coil_map(*recon_bit_->rbit_[e].ref_, *recon_bit_->rbit_[e].data_.data_.get_dimensions(), recon_obj_[e].ref_calib_, recon_obj_[e].ref_coil_map_, e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ----------------------------------------------------------
            // export prepared ref for calibration and coil map
            if (!debug_folder_full_path_.empty())
            {
                this->gt_exporter_.export_array_complex(recon_obj_[e].ref_calib_, debug_folder_full_path_ + "ref_calib" + os.str());
            }
            if (!debug_folder_full_path_.empty())
            {
                this->gt_exporter_.export_array_complex(recon_obj_[e].ref_coil_map_, debug_folder_full_path_ + "ref_coil_map" + os.str());
            }

            // ---------------------------------------------------------------
            // after this step, the recon_obj_[e].ref_calib_dst_ and recon_obj_[e].ref_coil_map_ are modified
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::prepare_down_stream_coil_compression_ref_data"); }
            this->prepare_down_stream_coil_compression_ref_data(recon_obj_[e].ref_calib_, recon_obj_[e].ref_coil_map_, recon_obj_[e].ref_calib_dst_, e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            if (!debug_folder_full_path_.empty())
            {
                this->gt_exporter_.export_array_complex(recon_obj_[e].ref_calib_dst_, debug_folder_full_path_ + "ref_calib_dst" + os.str());
            }
            if (!debug_folder_full_path_.empty())
            {
                this->gt_exporter_.export_array_complex(recon_obj_[e].ref_coil_map_, debug_folder_full_path_ + "ref_coil_map_dst" + os.str());
            }

            // ---------------------------------------------------------------
            // after this step, coil map is computed and stored in recon_obj_[e].coil_map_
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::perform_coil_map_estimation"); }
            this->perform_coil_map_estimation(recon_obj_[e].ref_coil_map_, recon_obj_[e].coil_map_, e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ---------------------------------------------------------------
            // after this step, recon_obj_[e].kernel_, recon_obj_[e].kernelIm_, recon_obj_[e].unmixing_coeff_ are filled
            // gfactor is computed too
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::perform_calib"); }
            this->perform_calib(recon_bit_->rbit_[e], recon_obj_[e], e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ---------------------------------------------------------------
            // reference data is consumed; release it
            recon_bit_->rbit_[e].ref_->clear();
            recon_bit_->rbit_[e].ref_ = boost::none;
        }

        // unwrapping + output phase: only when there is measurement data
        if (recon_bit_->rbit_[e].data_.data_.get_number_of_elements() > 0)
        {
            if (!debug_folder_full_path_.empty())
            {
                gt_exporter_.export_array_complex(recon_bit_->rbit_[e].data_.data_, debug_folder_full_path_ + "data_before_unwrapping" + os.str());
            }

            if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].data_.trajectory_)
            {
                if (recon_bit_->rbit_[e].data_.trajectory_->get_number_of_elements() > 0)
                {
                    gt_exporter_.export_array(*(recon_bit_->rbit_[e].data_.trajectory_), debug_folder_full_path_ + "data_before_unwrapping_traj" + os.str());
                }
            }

            // ---------------------------------------------------------------
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::perform_unwrapping"); }
            this->perform_unwrapping(recon_bit_->rbit_[e], recon_obj_[e], e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ---------------------------------------------------------------
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::compute_image_header"); }
            this->compute_image_header(recon_bit_->rbit_[e], recon_obj_[e].recon_res_, e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ---------------------------------------------------------------
            // pass down waveform
            if(wav)
                recon_obj_[e].recon_res_.waveform_ = *wav->getObjectPtr();

            recon_obj_[e].recon_res_.acq_headers_ = recon_bit_->rbit_[e].data_.headers_;

            // ---------------------------------------------------------------
            if (!debug_folder_full_path_.empty())
            {
                this->gt_exporter_.export_array_complex(recon_obj_[e].recon_res_.data_, debug_folder_full_path_ + "recon_res" + os.str());
            }

            // send the regular image series
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::send_out_image_array"); }
            this->send_out_image_array(recon_obj_[e].recon_res_, e, image_series.value() + ((int) e + 1), GADGETRON_IMAGE_REGULAR);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ---------------------------------------------------------------
            // optional g-factor series (only meaningful when accelerated)
            if (send_out_gfactor.value() && recon_obj_[e].gfactor_.get_number_of_elements() > 0 && (acceFactorE1_[e] * acceFactorE2_[e] > 1))
            {
                IsmrmrdImageArray res;
                Gadgetron::real_to_complex(recon_obj_[e].gfactor_, res.data_);
                res.headers_ = recon_obj_[e].recon_res_.headers_;
                res.meta_ = recon_obj_[e].recon_res_.meta_;

                if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::send_out_image_array, gfactor"); }
                this->send_out_image_array(res, e, image_series.value() + 10 * ((int) e + 2), GADGETRON_IMAGE_GFACTOR);
                if (perform_timing.value()) { gt_timer_.stop(); }
            }

            // ---------------------------------------------------------------
            // optional SNR map: the recon itself when unaccelerated, otherwise
            // derived from the g-factor
            if (send_out_snr_map.value())
            {
                hoNDArray<std::complex<float> > snr_map;

                if (calib_mode_[e] == Gadgetron::ISMRMRD_noacceleration)
                {
                    snr_map = recon_obj_[e].recon_res_.data_;
                }
                else
                {
                    if (recon_obj_[e].gfactor_.get_number_of_elements() > 0)
                    {
                        if (perform_timing.value()) { gt_timer_.start("compute SNR map array"); }
                        this->compute_snr_map(recon_obj_[e], snr_map);
                        if (perform_timing.value()) { gt_timer_.stop(); }
                    }
                }

                if (snr_map.get_number_of_elements() > 0)
                {
                    if (!debug_folder_full_path_.empty())
                    {
                        this->gt_exporter_.export_array_complex(snr_map, debug_folder_full_path_ + "snr_map" + os.str());
                    }

                    if (perform_timing.value()) { gt_timer_.start("send out gfactor array, snr map"); }

                    IsmrmrdImageArray res;
                    res.data_ = snr_map;
                    res.headers_ = recon_obj_[e].recon_res_.headers_;
                    res.meta_ = recon_obj_[e].recon_res_.meta_;
                    res.acq_headers_ = recon_bit_->rbit_[e].data_.headers_;

                    this->send_out_image_array(res, e, image_series.value() + 100 * ((int) e + 3), GADGETRON_IMAGE_SNR_MAP);

                    if (perform_timing.value()) { gt_timer_.stop(); }
                }
            }
        }

        // release per-call results for this encoding space
        recon_obj_[e].recon_res_.data_.clear();
        recon_obj_[e].gfactor_.clear();
        recon_obj_[e].recon_res_.headers_.clear();
        recon_obj_[e].recon_res_.meta_.clear();
    }

    m1->release();

    if (perform_timing.value()) { gt_timer_local_.stop(); }

    return GADGET_OK;
}
// Selects how many coil channels to keep for downstream processing and trims
// both the calibration reference (ref_src -> ref_dst) and the coil-map
// reference (ref_coil_map, modified in place) to that channel count.
// The channel count comes either from the fixed num_modesKept property or from
// an energy threshold relative to the first channel.
// NOTE: parameter 'e' (encoding space index) is currently unused here.
void GenericReconCartesianGrappaGadget::prepare_down_stream_coil_compression_ref_data(
    const hoNDArray<std::complex<float> > &ref_src, hoNDArray<std::complex<float> > &ref_coil_map,
    hoNDArray<std::complex<float> > &ref_dst, size_t e)
{
    if (!downstream_coil_compression.value())
    {
        GDEBUG_CONDITION_STREAM(verbose.value(), "Downstream coil compression is not prescribed ... ");
        ref_dst = ref_src;
        return;
    }

    if (downstream_coil_compression_thres.value() < 0 && downstream_coil_compression_num_modesKept.value() == 0)
    {
        GDEBUG_CONDITION_STREAM(verbose.value(), "Downstream coil compression is prescribed to use all input channels ... ");
        ref_dst = ref_src;
        return;
    }

    // determine how many channels to use
    size_t RO = ref_src.get_size(0);
    size_t E1 = ref_src.get_size(1);
    size_t E2 = ref_src.get_size(2);
    size_t CHA = ref_src.get_size(3);
    size_t N = ref_src.get_size(4);
    size_t S = ref_src.get_size(5);
    size_t SLC = ref_src.get_size(6);

    size_t recon_RO = ref_coil_map.get_size(0);
    size_t recon_E1 = ref_coil_map.get_size(1);
    size_t recon_E2 = ref_coil_map.get_size(2);

    // non-owning view onto the source data for per-channel energy computation
    std::complex<float> *pRef = const_cast< std::complex<float> * >(ref_src.begin());

    size_t dstCHA = CHA;
    if (downstream_coil_compression_num_modesKept.value() > 0 && downstream_coil_compression_num_modesKept.value() <= CHA)
    {
        // fixed number of channels requested
        dstCHA = downstream_coil_compression_num_modesKept.value();
    }
    else
    {
        // threshold mode: compute per-channel signal energy in parallel ...
        std::vector<float> E(CHA, 0);
        long long cha;
#pragma omp parallel default(none) private(cha) shared(RO, E1, E2, CHA, pRef, E)
        {
            hoNDArray<std::complex<float> > dataCha;
#pragma omp for
            for (cha = 0; cha < (long long) CHA; cha++)
            {
                // wrap channel 'cha' without copying
                dataCha.create(RO, E1, E2, pRef + cha * RO * E1 * E2);
                float v = Gadgetron::nrm2(dataCha);
                E[cha] = v * v;
            }
        }

        // ... then keep channels until one drops below thres * energy(channel 0).
        // NOTE(review): assumes channel energies are in decreasing order
        // (e.g. after an upstream coil-compression stage) — confirm.
        for (cha = 1; cha < (long long) CHA; cha++)
        {
            if (std::abs(E[cha]) < downstream_coil_compression_thres.value() * std::abs(E[0]))
            {
                break;
            }
        }

        dstCHA = cha;
    }

    GDEBUG_CONDITION_STREAM(verbose.value(), "Downstream coil compression is prescribed to use " << dstCHA << " out of " << CHA << " channels ...");

    if (dstCHA < CHA)
    {
        // copy the first dstCHA channels for every (n, s, slc); channels are
        // contiguous within each [RO E1 E2 CHA] sub-volume, so one memcpy suffices
        ref_dst.create(RO, E1, E2, dstCHA, N, S, SLC);
        hoNDArray<std::complex<float> > ref_coil_map_dst;
        ref_coil_map_dst.create(recon_RO, recon_E1, recon_E2, dstCHA, N, S, SLC);

        size_t slc, s, n;
        for (slc = 0; slc < SLC; slc++)
        {
            for (s = 0; s < S; s++)
            {
                for (n = 0; n < N; n++)
                {
                    std::complex<float> *pDst = &(ref_dst(0, 0, 0, 0, n, s, slc));
                    const std::complex<float> *pSrc = &(ref_src(0, 0, 0, 0, n, s, slc));
                    memcpy(pDst, pSrc, sizeof(std::complex<float>) * RO * E1 * E2 * dstCHA);

                    pDst = &(ref_coil_map_dst(0, 0, 0, 0, n, s, slc));
                    pSrc = &(ref_coil_map(0, 0, 0, 0, n, s, slc));
                    memcpy(pDst, pSrc, sizeof(std::complex<float>) * recon_RO * recon_E1 * recon_E2 * dstCHA);
                }
            }
        }

        ref_coil_map = ref_coil_map_dst;
    }
    else
    {
        ref_dst = ref_src;
    }
}
// Runs pixel-wise T2 mapping on the magnitude of the incoming image array.
// Outputs: map (T2 map, series 11), para (all fit parameters), and — when
// send_sd_map is enabled — map_sd (T2 SD map, series 12) and para_sd.
// Requires E2 == 1 and CHA == 1; N is the preparation-time dimension.
int CmrParametricT2MappingGadget::perform_mapping(IsmrmrdImageArray& data, IsmrmrdImageArray& map, IsmrmrdImageArray& para, IsmrmrdImageArray& map_sd, IsmrmrdImageArray& para_sd)
{
    try
    {
        if (perform_timing.value()) { gt_timer_.start("CmrParametricT2MappingGadget::perform_mapping"); }

        GDEBUG_CONDITION_STREAM(verbose.value(), "CmrParametricT2MappingGadget::perform_mapping(...) starts ... ");

        size_t RO = data.data_.get_size(0);
        size_t E1 = data.data_.get_size(1);
        size_t E2 = data.data_.get_size(2);
        size_t CHA = data.data_.get_size(3);
        size_t N = data.data_.get_size(4);
        size_t S = data.data_.get_size(5);
        size_t SLC = data.data_.get_size(6);

        size_t ro, e1, s, slc, p;

        GADGET_CHECK_RETURN(E2 == 1, GADGET_FAIL);
        GADGET_CHECK_RETURN(CHA == 1, GADGET_FAIL);
        // need one prep time per image along N
        GADGET_CHECK_RETURN(this->prep_times_.size() >= N, GADGET_FAIL);

        // fitting operates on magnitude images
        hoNDArray<float> mag;
        Gadgetron::abs(data.data_, mag);

        if (!debug_folder_full_path_.empty())
        {
            gt_exporter_.export_array(mag, debug_folder_full_path_ + "CmrParametricT2Mapping_data_mag");
        }

        bool need_sd_map = send_sd_map.value();

        Gadgetron::GadgetronTimer gt_timer(false);

        // -------------------------------------------------------------
        // set mapping parameters

        Gadgetron::CmrT2Mapping<float> t2_mapper;

        t2_mapper.fill_holes_in_maps_ = perform_hole_filling.value();
        t2_mapper.max_size_of_holes_ = max_size_hole.value();
        t2_mapper.compute_SD_maps_ = need_sd_map;

        t2_mapper.ti_.resize(N, 0);
        memcpy(&(t2_mapper.ti_)[0], &this->prep_times_[0], sizeof(float)*N);

        // reinterpret mag as [RO E1 N S SLC] (E2 and CHA are singleton), no copy
        t2_mapper.data_.create(RO, E1, N, S, SLC, mag.begin());

        t2_mapper.max_iter_ = max_iter.value();
        t2_mapper.thres_fun_ = thres_func.value();
        t2_mapper.max_map_value_ = max_T2.value();

        t2_mapper.verbose_ = verbose.value();
        t2_mapper.debug_folder_ = debug_folder_full_path_;
        t2_mapper.perform_timing_ = perform_timing.value();

        // -------------------------------------------------------------
        // compute mask if needed
        if (mapping_with_masking.value())
        {
            t2_mapper.mask_for_mapping_.create(RO, E1, SLC);

            // get the image with shortest prep time (highest signal) per slice
            hoNDArray<float> mag_shortest_TE;
            mag_shortest_TE.create(RO, E1, SLC);

            for (slc = 0; slc < SLC; slc++)
            {
                size_t ind = 0;
                float min_te = this->prep_times_[0];
                for (size_t n = 1; n < this->prep_times_.size(); n++)
                {
                    if(this->prep_times_[n]<min_te)
                    {
                        min_te = this->prep_times_[n];
                        ind = n;
                    }
                }

                memcpy(&mag_shortest_TE(0, 0, slc), &mag(0, 0, ind, 0, slc), sizeof(float)*RO*E1);
            }

            if (!debug_folder_full_path_.empty())
            {
                gt_exporter_.export_array(mag_shortest_TE, debug_folder_full_path_ + "CmrParametricT2Mapping_mag_shortest_TE");
            }

            // upstream scaling factor stored in the meta fields, if any
            double scale_factor = 1.0;
            if (data.meta_[0].length(GADGETRON_IMAGE_SCALE_RATIO) > 0)
            {
                scale_factor = data.meta_[0].as_double(GADGETRON_IMAGE_SCALE_RATIO);
            }

            GDEBUG_STREAM("CmrParametricT2MappingGadget, find incoming image has scale factor of " << scale_factor);

            if (perform_timing.value()) { gt_timer.start("CmrParametricT2MappingGadget::compute_mask_for_mapping"); }
            this->compute_mask_for_mapping(mag, t2_mapper.mask_for_mapping_, (float)scale_factor);
            if (perform_timing.value()) { gt_timer.stop(); }

            if (!debug_folder_full_path_.empty())
            {
                gt_exporter_.export_array(t2_mapper.mask_for_mapping_, debug_folder_full_path_ + "CmrParametricT2Mapping_mask_for_mapping");
            }
        }

        // -------------------------------------------------------------
        // perform mapping

        if (perform_timing.value()) { gt_timer.start("CmrParametricT2MappingGadget, t2_mapper.perform_parametric_mapping"); }
        t2_mapper.perform_parametric_mapping();
        if (perform_timing.value()) { gt_timer.stop(); }

        size_t num_para = t2_mapper.get_num_of_paras();

        // -------------------------------------------------------------
        // get the results

        map.data_.create(RO, E1, E2, CHA, 1, S, SLC);
        Gadgetron::clear(map.data_);
        map.headers_.create(1, S, SLC);
        map.meta_.resize(S*SLC);

        para.data_.create(RO, E1, E2, CHA, num_para, S, SLC);
        Gadgetron::clear(para.data_);
        para.headers_.create(num_para, S, SLC);
        para.meta_.resize(num_para*S*SLC);

        if (need_sd_map)
        {
            map_sd.data_.create(RO, E1, E2, CHA, 1, S, SLC);
            Gadgetron::clear(map_sd.data_);
            map_sd.headers_.create(1, S, SLC);
            map_sd.meta_.resize(S*SLC);

            para_sd.data_.create(RO, E1, E2, CHA, num_para, S, SLC);
            Gadgetron::clear(para_sd.data_);
            para_sd.headers_.create(num_para, S, SLC);
            para_sd.meta_.resize(num_para*S*SLC);
        }

        for (slc = 0; slc < SLC; slc++)
        {
            for (s = 0; s < S; s++)
            {
                for (e1 = 0; e1 < E1; e1++)
                {
                    for (ro = 0; ro < RO; ro++)
                    {
                        map.data_(ro, e1, 0, 0, 0, s, slc) = t2_mapper.map_(ro, e1, s, slc);
                        if (need_sd_map)
                        {
                            map_sd.data_(ro, e1, 0, 0, 0, s, slc) = t2_mapper.sd_map_(ro, e1, s, slc);
                        }

                        for (p = 0; p < num_para; p++)
                        {
                            para.data_(ro, e1, 0, 0, p, s, slc) = t2_mapper.para_(ro, e1, p, s, slc);
                            if (need_sd_map)
                            {
                                para_sd.data_(ro, e1, 0, 0, p, s, slc) = t2_mapper.sd_para_(ro, e1, p, s, slc);
                            }
                        }
                    }
                }

                size_t slc_ind = data.headers_(0, s, slc).slice;

                map.headers_(0, s, slc) = data.headers_(0, s, slc);
                map.headers_(0, s, slc).image_index = 1 + slc_ind;
                map.headers_(0, s, slc).image_series_index = 11;
                map.meta_[s+slc*S] = data.meta_[s + slc*S];
                map.meta_[s + slc*S].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_T2MAP);
                map.meta_[s + slc*S].append(GADGETRON_SEQUENCEDESCRIPTION, GADGETRON_IMAGE_T2MAP);
                map.meta_[s + slc*S].append(GADGETRON_IMAGEPROCESSINGHISTORY, GADGETRON_IMAGE_T2MAP);

                // NOTE(review): map_sd.headers_/meta_ are only created when
                // need_sd_map is true, yet they are written here unconditionally —
                // with send_sd_map disabled these writes hit empty containers.
                // Confirm whether this block should be guarded by need_sd_map.
                map_sd.headers_(0, s, slc) = data.headers_(0, s, slc);
                map_sd.headers_(0, s, slc).image_index = 1 + slc_ind;
                map_sd.headers_(0, s, slc).image_series_index = 12;
                map_sd.meta_[s + slc*S] = data.meta_[s + slc*S];
                map_sd.meta_[s + slc*S].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_T2SDMAP);
                map_sd.meta_[s + slc*S].append(GADGETRON_SEQUENCEDESCRIPTION, GADGETRON_IMAGE_T2SDMAP);
                map_sd.meta_[s + slc*S].append(GADGETRON_IMAGEPROCESSINGHISTORY, GADGETRON_IMAGE_T2SDMAP);

                // NOTE(review): para.headers_/meta_ are filled only inside this
                // need_sd_map branch even though para is always created above —
                // verify that para headers are intentionally skipped otherwise.
                if (need_sd_map)
                {
                    for (p = 0; p < num_para; p++)
                    {
                        para.headers_(p, s, slc) = data.headers_(0, s, slc);
                        para.headers_(p, s, slc).image_index = 1 + p + slc_ind*num_para;
                        para.meta_[p + s*num_para + slc*num_para*S] = data.meta_[s + slc*S];

                        para_sd.headers_(p, s, slc) = data.headers_(0, s, slc);
                        para_sd.headers_(p, s, slc).image_index = 1 + p + slc_ind*num_para;
                        para_sd.meta_[p + s*num_para + slc*num_para*S] = data.meta_[s + slc*S];
                    }
                }
            }
        }

        // -------------------------------------------------------------

        if (perform_timing.value()) { gt_timer_.stop(); }
    }
    catch (...)
    {
        GERROR_STREAM("Exceptions happened in CmrParametricT2MappingGadget::perform_mapping(...) ... ");
        return GADGET_FAIL;
    }

    return GADGET_OK;
}
// Pipeline stage: detects a partially-sampled k-space region from the meta
// fields of the incoming image array, transforms to k-space, runs the
// derived-class partial Fourier algorithm, transforms back, and forwards the
// result. Passes the array through untouched when no PF handling is needed.
int GenericReconPartialFourierHandlingGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdImageArray >* m1)
{
    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconPartialFourierHandlingGadget::process(...) starts ... ");

    process_called_times_++;

    IsmrmrdImageArray* recon_res_ = m1->getObjectPtr();

    // print out recon info
    if (verbose.value())
    {
        GDEBUG_STREAM("----> GenericReconPartialFourierHandlingGadget::process(...) has been called " << process_called_times_ << " times ...");
        std::stringstream os;
        recon_res_->data_.print(os);
        GDEBUG_STREAM(os.str());
    }

    // some images do not need partial fourier handling processing
    if (recon_res_->meta_[0].length(skip_processing_meta_field.value().c_str())>0)
    {
        if (this->next()->putq(m1) == -1)
        {
            GERROR("GenericReconPartialFourierHandlingGadget::process, passing incoming image array on to next gadget");
            return GADGET_FAIL;
        }

        return GADGET_OK;
    }

    // call the partial foureir
    size_t encoding = (size_t)recon_res_->meta_[0].as_long("encoding", 0);
    GADGET_CHECK_RETURN(encoding<num_encoding_spaces_, GADGET_FAIL);

    // read back the sampling limits (min/center/max per dimension) that an
    // upstream gadget stored in the meta fields
    SamplingLimit sampling_limits[3];

    sampling_limits[0].min_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 0);
    sampling_limits[0].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 1);
    sampling_limits[0].max_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 2);

    sampling_limits[1].min_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 0);
    sampling_limits[1].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 1);
    sampling_limits[1].max_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 2);

    sampling_limits[2].min_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 0);
    sampling_limits[2].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 1);
    sampling_limits[2].max_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 2);

    size_t RO = recon_res_->data_.get_size(0);
    size_t E1 = recon_res_->data_.get_size(1);
    size_t E2 = recon_res_->data_.get_size(2);
    size_t CHA = recon_res_->data_.get_size(3);
    size_t N = recon_res_->data_.get_size(4);
    size_t S = recon_res_->data_.get_size(5);
    size_t SLC = recon_res_->data_.get_size(6);

    // ----------------------------------------------------------
    // pf kspace sampling range
    // ----------------------------------------------------------
    // if image padding is performed, those dimension may not need partial fourier handling
    startRO_ = sampling_limits[0].min_;
    endRO_ = sampling_limits[0].max_;

    startE1_ = 0;
    endE1_ = E1 - 1;

    startE2_ = 0;
    endE2_ = E2 - 1;

    // treat E1/E2 as partially sampled only when the sampled range is clearly
    // asymmetric about the center (beyond what the acceleration factor explains)
    if (std::abs((double)(sampling_limits[1].max_ - E1 / 2) - (double)(E1 / 2 - sampling_limits[1].min_)) > acceFactorE1_[encoding])
    {
        startE1_ = sampling_limits[1].min_;
        endE1_ = sampling_limits[1].max_;
    }

    if ((E2>1) && (std::abs((double)(sampling_limits[2].max_ - E2 / 2) - (double)(E2 / 2 - sampling_limits[2].min_)) > acceFactorE2_[encoding]))
    {
        startE2_ = sampling_limits[2].min_;
        endE2_ = sampling_limits[2].max_;
    }

    long lenRO = endRO_ - startRO_ + 1;
    long lenE1 = endE1_ - startE1_ + 1;
    long lenE2 = endE2_ - startE2_ + 1;

    // fully sampled in every dimension: nothing to do, pass straight through
    if (lenRO == RO && lenE1 == E1 && lenE2 == E2)
    {
        GDEBUG_CONDITION_STREAM(verbose.value(), "lenRO == RO && lenE1 == E1 && lenE2 == E2");

        if (this->next()->putq(m1) == -1)
        {
            GERROR("GenericReconPartialFourierHandlingGadget::process, passing data on to next gadget");
            return GADGET_FAIL;
        }

        return GADGET_OK;
    }

    // ----------------------------------------------------------
    // go to kspace
    // ----------------------------------------------------------
    if (E2 > 1)
    {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->fft3c(recon_res_->data_, kspace_buf_);
    }
    else
    {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->fft2c(recon_res_->data_, kspace_buf_);
    }

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(kspace_buf_, debug_folder_full_path_ + "kspace_before_pf"); }*/

    // ----------------------------------------------------------
    // pf handling (implemented by the derived class; reads kspace_buf_, fills pf_res_)
    // ----------------------------------------------------------
    GADGET_CHECK_RETURN(this->perform_partial_fourier_handling() == GADGET_OK, GADGET_FAIL);

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(pf_res_, debug_folder_full_path_ + "kspace_after_pf"); }*/

    // ----------------------------------------------------------
    // go back to image domain
    // ----------------------------------------------------------
    if (E2 > 1)
    {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->ifft3c(pf_res_, recon_res_->data_);
    }
    else
    {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->ifft2c(pf_res_, recon_res_->data_);
    }

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(recon_res_->data_, debug_folder_full_path_ + "data_after_pf"); }*/

    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconPartialFourierHandlingGadget::process(...) ends ... ");

    // ----------------------------------------------------------
    // send out results
    // ----------------------------------------------------------
    if (this->next()->putq(m1) == -1)
    {
        GERROR("GenericReconPartialFourierHandlingGadget::process, passing data on to next gadget");
        return GADGET_FAIL;
    }

    return GADGET_OK;
}
int GenericReconFieldOfViewAdjustmentGadget::process_config(ACE_Message_Block* mb)
{
    // Initialize the base class configuration first (consistent with the
    // canonical definition of this override elsewhere in the gadget chain);
    // previously the base-class setup was skipped entirely.
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    // Parse the ISMRMRD XML header carried in the message block.
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    // Record, per encoding space, the encoded FOV, recon FOV and recon matrix
    // size; these drive the FOV adjustment performed later in process().
    encoding_FOV_.resize(NE);
    recon_FOV_.resize(NE);
    recon_size_.resize(NE);

    size_t e;
    for (e = 0; e < NE; e++)
    {
        encoding_FOV_[e].resize(3, 0);
        encoding_FOV_[e][0] = h.encoding[e].encodedSpace.fieldOfView_mm.x;
        encoding_FOV_[e][1] = h.encoding[e].encodedSpace.fieldOfView_mm.y;
        encoding_FOV_[e][2] = h.encoding[e].encodedSpace.fieldOfView_mm.z;

        recon_FOV_[e].resize(3, 0);
        recon_FOV_[e][0] = h.encoding[e].reconSpace.fieldOfView_mm.x;
        recon_FOV_[e][1] = h.encoding[e].reconSpace.fieldOfView_mm.y;
        recon_FOV_[e][2] = h.encoding[e].reconSpace.fieldOfView_mm.z;

        recon_size_[e].resize(3, 0);
        recon_size_[e][0] = h.encoding[e].reconSpace.matrixSize.x;
        recon_size_[e][1] = h.encoding[e].reconSpace.matrixSize.y;
        recon_size_[e][2] = h.encoding[e].reconSpace.matrixSize.z;

        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - encoding FOV : [" << encoding_FOV_[e][0] << " " << encoding_FOV_[e][1] << " " << encoding_FOV_[e][2] << " ]");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - recon    FOV : [" << recon_FOV_[e][0] << " " << recon_FOV_[e][1] << " " << recon_FOV_[e][2] << " ]");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding space : " << e << " - recon    size : [" << recon_size_[e][0] << " " << recon_size_[e][1] << " " << recon_size_[e][2] << " ]");
    }

    // ---------------------------------------------------------------------------------------------------------
    // generate the destination folder
    /*if (!debug_folder.value().empty()) { Gadgetron::get_debug_folder_path(debug_folder.value(), debug_folder_full_path_); GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is " << debug_folder_full_path_); } else { GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is not set ... "); }*/

    return GADGET_OK;
}
int GenericReconCartesianReferencePrepGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD header and, for every encoding space, record the
    // parallel-imaging calibration mode used to prepare the reference data.
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    calib_mode_.resize(NE, ISMRMRD_noacceleration);
    ref_prepared_.resize(NE, false);

    for (size_t e = 0; e < h.encoding.size(); e++)
    {
        ISMRMRD::EncodingSpace e_space = h.encoding[e].encodedSpace;
        ISMRMRD::EncodingSpace r_space = h.encoding[e].reconSpace;
        ISMRMRD::EncodingLimits e_limits = h.encoding[e].encodingLimits;

        GDEBUG_CONDITION_STREAM(verbose.value(), "---> Encoding space : " << e << " <---");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding matrix size: " << e_space.matrixSize.x << " " << e_space.matrixSize.y << " " << e_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding field_of_view : " << e_space.fieldOfView_mm.x << " " << e_space.fieldOfView_mm.y << " " << e_space.fieldOfView_mm.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon matrix size : " << r_space.matrixSize.x << " " << r_space.matrixSize.y << " " << r_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon field_of_view : " << r_space.fieldOfView_mm.x << " " << r_space.fieldOfView_mm.y << " " << r_space.fieldOfView_mm.z);

        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
        }
        else
        {
            // BUG FIX: read the parallel-imaging section of encoding space e;
            // the previous code always read h.encoding[0], so every encoding
            // space inherited the calibration mode of the first one.
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << p_imaging.accelerationFactor.kspace_encoding_step_1);
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << p_imaging.accelerationFactor.kspace_encoding_step_2);

            // NOTE(review): calibrationMode is dereferenced without an
            // is_present() check, as in the original — confirm headers always
            // carry it when a parallelImaging section exists.
            std::string calib = *p_imaging.calibrationMode;

            bool separate = (calib.compare("separate") == 0);
            bool embedded = (calib.compare("embedded") == 0);
            bool external = (calib.compare("external") == 0);
            bool interleaved = (calib.compare("interleaved") == 0);
            bool other = (calib.compare("other") == 0);

            // Only an accelerated scan gets a non-trivial calibration mode.
            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (p_imaging.accelerationFactor.kspace_encoding_step_1 > 1 || p_imaging.accelerationFactor.kspace_encoding_step_2 > 1)
            {
                if (interleaved)
                    calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                else if (embedded)
                    calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                else if (separate)
                    calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                else if (external)
                    calib_mode_[e] = Gadgetron::ISMRMRD_external;
                else if (other)
                    calib_mode_[e] = Gadgetron::ISMRMRD_other;
            }
        }
    }

    // ---------------------------------------------------------------------------------------------------------

    return GADGET_OK;
}
int GenericReconEigenChannelGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD header, size the per-encoding-space KLT coefficient
    // storage, and record the calibration mode of each encoding space.
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    calib_mode_.resize(NE, ISMRMRD_noacceleration);
    KLT_.resize(NE);

    for (size_t e = 0; e < h.encoding.size(); e++)
    {
        ISMRMRD::EncodingSpace e_space = h.encoding[e].encodedSpace;
        ISMRMRD::EncodingSpace r_space = h.encoding[e].reconSpace;
        ISMRMRD::EncodingLimits e_limits = h.encoding[e].encodingLimits;

        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
        }
        else
        {
            // BUG FIX: use h.encoding[e] rather than h.encoding[0]; the old
            // code applied encoding space 0's calibration mode to all spaces.
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            std::string calib = *p_imaging.calibrationMode;

            bool separate = (calib.compare("separate") == 0);
            bool embedded = (calib.compare("embedded") == 0);
            bool external = (calib.compare("external") == 0);
            bool interleaved = (calib.compare("interleaved") == 0);
            bool other = (calib.compare("other") == 0);

            // Only an accelerated scan gets a non-trivial calibration mode.
            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (p_imaging.accelerationFactor.kspace_encoding_step_1 > 1 || p_imaging.accelerationFactor.kspace_encoding_step_2 > 1)
            {
                if (interleaved)
                    calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                else if (embedded)
                    calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                else if (separate)
                    calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                else if (external)
                    calib_mode_[e] = Gadgetron::ISMRMRD_external;
                else if (other)
                    calib_mode_[e] = Gadgetron::ISMRMRD_other;
            }
        }
    }

    // ---------------------------------------------------------------------------------------------------------
    /*if (!debug_folder.value().empty()) { Gadgetron::get_debug_folder_path(debug_folder.value(), debug_folder_full_path_); GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is " << debug_folder_full_path_); } else { GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is not set ... "); }*/

    return GADGET_OK;
}
bool findCalibMode(ISMRMRD::IsmrmrdHeader& h, Gadgetron::ISMRMRDCALIBMODE& CalibMode, ISMRMRDDIM& InterleaveDim, double& acceFactorE1, double& acceFactorE2, bool verbose) { try { if (!h.encoding[0].parallelImaging) { GERROR_STREAM("Parallel Imaging section not found in header"); return false; } ISMRMRD::ParallelImaging p_imaging = *h.encoding[0].parallelImaging; acceFactorE1 = (double)(p_imaging.accelerationFactor.kspace_encoding_step_1); acceFactorE2 = (double)(p_imaging.accelerationFactor.kspace_encoding_step_2); GDEBUG_CONDITION_STREAM(verbose, "acceFactorE1 is " << acceFactorE1); GDEBUG_CONDITION_STREAM(verbose, "acceFactorE2 is " << acceFactorE2); if ( !p_imaging.calibrationMode.is_present() ) { GERROR_STREAM("Parallel calibration mode not found in header"); return false; } std::string calib = *p_imaging.calibrationMode; if ( calib.compare("interleaved") == 0 ) { CalibMode = Gadgetron::ISMRMRD_interleaved; GDEBUG_CONDITION_STREAM(verbose, "Calibration mode is interleaved"); if ( p_imaging.interleavingDimension ) { if ( p_imaging.interleavingDimension->compare("phase") == 0 ) { InterleaveDim = Gadgetron::DIM_Phase; } else if ( p_imaging.interleavingDimension->compare("repetition") == 0 ) { InterleaveDim = Gadgetron::DIM_Repetition; } else if ( p_imaging.interleavingDimension->compare("average") == 0 ) { InterleaveDim = Gadgetron::DIM_Average; } else if ( p_imaging.interleavingDimension->compare("contrast") == 0 ) { InterleaveDim = Gadgetron::DIM_Contrast; } else if ( p_imaging.interleavingDimension->compare("other") == 0 ) { InterleaveDim = Gadgetron::DIM_other1; } else { GERROR_STREAM("Unknown interleaving dimension. 
Bailing out"); return false; } } } else if ( calib.compare("embedded") == 0 ) { CalibMode = Gadgetron::ISMRMRD_embedded; GDEBUG_CONDITION_STREAM(verbose, "Calibration mode is embedded"); } else if ( calib.compare("separate") == 0 ) { CalibMode = Gadgetron::ISMRMRD_separate; GDEBUG_CONDITION_STREAM(verbose, "Calibration mode is separate"); } else if ( calib.compare("external") == 0 ) { CalibMode = Gadgetron::ISMRMRD_external; } else if ( (calib.compare("other") == 0) && acceFactorE1==1 && acceFactorE2==1 ) { CalibMode = Gadgetron::ISMRMRD_noacceleration; acceFactorE1=1; } else if ( (calib.compare("other") == 0) && (acceFactorE1>1 || acceFactorE2>1) ) { CalibMode = Gadgetron::ISMRMRD_interleaved; acceFactorE1=2; InterleaveDim = Gadgetron::DIM_Phase; } else { GERROR_STREAM("Failed to process parallel imaging calibration mode"); return false; } } catch(...) { GERROR_STREAM("Error happened in findCalibMode(...) ... "); return false; } return true; }
int MultiChannelCartesianGrappaReconGadget::process_config(ACE_Message_Block* mb)
{
    // Parse the ISMRMRD header; for every encoding space record the maximal
    // acquisition indices, acceleration factors and calibration mode, and
    // size the per-encoding reconstruction objects.
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    meas_max_idx_.resize(NE);
    acceFactorE1_.resize(NE, 1);
    acceFactorE2_.resize(NE, 1);
    calib_mode_.resize(NE, ISMRMRD_noacceleration);
    recon_obj_.resize(NE);

    size_t e;
    for (e = 0; e < h.encoding.size(); e++)
    {
        ISMRMRD::EncodingSpace e_space = h.encoding[e].encodedSpace;
        ISMRMRD::EncodingSpace r_space = h.encoding[e].reconSpace;
        ISMRMRD::EncodingLimits e_limits = h.encoding[e].encodingLimits;

        GDEBUG_CONDITION_STREAM(verbose.value(), "---> Encoding space : " << e << " <---");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding matrix size: " << e_space.matrixSize.x << " " << e_space.matrixSize.y << " " << e_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding field_of_view : " << e_space.fieldOfView_mm.x << " " << e_space.fieldOfView_mm.y << " " << e_space.fieldOfView_mm.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon matrix size : " << r_space.matrixSize.x << " " << r_space.matrixSize.y << " " << r_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon field_of_view : " << r_space.fieldOfView_mm.x << " " << r_space.fieldOfView_mm.y << " " << r_space.fieldOfView_mm.z);

        // Maximal acquisition indices; limits with maximum 0 (or absent) fall
        // back to 0.
        meas_max_idx_[e].kspace_encode_step_1 = (uint16_t)e_space.matrixSize.y - 1;
        meas_max_idx_[e].set = (e_limits.set && (e_limits.set->maximum > 0)) ? e_limits.set->maximum : 0;
        meas_max_idx_[e].phase = (e_limits.phase && (e_limits.phase->maximum > 0)) ? e_limits.phase->maximum : 0;
        meas_max_idx_[e].kspace_encode_step_2 = (uint16_t)e_space.matrixSize.z - 1;
        meas_max_idx_[e].contrast = (e_limits.contrast && (e_limits.contrast->maximum > 0)) ? e_limits.contrast->maximum : 0;
        // BUG FIX: slice was assigned twice with the identical expression;
        // the duplicate statement has been removed.
        meas_max_idx_[e].slice = (e_limits.slice && (e_limits.slice->maximum > 0)) ? e_limits.slice->maximum : 0;
        meas_max_idx_[e].repetition = e_limits.repetition ? e_limits.repetition->maximum : 0;
        meas_max_idx_[e].average = e_limits.average ? e_limits.average->maximum : 0;
        meas_max_idx_[e].segment = 0;

        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
            acceFactorE1_[e] = 1;
            acceFactorE2_[e] = 1;
        }
        else
        {
            // BUG FIX: use h.encoding[e] rather than h.encoding[0]; the old
            // code applied encoding space 0's parallel-imaging settings to
            // every encoding space.
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            acceFactorE1_[e] = p_imaging.accelerationFactor.kspace_encoding_step_1;
            acceFactorE2_[e] = p_imaging.accelerationFactor.kspace_encoding_step_2;
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << acceFactorE1_[e]);
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << acceFactorE2_[e]);

            std::string calib = *p_imaging.calibrationMode;

            bool separate = (calib.compare("separate") == 0);
            bool embedded = (calib.compare("embedded") == 0);
            bool external = (calib.compare("external") == 0);
            bool interleaved = (calib.compare("interleaved") == 0);
            bool other = (calib.compare("other") == 0);

            // Only an accelerated scan gets a non-trivial calibration mode.
            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (acceFactorE1_[e] > 1 || acceFactorE2_[e] > 1)
            {
                if (interleaved)
                    calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                else if (embedded)
                    calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                else if (separate)
                    calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                else if (external)
                    calib_mode_[e] = Gadgetron::ISMRMRD_external;
                else if (other)
                    calib_mode_[e] = Gadgetron::ISMRMRD_other;
            }
        }
    }

    // ---------------------------------------------------------------------------------------------------------
    /*if (!debug_folder.value().empty()) { Gadgetron::get_debug_folder_path(debug_folder.value(), debug_folder_full_path_); GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is " << debug_folder_full_path_); } else { GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is not set ... "); }*/

    return GADGET_OK;
}
int MultiChannelCartesianGrappaReconGadget::send_out_image_array(IsmrmrdReconBit& recon_bit, IsmrmrdImageArray& res, size_t encoding, int series_num, const std::string& data_role)
{
    // Fill in image numbers, series index and meta attributes for every image
    // in the 7D result array, then forward the whole array to the next gadget.
    // Returns GADGET_OK on success, GADGET_FAIL on queueing errors or exceptions.
    try
    {
        // Array layout: [RO E1 E2 CHA N S SLC].
        size_t RO = res.data_.get_size(0);
        size_t E1 = res.data_.get_size(1);
        size_t E2 = res.data_.get_size(2);
        size_t CHA = res.data_.get_size(3);
        size_t N = res.data_.get_size(4);
        size_t S = res.data_.get_size(5);
        size_t SLC = res.data_.get_size(6);

        GDEBUG_CONDITION_STREAM(true, "sending out image array, acquisition boundary [RO E1 E2 CHA N S SLC] = [" << RO << " " << E1 << " " << E2 << " " << CHA << " " << N << " " << S << " " << SLC << "] ");

        // compute image numbers and fill the image meta
        size_t n, s, slc;
        for (slc = 0; slc < SLC; slc++)
        {
            for (s = 0; s < S; s++)
            {
                for (n = 0; n < N; n++)
                {
                    ISMRMRD::ImageHeader header = res.headers_(n, s, slc);

                    // measurement_uid == 0 marks an unused/empty header slot.
                    if (header.measurement_uid == 0) continue;

                    res.headers_(n, s, slc).image_index = (uint16_t)this->compute_image_number(res.headers_(n, s, slc), encoding, CHA, 0, E2);
                    res.headers_(n, s, slc).image_series_index = series_num;

                    // Linear index into the meta array matching (n, s, slc).
                    size_t offset = n + s*N + slc*N*S;
                    res.meta_[offset].set(GADGETRON_IMAGENUMBER, (long)res.headers_(n, s, slc).image_index);
                    res.meta_[offset].set(GADGETRON_IMAGEPROCESSINGHISTORY, "GT");

                    if (data_role == GADGETRON_IMAGE_REGULAR)
                    {
                        res.headers_(n, s, slc).image_type = ISMRMRD::ISMRMRD_IMTYPE_MAGNITUDE;

                        res.meta_[offset].append(GADGETRON_IMAGECOMMENT, "GT");
                        res.meta_[offset].append(GADGETRON_SEQUENCEDESCRIPTION, "_GT");
                        res.meta_[offset].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_REGULAR);
                    }
                    else if (data_role == GADGETRON_IMAGE_GFACTOR)
                    {
                        res.headers_(n, s, slc).image_type = ISMRMRD::ISMRMRD_IMTYPE_MAGNITUDE;

                        res.meta_[offset].append(GADGETRON_IMAGECOMMENT, GADGETRON_IMAGE_GFACTOR);
                        res.meta_[offset].append(GADGETRON_SEQUENCEDESCRIPTION, GADGETRON_IMAGE_GFACTOR);
                        res.meta_[offset].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_GFACTOR);

                        // set the skip processing flag, so gfactor map will not be processed during e.g. partial fourier handling or kspace filter gadgets
                        res.meta_[offset].set(GADGETRON_SKIP_PROCESSING_AFTER_RECON, (long)1);

                        // set the flag to use dedicated scaling factor
                        res.meta_[offset].set(GADGETRON_USE_DEDICATED_SCALING_FACTOR, (long)1);
                    }

                    if (verbose.value())
                    {
                        for (size_t cha = 0; cha < CHA; cha++)
                        {
                            GDEBUG_STREAM("sending out " << data_role << " image [CHA SLC CON PHS REP SET AVE] = [" << cha << " "<< res.headers_(n, s, slc).slice << " " << res.headers_(n, s, slc).contrast << " "<< res.headers_(n, s, slc).phase << " " << res.headers_(n, s, slc).repetition << " " << res.headers_(n, s, slc).set << " " << res.headers_(n, s, slc).average << " " << "] "<< " -- Image number -- " << res.headers_(n, s, slc).image_index);
                        }
                    }
                }
            }
        }

        // send out the images; the container message copies the whole array.
        Gadgetron::GadgetContainerMessage<IsmrmrdImageArray>* cm1 = new Gadgetron::GadgetContainerMessage<IsmrmrdImageArray>();
        *(cm1->getObjectPtr()) = res;
        if (this->next()->putq(cm1) < 0)
        {
            GERROR_STREAM("Put image array to Q failed ... ");
            return GADGET_FAIL;
        }
    }
    catch (...)
    {
        GERROR_STREAM("Errors in MultiChannelCartesianGrappaReconGadget::send_out_image_array(...) ... ");
        return GADGET_FAIL;
    }

    return GADGET_OK;
}
int MultiChannelCartesianGrappaReconGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdReconData >* m1)
{
    // Main reconstruction entry point. For each encoding space: calibrate
    // (coil map + GRAPPA kernel) when reference data is present, then unwrap
    // the undersampled data, fill image headers and send the images downstream.
    process_called_times_++;

    IsmrmrdReconData* recon_bit_ = m1->getObjectPtr();
    if (recon_bit_->rbit_.size() > num_encoding_spaces_)
    {
        GWARN_STREAM("Incoming recon_bit has more encoding spaces than the protocol : " << recon_bit_->rbit_.size() << " instead of " << num_encoding_spaces_);
    }

    // for every encoding space
    for (size_t e = 0; e < recon_bit_->rbit_.size(); e++)
    {
        std::stringstream os;
        os << "_encoding_" << e;

        GDEBUG_CONDITION_STREAM(verbose.value(), "Calling " << process_called_times_ << " , encoding space : " << e);
        GDEBUG_CONDITION_STREAM(verbose.value(), "======================================================================");

        // ---------------------------------------------------------------
        if (recon_bit_->rbit_[e].ref_)
        {
            // ---------------------------------------------------------------
            // after this step, the recon_obj_[e].ref_calib_ and recon_obj_[e].ref_coil_map_ are set
            this->make_ref_coil_map(*recon_bit_->rbit_[e].ref_,*recon_bit_->rbit_[e].data_.data_.get_dimensions(), recon_obj_[e], e);

            // after this step, coil map is computed and stored in recon_obj_[e].coil_map_
            if(!flatmap.value())
                this->perform_coil_map_estimation(recon_bit_->rbit_[e], recon_obj_[e], e);
            else
            {
                // flat-map mode: reuse the ref coil map's geometry and fill
                // with unit sensitivities instead of estimating a coil map.
                recon_obj_[e].coil_map_=recon_obj_[e].ref_coil_map_;
                recon_obj_[e].coil_map_.fill(1);//create(*recon_bit_->rbit_[e].data_.data_.get_dimensions());
            }

            // after this step, recon_obj_[e].kernel_, recon_obj_[e].kernelIm_, recon_obj_[e].unmixing_coeff_ are filled
            // gfactor is computed too
            this->perform_calib(recon_bit_->rbit_[e], recon_obj_[e], e);

            // ---------------------------------------------------------------
            // reference data consumed; drop it so it is not re-calibrated.
            recon_bit_->rbit_[e].ref_ = boost::none;
        }

        if (recon_bit_->rbit_[e].data_.data_.get_number_of_elements() > 0)
        {
            // ---------------------------------------------------------------
            this->perform_unwrapping(recon_bit_->rbit_[e], recon_obj_[e], e);

            // ---------------------------------------------------------------
            this->compute_image_header(recon_bit_->rbit_[e], recon_obj_[e], e);

            // ---------------------------------------------------------------
            // series number is offset per encoding space (e + 1).
            this->send_out_image_array(recon_bit_->rbit_[e], recon_obj_[e].recon_res_, e, image_series.value() + ((int)e + 1), GADGETRON_IMAGE_REGULAR);
        }

        // Release per-call result buffers; calibration state is kept for the
        // next call.
        recon_obj_[e].recon_res_.data_.clear();
        recon_obj_[e].gfactor_.clear();
        recon_obj_[e].recon_res_.headers_.clear();
        recon_obj_[e].recon_res_.meta_.clear();
    }

    m1->release();

    return GADGET_OK;
}
int GenericReconEigenChannelGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdReconData >* m1)
{
    // Apply eigen-channel (KLT) coil compression to the incoming recon data.
    // KLT coefficients are (re)computed from reference data when available,
    // otherwise from the data itself, then applied to both data and reference.
    if (perform_timing.value()) { gt_timer_.start("GenericReconEigenChannelGadget::process"); }

    process_called_times_++;

    IsmrmrdReconData* recon_bit_ = m1->getObjectPtr();
    if (recon_bit_->rbit_.size() > num_encoding_spaces_)
    {
        GWARN_STREAM("Incoming recon_bit has more encoding spaces than the protocol : " << recon_bit_->rbit_.size() << " instead of " << num_encoding_spaces_);
    }

    // for every encoding space, prepare the recon_bit_->rbit_[e].ref_
    size_t e, n, s, slc;
    for (e = 0; e < recon_bit_->rbit_.size(); e++)
    {
        auto & rbit = recon_bit_->rbit_[e];
        std::stringstream os;
        os << "_encoding_" << e;

        // Data layout: [RO E1 E2 CHA N S SLC].
        hoNDArray< std::complex<float> >& data = recon_bit_->rbit_[e].data_.data_;

        size_t RO = data.get_size(0);
        size_t E1 = data.get_size(1);
        size_t E2 = data.get_size(2);
        size_t CHA = data.get_size(3);
        size_t N = data.get_size(4);
        size_t S = data.get_size(5);
        size_t SLC = data.get_size(6);

        GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconEigenChannelGadget - incoming data array : [RO E1 E2 CHA N S SLC] - [" << RO << " " << E1 << " " << E2 << " " << CHA << " " << N << " " << S << " " << SLC << "]");

        // whether it is needed to update coefficients: recompute when forced
        // by configuration or when the cached KLT dimensions no longer match
        // the incoming [SLC][S][N] structure.
        bool recompute_coeff = false;
        if ( (KLT_[e].size()!=SLC) || update_eigen_channel_coefficients.value() )
        {
            recompute_coeff = true;
        }
        else
        {
            if(KLT_[e].size() == SLC)
            {
                for (slc = 0; slc < SLC; slc++)
                {
                    if (KLT_[e][slc].size() != S)
                    {
                        recompute_coeff = true;
                        break;
                    }
                    else
                    {
                        for (s = 0; s < S; s++)
                        {
                            if (KLT_[e][slc][s].size() != N)
                            {
                                recompute_coeff = true;
                                break;
                            }
                        }
                    }
                }
            }
        }

        if(recompute_coeff)
        {
            bool average_N = average_all_ref_N.value();
            bool average_S = average_all_ref_S.value();

            if(rbit.ref_)
            {
                // use ref to compute coefficients
                Gadgetron::compute_eigen_channel_coefficients(rbit.ref_->data_, average_N, average_S, (calib_mode_[e] == Gadgetron::ISMRMRD_interleaved), N, S, upstream_coil_compression_thres.value(), upstream_coil_compression_num_modesKept.value(), KLT_[e]);
            }
            else
            {
                // use data to compute coefficients
                Gadgetron::compute_eigen_channel_coefficients(rbit.data_.data_, average_N, average_S, (calib_mode_[e] == Gadgetron::ISMRMRD_interleaved), N, S, upstream_coil_compression_thres.value(), upstream_coil_compression_num_modesKept.value(), KLT_[e]);
            }

            if (verbose.value())
            {
                // Log the eigen spectrum and number of retained modes per
                // (slc, s, n) coefficient set.
                hoNDArray< std::complex<float> > E;
                for (slc = 0; slc < SLC; slc++)
                {
                    for (s = 0; s < S; s++)
                    {
                        for (n = 0; n < N; n++)
                        {
                            KLT_[e][slc][s][n].eigen_value(E);
                            GDEBUG_STREAM("Number of modes kept: " << KLT_[e][slc][s][n].output_length() << "; Eigen value, slc - " << slc << ", S - " << s << ", N - " << n << " : [");
                            for (size_t c = 0; c < E.get_size(0); c++)
                            {
                                GDEBUG_STREAM(" " << E(c));
                            }
                            GDEBUG_STREAM("]");
                        }
                    }
                }
            }
        }

        /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(rbit.data_.data_, debug_folder_full_path_ + "data_before_KLT" + os.str()); }*/

        // apply KL coefficients
        Gadgetron::apply_eigen_channel_coefficients(KLT_[e], rbit.data_.data_);

        /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(rbit.data_.data_, debug_folder_full_path_ + "data_after_KLT" + os.str()); }*/

        // Keep the reference data consistent with the compressed data.
        if (rbit.ref_)
        {
            /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(rbit.ref_->data_, debug_folder_full_path_ + "ref_before_KLT" + os.str()); }*/

            Gadgetron::apply_eigen_channel_coefficients(KLT_[e], rbit.ref_->data_);

            /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(rbit.ref_->data_, debug_folder_full_path_ + "ref_after_KLT" + os.str()); }*/
        }
    }

    if (perform_timing.value()) { gt_timer_.stop(); }

    if (this->next()->putq(m1) < 0)
    {
        GERROR_STREAM("Put IsmrmrdReconData to Q failed ... ");
        return GADGET_FAIL;
    }

    return GADGET_OK;
}
void BucketToBufferGadget::allocateDataArrays(IsmrmrdDataBuffered & dataBuffer, ISMRMRD::AcquisitionHeader & acqhdr, ISMRMRD::Encoding encoding, IsmrmrdAcquisitionBucketStats & stats, bool forref)
{
    // Derive the buffer dimensions [E0 E1 E2 CHA N S LOC] from the encoding
    // description, acquisition header and the observed bucket statistics, then
    // allocate the data, header and (optionally) trajectory arrays.
    // Only allocates on first use (when the data array is still empty).
    if (dataBuffer.data_.get_number_of_elements() == 0)
    {
        //Allocate the reference data array
        //7D, fixed order [E0, E1, E2, CHA, N, S, LOC]
        //11D, fixed order [E0, E1, E2, CHA, SLC, PHS, CON, REP, SET, SEG, AVE]

        // --- E0 (readout) ---
        uint16_t NE0;
        if ( ((encoding.trajectory == ISMRMRD::TrajectoryType::CARTESIAN)) || (encoding.trajectory == ISMRMRD::TrajectoryType::EPI) )
        {
            // if seperate or external calibration mode, using the acq length for NE0
            if (encoding.parallelImaging)
            {
                NE0 = acqhdr.number_of_samples;
            }
            else
            {
                NE0 = acqhdr.number_of_samples - acqhdr.discard_pre - acqhdr.discard_post;
            }
        }
        else
        {
            NE0 = acqhdr.number_of_samples - acqhdr.discard_pre - acqhdr.discard_post;
        }

        // --- E1 (phase encoding) ---
        uint16_t NE1;
        if ( ((encoding.trajectory == ISMRMRD::TrajectoryType::CARTESIAN)) || (encoding.trajectory == ISMRMRD::TrajectoryType::EPI) )
        {
            if (encoding.parallelImaging)
            {
                // separate/external calibration reference buffers are sized by
                // the E1 range actually received, not the full matrix.
                if (forref && (encoding.parallelImaging.get().calibrationMode.get() == "separate" || encoding.parallelImaging.get().calibrationMode.get() == "external"))
                {
                    NE1 = *stats.kspace_encode_step_1.rbegin() - *stats.kspace_encode_step_1.begin() + 1;
                }
                else
                {
                    NE1 = encoding.encodedSpace.matrixSize.y;
                }
            }
            else
            {
                if (encoding.encodingLimits.kspace_encoding_step_1.is_present())
                {
                    NE1 = encoding.encodingLimits.kspace_encoding_step_1->maximum - encoding.encodingLimits.kspace_encoding_step_1->minimum + 1;
                }
                else
                {
                    NE1 = encoding.encodedSpace.matrixSize.y;
                }
            }
        }
        else
        {
            // non-Cartesian: prefer the declared encoding limits, otherwise
            // fall back to the observed E1 range.
            if (encoding.encodingLimits.kspace_encoding_step_1.is_present())
            {
                NE1 = encoding.encodingLimits.kspace_encoding_step_1->maximum - encoding.encodingLimits.kspace_encoding_step_1->minimum + 1;
            }
            else
            {
                NE1 = *stats.kspace_encode_step_1.rbegin() - *stats.kspace_encode_step_1.begin() + 1;
            }
        }

        // --- E2 (partition encoding) ---
        uint16_t NE2;
        if ( ((encoding.trajectory == ISMRMRD::TrajectoryType::CARTESIAN)) || (encoding.trajectory == ISMRMRD::TrajectoryType::EPI) )
        {
            if (encoding.parallelImaging)
            {
                if (forref && (encoding.parallelImaging.get().calibrationMode.get() == "separate" || encoding.parallelImaging.get().calibrationMode.get() == "external"))
                {
                    NE2 = encoding.encodingLimits.kspace_encoding_step_2->maximum - encoding.encodingLimits.kspace_encoding_step_2->minimum + 1;
                }
                else
                {
                    NE2 = encoding.encodedSpace.matrixSize.z;
                }
            }
            else
            {
                if (encoding.encodingLimits.kspace_encoding_step_2.is_present())
                {
                    NE2 = encoding.encodingLimits.kspace_encoding_step_2->maximum - encoding.encodingLimits.kspace_encoding_step_2->minimum + 1;
                }
                else
                {
                    NE2 = encoding.encodedSpace.matrixSize.z;
                }
            }
        }
        else
        {
            if (encoding.encodingLimits.kspace_encoding_step_2.is_present())
            {
                NE2 = encoding.encodingLimits.kspace_encoding_step_2->maximum - encoding.encodingLimits.kspace_encoding_step_2->minimum + 1;
            }
            else
            {
                NE2 = *stats.kspace_encode_step_2.rbegin() - *stats.kspace_encode_step_2.begin() + 1;
            }
        }

        // --- CHA (channels) ---
        uint16_t NCHA = acqhdr.active_channels;

        // --- LOC (slices/locations) ---
        uint16_t NLOC;
        if (split_slices_)
        {
            NLOC = 1;
        }
        else
        {
            if (encoding.encodingLimits.slice.is_present())
            {
                NLOC = encoding.encodingLimits.slice->maximum - encoding.encodingLimits.slice->minimum + 1;
            }
            else
            {
                NLOC = 1;
            }

            // if the AcquisitionAccumulateTriggerGadget sort by SLC, then the stats should be used to determine NLOC
            size_t NLOC_received = *stats.slice.rbegin() - *stats.slice.begin() + 1;
            if (NLOC_received < NLOC)
            {
                NLOC = NLOC_received;
            }
        }

        // --- N dimension: sized by whichever acquisition index N_ maps to ---
        uint16_t NN;
        switch (N_) {
        case PHASE:
            NN = *stats.phase.rbegin() - *stats.phase.begin() + 1;
            break;
        case CONTRAST:
            NN = *stats.contrast.rbegin() - *stats.contrast.begin() + 1;
            break;
        case REPETITION:
            NN = *stats.repetition.rbegin() - *stats.repetition.begin() + 1;
            break;
        case SET:
            NN = *stats.set.rbegin() - *stats.set.begin() + 1;
            break;
        case SEGMENT:
            NN = *stats.segment.rbegin() - *stats.segment.begin() + 1;
            break;
        case AVERAGE:
            NN = *stats.average.rbegin() - *stats.average.begin() + 1;
            break;
        case SLICE:
            NN = *stats.slice.rbegin() - *stats.slice.begin() + 1;
            break;
        default:
            NN = 1;
        }

        // --- S dimension: sized by whichever acquisition index S_ maps to ---
        uint16_t NS;
        switch (S_) {
        case PHASE:
            NS = *stats.phase.rbegin() - *stats.phase.begin() + 1;
            break;
        case CONTRAST:
            NS = *stats.contrast.rbegin() - *stats.contrast.begin() + 1;
            break;
        case REPETITION:
            NS = *stats.repetition.rbegin() - *stats.repetition.begin() + 1;
            break;
        case SET:
            NS = *stats.set.rbegin() - *stats.set.begin() + 1;
            break;
        case SEGMENT:
            NS = *stats.segment.rbegin() - *stats.segment.begin() + 1;
            break;
        case AVERAGE:
            NS = *stats.average.rbegin() - *stats.average.begin() + 1;
            break;
        case SLICE:
            NS = *stats.slice.rbegin() - *stats.slice.begin() + 1;
            break;
        default:
            NS = 1;
        }

        GDEBUG_CONDITION_STREAM(verbose.value(), "Data dimensions [RO E1 E2 CHA N S SLC] : [" << NE0 << " " << NE1 << " " << NE2 << " " << NCHA << " " << NN << " " << NS << " " << NLOC <<"]");

        //Allocate the array for the data
        dataBuffer.data_.create(NE0, NE1, NE2, NCHA, NN, NS, NLOC);
        clear(&dataBuffer.data_);

        //Allocate the array for the headers
        dataBuffer.headers_.create(NE1, NE2, NN, NS, NLOC);

        //Allocate the array for the trajectories (only when the acquisitions
        //carry trajectory samples).
        uint16_t TRAJDIM = acqhdr.trajectory_dimensions;
        if (TRAJDIM > 0)
        {
            dataBuffer.trajectory_ = hoNDArray<float>(TRAJDIM, NE0,NE1,NE2, NN, NS, NLOC);
            clear(dataBuffer.trajectory_.get_ptr());
        }

        //boost::shared_ptr< std::vector<size_t> > dims = dataBuffer.data_.get_dimensions();
        //GDEBUG_STREAM("NDArray dims: ");
        //for( std::vector<size_t>::const_iterator i = dims->begin(); i != dims->end(); ++i) {
        //    GDEBUG_STREAM(*i << ' ');
        //}
        //GDEBUG_STREAM(std::endl);
    }
}
/// Perform non-linear SPIRIT unwrapping for one encoding space.
///
/// For every (slice, S) pair the 7-D buffers are re-viewed as 2D+T sub-arrays
/// (via pointer-aliasing hoNDArray constructors) and passed to
/// perform_nonlinear_spirit_unwrapping; results land in recon_obj.full_kspace_
/// and are then coil-combined into recon_obj.recon_res_.data_.
///
/// @param recon_bit  incoming reconstruction bit; data_.data_ is laid out as
///                   [RO E1 E2 CHA N S SLC] (7-D, as indexed below)
/// @param recon_obj  holds calibration ref, kernels, coil maps and receives the
///                   unwrapped k-space and coil-combined result
/// @param e          encoding-space index (used for debug-file suffixes and to
///                   pick acceleration factors)
/// @throws via GADGET_THROW on any internal failure (all exceptions rethrown)
void GenericReconCartesianNonLinearSpirit2DTGadget::perform_unwrapping(IsmrmrdReconBit& recon_bit, ReconObjType& recon_obj, size_t e)
{
    try
    {
        // Dimensions of the measured data: [RO E1 E2 CHA N S SLC]
        size_t RO = recon_bit.data_.data_.get_size(0);
        size_t E1 = recon_bit.data_.data_.get_size(1);
        size_t E2 = recon_bit.data_.data_.get_size(2);
        size_t dstCHA = recon_bit.data_.data_.get_size(3);
        size_t N = recon_bit.data_.data_.get_size(4);
        size_t S = recon_bit.data_.data_.get_size(5);
        size_t SLC = recon_bit.data_.data_.get_size(6);

        // Dimensions of the calibration reference data (same 7-D layout)
        hoNDArray< std::complex<float> >& src = recon_obj.ref_calib_;
        size_t ref_RO = src.get_size(0);
        size_t ref_E1 = src.get_size(1);
        size_t ref_E2 = src.get_size(2);
        size_t srcCHA = src.get_size(3);
        size_t ref_N = src.get_size(4);
        size_t ref_S = src.get_size(5);
        size_t ref_SLC = src.get_size(6);

        // Convolution kernel sizes (read but not used below in this block)
        size_t convkRO = recon_obj.kernel_.get_size(0);
        size_t convkE1 = recon_obj.kernel_.get_size(1);
        size_t convkE2 = recon_obj.kernel_.get_size(2);

        // Result holds one (coil-combined) channel; full_kspace_ is shaped like
        // the input data and will receive the unwrapped multi-channel k-space.
        recon_obj.recon_res_.data_.create(RO, E1, E2, 1, N, S, SLC);
        Gadgetron::clear(recon_obj.recon_res_.data_);
        recon_obj.full_kspace_ = recon_bit.data_.data_;
        Gadgetron::clear(recon_obj.full_kspace_);

        std::stringstream os;
        os << "encoding_" << e;
        std::string suffix = os.str();

        if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_bit.data_.data_, debug_folder_full_path_ + "data_src_" + suffix); }

        // ------------------------------------------------------------------
        // compute effective acceleration factor
        // ------------------------------------------------------------------
        // When accelerated, rescale the data so that SNR units are preserved.
        float effective_acce_factor(1), snr_scaling_ratio(1);
        this->compute_snr_scaling_factor(recon_bit, effective_acce_factor, snr_scaling_ratio);
        if (effective_acce_factor > 1)
        {
            Gadgetron::scal(snr_scaling_ratio, recon_bit.data_.data_);
        }

        Gadgetron::GadgetronTimer timer(false);

        // ------------------------------------------------------------------
        // compute the reconstruction
        // ------------------------------------------------------------------
        // Unaccelerated data needs no unwrapping: pass k-space straight through.
        if(this->acceFactorE1_[e]<=1 && this->acceFactorE2_[e]<=1)
        {
            recon_obj.full_kspace_ = recon_bit.data_.data_;
        }
        else
        {
            hoNDArray< std::complex<float> >& kspace = recon_bit.data_.data_;
            hoNDArray< std::complex<float> >& res = recon_obj.full_kspace_;
            hoNDArray< std::complex<float> >& ref = recon_obj.ref_calib_;

            // Log the full non-linear SPIRIT parameter set when verbose
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_parallel_imaging_lamda : " << this->spirit_parallel_imaging_lamda.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_image_reg_lamda : " << this->spirit_image_reg_lamda.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_data_fidelity_lamda : " << this->spirit_data_fidelity_lamda.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_nl_iter_max : " << this->spirit_nl_iter_max.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_nl_iter_thres : " << this->spirit_nl_iter_thres.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_name : " << this->spirit_reg_name.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_level : " << this->spirit_reg_level.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_keep_approx_coeff : " << this->spirit_reg_keep_approx_coeff.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_keep_redundant_dimension_coeff : " << this->spirit_reg_keep_redundant_dimension_coeff.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_proximity_across_cha : " << this->spirit_reg_proximity_across_cha.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_use_coil_sen_map : " << this->spirit_reg_use_coil_sen_map.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_RO_weighting_ratio : " << this->spirit_reg_RO_weighting_ratio.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_E1_weighting_ratio : " << this->spirit_reg_E1_weighting_ratio.value());
            GDEBUG_CONDITION_STREAM(this->verbose.value(), "spirit_reg_N_weighting_ratio : " << this->spirit_reg_N_weighting_ratio.value());

            size_t slc, s;
            for (slc = 0; slc < SLC; slc++)
            {
                for (s = 0; s < S; s++)
                {
                    std::stringstream os;
                    os << "encoding_" << e << "_s" << s << "_slc" << slc;
                    std::string suffix_2DT = os.str();

                    // ------------------------------
                    // Alias the (s, slc) sub-volume of the measured k-space as a
                    // 2D+T array; no copy is made (pointer-wrapping constructor).
                    std::complex<float>* pKspace = &kspace(0, 0, 0, 0, 0, s, slc);
                    hoNDArray< std::complex<float> > kspace2DT(RO, E1, E2, dstCHA, N, 1, 1, pKspace);

                    // ------------------------------
                    // Clamp the kernel S index: the calibration may hold fewer S
                    // entries than the data; reuse the last one.
                    long long kernelS = s;
                    if (kernelS >= (long long)ref_S) kernelS = (long long)ref_S - 1;

                    // Image-domain SPIRIT kernel for this (kernelS, slc)
                    std::complex<float>* pKIm = &recon_obj.kernelIm2D_(0, 0, 0, 0, 0, kernelS, slc);
                    hoNDArray< std::complex<float> > kIm2DT(RO, E1, srcCHA, dstCHA, ref_N, 1, 1, pKIm);

                    // ------------------------------
                    // Calibration reference view for this (kernelS, slc)
                    std::complex<float>* pRef = &ref(0, 0, 0, 0, 0, kernelS, slc);
                    hoNDArray< std::complex<float> > ref2DT(ref.get_size(0), ref.get_size(1), ref.get_size(2), dstCHA, ref_N, 1, 1, pRef);

                    // ------------------------------
                    // Coil map view, only if the coil map covers all slices;
                    // S index is clamped like kernelS above.
                    hoNDArray< std::complex<float> > coilMap2DT;
                    if (recon_obj.coil_map_.get_size(6) == SLC)
                    {
                        size_t coil_S = recon_obj.coil_map_.get_size(5);
                        std::complex<float>* pCoilMap = &recon_obj.coil_map_(0, 0, 0, 0, 0, ((s>=coil_S) ? coil_S-1 : s), slc);
                        coilMap2DT.create(RO, E1, E2, dstCHA, ref_N, 1, 1, pCoilMap);
                    }

                    // ------------------------------
                    // Output view: writing res2DT writes straight into full_kspace_.
                    std::complex<float>* pRes = &res(0, 0, 0, 0, 0, s, slc);
                    hoNDArray< std::complex<float> > res2DT(RO, E1, E2, dstCHA, N, 1, 1, pRes);

                    // ------------------------------
                    if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(kspace2DT, debug_folder_full_path_ + "kspace2DT_nl_spirit_" + suffix_2DT); }
                    if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(kIm2DT, debug_folder_full_path_ + "kIm2DT_nl_spirit_" + suffix_2DT); }
                    if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(ref2DT, debug_folder_full_path_ + "ref2DT_nl_spirit_" + suffix_2DT); }

                    // ------------------------------
                    // Run the non-linear SPIRIT solver on this 2D+T chunk
                    std::string timing_str = "SPIRIT, Non-linear unwrapping, 2DT_" + suffix_2DT;
                    if (this->perform_timing.value()) timer.start(timing_str.c_str());
                    this->perform_nonlinear_spirit_unwrapping(kspace2DT, kIm2DT, ref2DT, coilMap2DT, res2DT, e);
                    if (this->perform_timing.value()) timer.stop();

                    if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(res2DT, debug_folder_full_path_ + "res_nl_spirit_2DT_" + suffix_2DT); }
                }
            }
        }

        // ---------------------------------------------------------------------
        // compute coil combined images
        // ---------------------------------------------------------------------
        if (this->perform_timing.value()) timer.start("SPIRIT Non linear, coil combination ... ");
        this->perform_spirit_coil_combine(recon_obj);
        if (this->perform_timing.value()) timer.stop();

        if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_obj.recon_res_.data_, debug_folder_full_path_ + "unwrappedIm_" + suffix); }
    }
    catch (...)
    {
        GADGET_THROW("Errors happened in GenericReconCartesianNonLinearSpirit2DTGadget::perform_unwrapping(...) ... ");
    }
}
/// Main processing entry point for the FFT-based Cartesian recon gadget.
///
/// For each encoding space in the incoming message: optionally export debug
/// arrays, build the reference coil map from the ref data, run FFT-based
/// unwrapping/coil combination, compute image headers, and send the resulting
/// image array downstream. The message is released before returning.
///
/// @param m1  container message holding the IsmrmrdReconData; ownership is
///            taken (released here)
/// @return GADGET_OK on success
int GenericReconCartesianFFTGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdReconData >* m1)
{
    if (perform_timing.value()) { gt_timer_local_.start("GenericReconCartesianFFTGadget::process"); }

    process_called_times_++;

    IsmrmrdReconData* recon_bit_ = m1->getObjectPtr();
    if (recon_bit_->rbit_.size() > num_encoding_spaces_)
    {
        GWARN_STREAM("Incoming recon_bit has more encoding spaces than the protocol : " << recon_bit_->rbit_.size() << " instead of " << num_encoding_spaces_);
    }

    // for every encoding space
    for (size_t e = 0; e < recon_bit_->rbit_.size(); e++)
    {
        std::stringstream os;
        os << "_encoding_" << e;

        GDEBUG_CONDITION_STREAM(verbose.value(), "Calling " << process_called_times_ << " , encoding space : " << e);
        GDEBUG_CONDITION_STREAM(verbose.value(), "======================================================================");

        // ---------------------------------------------------------------
        // export incoming data

        if (!debug_folder_full_path_.empty())
        {
            gt_exporter_.export_array_complex(recon_bit_->rbit_[e].data_.data_, debug_folder_full_path_ + "data" + os.str());
        }

        if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].data_.trajectory_)
        {
            // BUG FIX: previously dereferenced ref_->trajectory_ here although the
            // guard above checks data_.trajectory_; ref_ is an optional that may be
            // empty at this point (it is only tested further below). Use
            // data_.trajectory_ consistently, matching the export that follows.
            if (recon_bit_->rbit_[e].data_.trajectory_->get_number_of_elements() > 0)
            {
                gt_exporter_.export_array(*(recon_bit_->rbit_[e].data_.trajectory_), debug_folder_full_path_ + "data_traj" + os.str());
            }
        }

        // ---------------------------------------------------------------

        if (recon_bit_->rbit_[e].ref_)
        {
            if (!debug_folder_full_path_.empty())
            {
                gt_exporter_.export_array_complex(recon_bit_->rbit_[e].ref_->data_, debug_folder_full_path_ + "ref" + os.str());
            }

            if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].ref_->trajectory_)
            {
                if (recon_bit_->rbit_[e].ref_->trajectory_->get_number_of_elements() > 0)
                {
                    gt_exporter_.export_array(*(recon_bit_->rbit_[e].ref_->trajectory_), debug_folder_full_path_ + "ref_traj" + os.str());
                }
            }

            // ---------------------------------------------------------------
            // after this step, the recon_obj_[e].ref_calib_ and recon_obj_[e].ref_coil_map_ are set

            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianFFTGadget::make_ref_coil_map"); }
            this->make_ref_coil_map(*recon_bit_->rbit_[e].ref_, *recon_bit_->rbit_[e].data_.data_.get_dimensions(), recon_obj_[e].ref_calib_, recon_obj_[e].ref_coil_map_, e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ----------------------------------------------------------
            // export prepared ref for calibration and coil map
            if (!debug_folder_full_path_.empty())
            {
                this->gt_exporter_.export_array_complex(recon_obj_[e].ref_calib_, debug_folder_full_path_ + "ref_calib" + os.str());
            }

            if (!debug_folder_full_path_.empty())
            {
                this->gt_exporter_.export_array_complex(recon_obj_[e].ref_coil_map_, debug_folder_full_path_ + "ref_coil_map" + os.str());
            }

            // ---------------------------------------------------------------
            // ref data is consumed; drop it so downstream sees no stale reference
            recon_bit_->rbit_[e].ref_ = boost::none;
        }

        if (recon_bit_->rbit_[e].data_.data_.get_number_of_elements() > 0)
        {
            if (!debug_folder_full_path_.empty())
            {
                gt_exporter_.export_array_complex(recon_bit_->rbit_[e].data_.data_, debug_folder_full_path_ + "data_before_unwrapping" + os.str());
            }

            if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].data_.trajectory_)
            {
                if (recon_bit_->rbit_[e].data_.trajectory_->get_number_of_elements() > 0)
                {
                    gt_exporter_.export_array(*(recon_bit_->rbit_[e].data_.trajectory_), debug_folder_full_path_ + "data_before_unwrapping_traj" + os.str());
                }
            }

            // ---------------------------------------------------------------
            // FFT-based unwrapping and coil combination
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianFFTGadget::perform_fft_combine"); }
            this->perform_fft_combine(recon_bit_->rbit_[e], recon_obj_[e], e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ---------------------------------------------------------------
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianFFTGadget::compute_image_header"); }
            this->compute_image_header(recon_bit_->rbit_[e], recon_obj_[e].recon_res_, e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ---------------------------------------------------------------
            if (!debug_folder_full_path_.empty())
            {
                this->gt_exporter_.export_array_complex(recon_obj_[e].recon_res_.data_, debug_folder_full_path_ + "recon_res" + os.str());
            }

            // Image series index is offset per encoding space (e + 1)
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianFFTGadget::send_out_image_array"); }
            this->send_out_image_array(recon_bit_->rbit_[e], recon_obj_[e].recon_res_, e, image_series.value() + ((int)e + 1), GADGETRON_IMAGE_REGULAR);
            if (perform_timing.value()) { gt_timer_.stop(); }
        }

        // Free per-encoding results before the next call
        recon_obj_[e].recon_res_.data_.clear();
        recon_obj_[e].recon_res_.headers_.clear();
        recon_obj_[e].recon_res_.meta_.clear();
    }

    m1->release();

    if (perform_timing.value()) { gt_timer_local_.stop(); }

    return GADGET_OK;
}