void GadgetMessageImageExt::dump() { GDEBUG_STREAM("GadgetMessageImageExt" << std::endl); GDEBUG_STREAM("----------------------------------------------------------" << std::endl); //dumpInfo(); GDEBUG_STREAM("----------------------------------------------------------" << std::endl); }
int GenericReconCartesianNonLinearSpirit2DTGadget::process_config(ACE_Message_Block* mb)
{
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    // -------------------------------------------------

    ISMRMRD::IsmrmrdHeader h;
    try { deserialize(mb->rd_ptr(), h); }
    catch (...) { GDEBUG("Error parsing ISMRMRD Header"); }

    // -------------------------------------------------
    // check the parameters

    if (this->spirit_nl_iter_max.value() == 0) {
        this->spirit_nl_iter_max.value(15);
        GDEBUG_STREAM("spirit_nl_iter_max: " << this->spirit_nl_iter_max.value());
    }

    if (this->spirit_nl_iter_thres.value() < FLT_EPSILON) {
        this->spirit_nl_iter_thres.value(0.004);
        GDEBUG_STREAM("spirit_nl_iter_thres: " << this->spirit_nl_iter_thres.value());
    }

    if (this->spirit_image_reg_lamda.value() < FLT_EPSILON) {
        if (this->spirit_reg_proximity_across_cha.value()) {
            this->spirit_image_reg_lamda.value(0.0002);
        }
        else {
            this->spirit_image_reg_lamda.value(0.00005);
        }
        GDEBUG_STREAM("spirit_image_reg_lamda: " << this->spirit_image_reg_lamda.value());
    }

    if (this->spirit_reg_N_weighting_ratio.value() < FLT_EPSILON) {
        if (acceFactorE1_[0] <= 5) {
            this->spirit_reg_N_weighting_ratio.value(10.0);
        }
        else {
            this->spirit_reg_N_weighting_ratio.value(20.0);
        }
        GDEBUG_STREAM("spirit_reg_N_weighting_ratio: " << this->spirit_reg_N_weighting_ratio.value());
    }

    return GADGET_OK;
}
int CmrParametricT2MappingGadget::process_config(ACE_Message_Block* mb)
{
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    ISMRMRD::IsmrmrdHeader h;
    try { deserialize(mb->rd_ptr(), h); }
    catch (...) { GDEBUG("Error parsing ISMRMRD Header"); }

    if (!h.acquisitionSystemInformation) {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    GDEBUG_STREAM("Reading T2 prep times from protocol ... ");

    // set the number of prep times to be the number of SET
    this->prep_times_.resize(this->meas_max_idx_.set + 1);

    if (h.userParameters) {
        size_t i = 0;
        if (h.userParameters->userParameterDouble.size() > 0) {
            std::vector<ISMRMRD::UserParameterDouble>::const_iterator iter = h.userParameters->userParameterDouble.begin();
            for (; iter != h.userParameters->userParameterDouble.end(); iter++) {
                std::string usrParaName = iter->name;
                double usrParaValue = iter->value;

                std::stringstream str;
                str << "T2PrepDuration_" << i;

                if (usrParaName == str.str() && i < this->prep_times_.size()) {
                    this->prep_times_[i] = (float)usrParaValue;
                    GDEBUG_STREAM("CmrParametricT2MappingGadget, found T2 prep time : " << i << " - " << this->prep_times_[i]);
                }
                i++;
            }
        }
    }

    // -------------------------------------------------

    return GADGET_OK;
}
int GenericReconFieldOfViewAdjustmentGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdImageArray >* m1) { if (perform_timing.value()) { gt_timer_.start("GenericReconFieldOfViewAdjustmentGadget::process"); } GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconFieldOfViewAdjustmentGadget::process(...) starts ... "); process_called_times_++; IsmrmrdImageArray* recon_res_ = m1->getObjectPtr(); // print out recon info if (verbose.value()) { GDEBUG_STREAM("----> GenericReconFieldOfViewAdjustmentGadget::process(...) has been called " << process_called_times_ << " times ..."); std::stringstream os; recon_res_->data_.print(os); GDEBUG_STREAM(os.str()); } if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_res_->data_, debug_folder_full_path_ + "data_before_FOV_adjustment"); } // ---------------------------------------------------------- // FOV adjustment // ---------------------------------------------------------- GADGET_CHECK_RETURN(this->adjust_FOV(*recon_res_) == GADGET_OK, GADGET_FAIL); if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_res_->data_, debug_folder_full_path_ + "data_after_FOV_adjustment"); } // make sure the image header is consistent with data size_t N = recon_res_->headers_.get_number_of_elements(); for (size_t n = 0; n < N; n++) { recon_res_->headers_(n).matrix_size[0] = recon_res_->data_.get_size(0); recon_res_->headers_(n).matrix_size[1] = recon_res_->data_.get_size(1); recon_res_->headers_(n).matrix_size[2] = recon_res_->data_.get_size(2); } GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconFieldOfViewAdjustmentGadget::process(...) ends ... "); // ---------------------------------------------------------- // send out results // ---------------------------------------------------------- if (this->next()->putq(m1) == -1) { GERROR("GenericReconFieldOfViewAdjustmentGadget::process, passing data on to next gadget"); return GADGET_FAIL; } if (perform_timing.value()) { gt_timer_.stop(); } return GADGET_OK; }
GadgetMessageImageArray::GadgetMessageImageArray(int aSize[10])
{
    try {
        unsigned int ii;
        for ( ii=0; ii<10; ii++ ) {
            matrix_size[ii] = aSize[ii];
        }

        unsigned int len = 1;
        for ( ii=3; ii<10; ii++ ) {
            len *= matrix_size[ii];
        }

        if ( len > 0 ) {
            imageArray_ = new GadgetMessageImageExt[len];
        }

        kSpace_centre_col_no = matrix_size[0]/2;
        kSpace_centre_line_no = matrix_size[1]/2;
        kSpace_centre_partition_no = matrix_size[4]/2;

        kSpace_max_acquired_col_no = matrix_size[0]-1;
        kSpace_max_acquired_line_no = matrix_size[1]-1;
        kSpace_max_acquired_partition_no = matrix_size[4]-1;
    }
    catch(...) {
        GDEBUG_STREAM("Failed to allocate imageArray_" << std::endl);
    }
}
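// The element indexing above and in dump() / extractMessageImageArrayForREP() relies on get_offset() to
// linearize the [SLC PAR ECO PHS REP SET SEG] indices against matrix_size[3..9]; its implementation is not
// part of this excerpt. A plausible sketch, assuming the slice index varies fastest (that ordering is an
// assumption for illustration only):
//
//   int GadgetMessageImageArray::get_offset(int slc, int par, int eco, int phs, int rep, int set, int seg)
//   {
//       int SLC = matrix_size[3], PAR = matrix_size[4], ECO = matrix_size[5];
//       int PHS = matrix_size[6], REP = matrix_size[7], SET = matrix_size[8];
//       return slc
//            + par * SLC
//            + eco * SLC*PAR
//            + phs * SLC*PAR*ECO
//            + rep * SLC*PAR*ECO*PHS
//            + set * SLC*PAR*ECO*PHS*REP
//            + seg * SLC*PAR*ECO*PHS*REP*SET;
//   }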
int GenericReconCartesianGrappaGadget::close(unsigned long flags) { GDEBUG_CONDITION_STREAM(true, "GenericReconCartesianGrappaGadget - close(flags) : " << flags); if (BaseClass::close(flags) != GADGET_OK) return GADGET_FAIL; if (flags != 0) { size_t e; for (e = 0; e < recon_obj_.size(); e++) { GDEBUG_STREAM("Clean recon_obj_ for encoding space " << e); if (recon_obj_[e].recon_res_.data_.delete_data_on_destruct()) recon_obj_[e].recon_res_.data_.clear(); if (recon_obj_[e].recon_res_.headers_.delete_data_on_destruct()) recon_obj_[e].recon_res_.headers_.clear(); recon_obj_[e].recon_res_.meta_.clear(); if (recon_obj_[e].gfactor_.delete_data_on_destruct()) recon_obj_[e].gfactor_.clear(); if (recon_obj_[e].ref_calib_.delete_data_on_destruct()) recon_obj_[e].ref_calib_.clear(); if (recon_obj_[e].ref_calib_dst_.delete_data_on_destruct()) recon_obj_[e].ref_calib_dst_.clear(); if (recon_obj_[e].ref_coil_map_.delete_data_on_destruct()) recon_obj_[e].ref_coil_map_.clear(); if (recon_obj_[e].kernel_.delete_data_on_destruct()) recon_obj_[e].kernel_.clear(); if (recon_obj_[e].kernelIm_.delete_data_on_destruct()) recon_obj_[e].kernelIm_.clear(); if (recon_obj_[e].unmixing_coeff_.delete_data_on_destruct()) recon_obj_[e].unmixing_coeff_.clear(); if (recon_obj_[e].coil_map_.delete_data_on_destruct()) recon_obj_[e].coil_map_.clear(); } } return GADGET_OK; }
void correct_time_stamp_with_fitting(hoNDArray<float>& time_stamp, size_t startE1, size_t endE1) { try { size_t E1 = time_stamp.get_size(0); size_t N = time_stamp.get_size(1); size_t rE1 = endE1 - startE1 + 1; size_t e1, n; size_t num_acq_read_outs = 0; for ( n=0; n<N; n++ ) { for ( e1=0; e1<E1; e1++ ) { if ( time_stamp(e1, n) > 0 ) { num_acq_read_outs++; } } } GDEBUG_STREAM(" Number of acquired lines : " << num_acq_read_outs); float a, b; // y = a + b*x { std::vector<float> x(num_acq_read_outs), y(num_acq_read_outs); size_t ind = 0; for ( n=0; n<N; n++ ) { for ( e1=startE1; e1<=endE1; e1++ ) { float acq_time = time_stamp(e1, n); if ( acq_time > 0 ) { x[ind] = (float)(e1-startE1 + n*rE1); y[ind] = acq_time; ind++; } } } Gadgetron::simple_line_fit(x, y, a, b); } for ( n=0; n<N; n++ ) { for ( e1=startE1; e1<=endE1; e1++ ) { float x_v = (float)(e1-startE1 + n*rE1); time_stamp(e1, n) = a + b*x_v; } } } catch(...) { GADGET_THROW("Exceptions happened in correct_time_stamp_with_fitting(...) ... "); } }
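// Gadgetron::simple_line_fit(x, y, a, b) above fits y = a + b*x to the acquired time stamps. Its
// implementation is not part of this excerpt; the following is a minimal ordinary least-squares sketch of
// what such a routine computes (illustrative helper with a hypothetical name, not the library function).
static void simple_line_fit_sketch(const std::vector<float>& x, const std::vector<float>& y, float& a, float& b)
{
    // closed-form least squares: b = (n*Sxy - Sx*Sy) / (n*Sxx - Sx*Sx), a = (Sy - b*Sx) / n
    size_t n = x.size();
    float sx = 0, sy = 0, sxx = 0, sxy = 0;
    for (size_t i = 0; i < n; i++) { sx += x[i]; sy += y[i]; sxx += x[i]*x[i]; sxy += x[i]*y[i]; }
    b = (n*sxy - sx*sy) / (n*sxx - sx*sx);
    a = (sy - b*sx) / n;
}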
void GadgetMessageImageArray::extractMessageImageArrayForREP(int rep, GadgetMessageImageArray& imageArray)
{
    if ( rep >= matrix_size[7] ) {
        GDEBUG_STREAM("extractMessageImageArrayForREP error - rep >= matrix_size[7] " << std::endl);
        return;
    }

    int aSize[10];
    unsigned int ii;
    for ( ii=0; ii<10; ii++ ) {
        aSize[ii] = matrix_size[ii];
    }
    aSize[7] = 1;

    imageArray.resize(aSize);

    imageArray.kSpace_centre_col_no = kSpace_centre_col_no;
    imageArray.kSpace_centre_line_no = kSpace_centre_line_no;
    imageArray.kSpace_centre_partition_no = kSpace_centre_partition_no;
    imageArray.kSpace_max_acquired_col_no = kSpace_max_acquired_col_no;
    imageArray.kSpace_max_acquired_line_no = kSpace_max_acquired_line_no;
    imageArray.kSpace_max_acquired_partition_no = kSpace_max_acquired_partition_no;

    int par, eco, phs, slc, set, seg;

    int SLC = matrix_size[3];
    int PAR = matrix_size[4];
    int ECO = matrix_size[5];
    int PHS = matrix_size[6];
    int SET = matrix_size[8];
    int SEG = matrix_size[9];

    for ( seg=0; seg<SEG; seg++ ) {
        for ( set=0; set<SET; set++ ) {
            for ( slc=0; slc<SLC; slc++ ) {
                for ( phs=0; phs<PHS; phs++ ) {
                    for ( eco=0; eco<ECO; eco++ ) {
                        for ( par=0; par<PAR; par++ ) {
                            int offset = this->get_offset(slc, par, eco, phs, rep, set, seg);
                            int offsetREP = imageArray.get_offset(slc, par, eco, phs, 0, set, seg);
                            imageArray.imageArray_[offsetREP] = imageArray_[offset];
                        }
                    }
                }
            }
        }
    }
}
fs::path get_gadgetron_home() { const char *home = std::getenv("GADGETRON_HOME"); if (home != nullptr) { return fs::path(home); } fs::path executable_path = get_executable_path(); GDEBUG_STREAM("Executable path: " << executable_path << std::endl); fs::path gadgetron_home = executable_path .parent_path() .parent_path(); GDEBUG_STREAM("Gadgetron home: " << gadgetron_home << std::endl); return gadgetron_home; }
int GenericReconPartialFourierHandlingGadget::process_config(ACE_Message_Block* mb)
{
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    ISMRMRD::IsmrmrdHeader h;
    try { deserialize(mb->rd_ptr(), h); }
    catch (...) { GDEBUG("Error parsing ISMRMRD Header"); }

    if (!h.acquisitionSystemInformation) {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    acceFactorE1_.resize(NE, 1);
    acceFactorE2_.resize(NE, 1);

    size_t e;
    for (e = 0; e < h.encoding.size(); e++) {
        if (!h.encoding[e].parallelImaging) {
            GDEBUG_STREAM("Parallel Imaging section not found in header for encoding " << e);
            acceFactorE1_[e] = 1;
            acceFactorE2_[e] = 1;
        }
        else {
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            acceFactorE1_[e] = p_imaging.accelerationFactor.kspace_encoding_step_1;
            acceFactorE2_[e] = p_imaging.accelerationFactor.kspace_encoding_step_2;

            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << acceFactorE1_[e]);
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << acceFactorE2_[e]);
        }
    }

    return GADGET_OK;
}
int PartialFourierAdjustROGadget::process_config(ACE_Message_Block* mb) { ISMRMRD::IsmrmrdHeader h; deserialize(mb->rd_ptr(),h); if (h.encoding.size() != 1) { GDEBUG("Number of encoding spaces: %d\n", h.encoding.size()); GDEBUG("This partial fourier gadget only supports one encoding space\n"); return GADGET_FAIL; } ISMRMRD::EncodingSpaceType e_space = h.encoding[0].encodedSpace; maxRO_ = e_space.matrixSize.x; GDEBUG_STREAM("max RO : " << maxRO_); return GADGET_OK; }
int GenericReconPartialFourierHandlingGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdImageArray >* m1)
{
    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconPartialFourierHandlingGadget::process(...) starts ... ");

    process_called_times_++;

    IsmrmrdImageArray* recon_res_ = m1->getObjectPtr();

    // print out recon info
    if (verbose.value()) {
        GDEBUG_STREAM("----> GenericReconPartialFourierHandlingGadget::process(...) has been called " << process_called_times_ << " times ...");
        std::stringstream os;
        recon_res_->data_.print(os);
        GDEBUG_STREAM(os.str());
    }

    // some images do not need partial fourier handling processing
    if (recon_res_->meta_[0].length(skip_processing_meta_field.value().c_str())>0) {
        if (this->next()->putq(m1) == -1) {
            GERROR("GenericReconPartialFourierHandlingGadget::process, passing incoming image array on to next gadget");
            return GADGET_FAIL;
        }
        return GADGET_OK;
    }

    // call the partial fourier handling
    size_t encoding = (size_t)recon_res_->meta_[0].as_long("encoding", 0);
    GADGET_CHECK_RETURN(encoding<num_encoding_spaces_, GADGET_FAIL);

    // get the sampling limits stored in the image meta
    SamplingLimit sampling_limits[3];

    sampling_limits[0].min_    = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 0);
    sampling_limits[0].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 1);
    sampling_limits[0].max_    = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_RO", 2);

    sampling_limits[1].min_    = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 0);
    sampling_limits[1].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 1);
    sampling_limits[1].max_    = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E1", 2);

    sampling_limits[2].min_    = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 0);
    sampling_limits[2].center_ = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 1);
    sampling_limits[2].max_    = (uint16_t)recon_res_->meta_[0].as_long("sampling_limits_E2", 2);

    size_t RO  = recon_res_->data_.get_size(0);
    size_t E1  = recon_res_->data_.get_size(1);
    size_t E2  = recon_res_->data_.get_size(2);
    size_t CHA = recon_res_->data_.get_size(3);
    size_t N   = recon_res_->data_.get_size(4);
    size_t S   = recon_res_->data_.get_size(5);
    size_t SLC = recon_res_->data_.get_size(6);

    // ----------------------------------------------------------
    // pf kspace sampling range
    // ----------------------------------------------------------
    // if image padding is performed, those dimensions may not need partial fourier handling

    startRO_ = sampling_limits[0].min_;
    endRO_   = sampling_limits[0].max_;

    startE1_ = 0;
    endE1_   = E1 - 1;

    startE2_ = 0;
    endE2_   = E2 - 1;

    if (std::abs((double)(sampling_limits[1].max_ - E1 / 2) - (double)(E1 / 2 - sampling_limits[1].min_)) > acceFactorE1_[encoding]) {
        startE1_ = sampling_limits[1].min_;
        endE1_   = sampling_limits[1].max_;
    }

    if ((E2>1) && (std::abs((double)(sampling_limits[2].max_ - E2 / 2) - (double)(E2 / 2 - sampling_limits[2].min_)) > acceFactorE2_[encoding])) {
        startE2_ = sampling_limits[2].min_;
        endE2_   = sampling_limits[2].max_;
    }

    long lenRO = endRO_ - startRO_ + 1;
    long lenE1 = endE1_ - startE1_ + 1;
    long lenE2 = endE2_ - startE2_ + 1;

    if (lenRO == RO && lenE1 == E1 && lenE2 == E2) {
        GDEBUG_CONDITION_STREAM(verbose.value(), "lenRO == RO && lenE1 == E1 && lenE2 == E2");
        if (this->next()->putq(m1) == -1) {
            GERROR("GenericReconPartialFourierHandlingGadget::process, passing data on to next gadget");
            return GADGET_FAIL;
        }
        return GADGET_OK;
    }

    // ----------------------------------------------------------
    // go to kspace
    // ----------------------------------------------------------
    if (E2 > 1) {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->fft3c(recon_res_->data_, kspace_buf_);
    }
    else {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->fft2c(recon_res_->data_, kspace_buf_);
    }

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(kspace_buf_, debug_folder_full_path_ + "kspace_before_pf"); }*/

    // ----------------------------------------------------------
    // pf handling
    // ----------------------------------------------------------
    GADGET_CHECK_RETURN(this->perform_partial_fourier_handling() == GADGET_OK, GADGET_FAIL);

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(pf_res_, debug_folder_full_path_ + "kspace_after_pf"); }*/

    // ----------------------------------------------------------
    // go back to image domain
    // ----------------------------------------------------------
    if (E2 > 1) {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->ifft3c(pf_res_, recon_res_->data_);
    }
    else {
        Gadgetron::hoNDFFT<typename realType<T>::Type>::instance()->ifft2c(pf_res_, recon_res_->data_);
    }

    /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(recon_res_->data_, debug_folder_full_path_ + "data_after_pf"); }*/

    GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconPartialFourierHandlingGadget::process(...) ends ... ");

    // ----------------------------------------------------------
    // send out results
    // ----------------------------------------------------------
    if (this->next()->putq(m1) == -1) {
        GERROR("GenericReconPartialFourierHandlingGadget::process, passing data on to next gadget");
        return GADGET_FAIL;
    }

    return GADGET_OK;
}
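// Worked example of the asymmetry test in the partial fourier range detection above (values are
// illustrative, not from any particular protocol): with E1 = 256, acceFactorE1_ = 2 and sampling limits
// min = 0, center = 128, max = 159, the acquired range extends 128 lines below the centre but only 31
// above it; |(159 - 128) - (128 - 0)| = 97 > 2, so partial fourier handling is triggered along E1 and
// [startE1_, endE1_] becomes [0, 159]. For a fully sampled dimension (min = 0, max = 255) the difference
// is |127 - 128| = 1, which stays below the acceleration factor, and that dimension is left untouched.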
void BucketToBufferGadget::fillSamplingDescription(SamplingDescription & sampling, ISMRMRD::Encoding & encoding, IsmrmrdAcquisitionBucketStats & stats, ISMRMRD::AcquisitionHeader& acqhdr, bool forref) { // For cartesian trajectories, assume that any oversampling has been removed. if (encoding.trajectory == ISMRMRD::TrajectoryType::CARTESIAN) { sampling.encoded_FOV_[0] = encoding.reconSpace.fieldOfView_mm.x; sampling.encoded_matrix_[0] = encoding.reconSpace.matrixSize.x; } else { sampling.encoded_FOV_[0] = encoding.encodedSpace.fieldOfView_mm.x; sampling.encoded_matrix_[0] = encoding.encodedSpace.matrixSize.x; } sampling.encoded_FOV_[1] = encoding.encodedSpace.fieldOfView_mm.y; sampling.encoded_FOV_[2] = encoding.encodedSpace.fieldOfView_mm.z; sampling.encoded_matrix_[1] = encoding.encodedSpace.matrixSize.y; sampling.encoded_matrix_[2] = encoding.encodedSpace.matrixSize.z; sampling.recon_FOV_[0] = encoding.reconSpace.fieldOfView_mm.x; sampling.recon_FOV_[1] = encoding.reconSpace.fieldOfView_mm.y; sampling.recon_FOV_[2] = encoding.reconSpace.fieldOfView_mm.z; sampling.recon_matrix_[0] = encoding.reconSpace.matrixSize.x; sampling.recon_matrix_[1] = encoding.reconSpace.matrixSize.y; sampling.recon_matrix_[2] = encoding.reconSpace.matrixSize.z; // For cartesian trajectories, assume that any oversampling has been removed. if ( ((encoding.trajectory == ISMRMRD::TrajectoryType::CARTESIAN)) || (encoding.trajectory == ISMRMRD::TrajectoryType::EPI) ) { sampling.sampling_limits_[0].min_ = acqhdr.discard_pre; sampling.sampling_limits_[0].max_ = acqhdr.number_of_samples - acqhdr.discard_post - 1; sampling.sampling_limits_[0].center_ = acqhdr.number_of_samples / 2; } else { sampling.sampling_limits_[0].min_ = 0; sampling.sampling_limits_[0].max_ = encoding.encodedSpace.matrixSize.x - 1; sampling.sampling_limits_[0].center_ = encoding.encodedSpace.matrixSize.x / 2; } // if the scan is cartesian if ( ( (encoding.trajectory == ISMRMRD::TrajectoryType::CARTESIAN) && (!forref || (forref && (encoding.parallelImaging.get().calibrationMode.get() == "embedded"))) ) || ( (encoding.trajectory == ISMRMRD::TrajectoryType::EPI) && !forref) ) { int16_t space_matrix_offset_E1 = 0; if (encoding.encodingLimits.kspace_encoding_step_1.is_present()) { space_matrix_offset_E1 = (int16_t)encoding.encodedSpace.matrixSize.y / 2 - (int16_t)encoding.encodingLimits.kspace_encoding_step_1->center; } int16_t space_matrix_offset_E2 = 0; if (encoding.encodingLimits.kspace_encoding_step_2.is_present() && encoding.encodedSpace.matrixSize.z > 1) { space_matrix_offset_E2 = (int16_t)encoding.encodedSpace.matrixSize.z / 2 - (int16_t)encoding.encodingLimits.kspace_encoding_step_2->center; } // E1 sampling.sampling_limits_[1].min_ = encoding.encodingLimits.kspace_encoding_step_1->minimum + space_matrix_offset_E1; sampling.sampling_limits_[1].max_ = encoding.encodingLimits.kspace_encoding_step_1->maximum + space_matrix_offset_E1; sampling.sampling_limits_[1].center_ = sampling.encoded_matrix_[1] / 2; GADGET_CHECK_THROW(sampling.sampling_limits_[1].min_ < encoding.encodedSpace.matrixSize.y); GADGET_CHECK_THROW(sampling.sampling_limits_[1].max_ >= sampling.sampling_limits_[1].min_); GADGET_CHECK_THROW(sampling.sampling_limits_[1].center_ >= sampling.sampling_limits_[1].min_); GADGET_CHECK_THROW(sampling.sampling_limits_[1].center_ <= sampling.sampling_limits_[1].max_); // E2 sampling.sampling_limits_[2].min_ = encoding.encodingLimits.kspace_encoding_step_2->minimum + space_matrix_offset_E2; sampling.sampling_limits_[2].max_ = 
encoding.encodingLimits.kspace_encoding_step_2->maximum + space_matrix_offset_E2;
        sampling.sampling_limits_[2].center_ = sampling.encoded_matrix_[2] / 2;

        GADGET_CHECK_THROW(sampling.sampling_limits_[2].min_ < encoding.encodedSpace.matrixSize.z);
        GADGET_CHECK_THROW(sampling.sampling_limits_[2].max_ >= sampling.sampling_limits_[2].min_);
        GADGET_CHECK_THROW(sampling.sampling_limits_[2].center_ >= sampling.sampling_limits_[2].min_);
        GADGET_CHECK_THROW(sampling.sampling_limits_[2].center_ <= sampling.sampling_limits_[2].max_);
    }
    else {
        sampling.sampling_limits_[1].min_    = encoding.encodingLimits.kspace_encoding_step_1->minimum;
        sampling.sampling_limits_[1].max_    = encoding.encodingLimits.kspace_encoding_step_1->maximum;
        sampling.sampling_limits_[1].center_ = encoding.encodingLimits.kspace_encoding_step_1->center;

        sampling.sampling_limits_[2].min_    = encoding.encodingLimits.kspace_encoding_step_2->minimum;
        sampling.sampling_limits_[2].max_    = encoding.encodingLimits.kspace_encoding_step_2->maximum;
        sampling.sampling_limits_[2].center_ = encoding.encodingLimits.kspace_encoding_step_2->center;
    }

    if (verbose.value()) {
        GDEBUG_STREAM("Encoding space : " << int(encoding.trajectory)
            << " - FOV : [ " << encoding.encodedSpace.fieldOfView_mm.x << " " << encoding.encodedSpace.fieldOfView_mm.y << " " << encoding.encodedSpace.fieldOfView_mm.z << " ] "
            << " - Matrix size : [ " << encoding.encodedSpace.matrixSize.x << " " << encoding.encodedSpace.matrixSize.y << " " << encoding.encodedSpace.matrixSize.z << " ] ");

        GDEBUG_STREAM("Sampling limits : "
            << "- RO : [ " << sampling.sampling_limits_[0].min_ << " " << sampling.sampling_limits_[0].center_ << " " << sampling.sampling_limits_[0].max_
            << " ] - E1 : [ " << sampling.sampling_limits_[1].min_ << " " << sampling.sampling_limits_[1].center_ << " " << sampling.sampling_limits_[1].max_
            << " ] - E2 : [ " << sampling.sampling_limits_[2].min_ << " " << sampling.sampling_limits_[2].center_ << " " << sampling.sampling_limits_[2].max_ << " ]");
    }
}
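// Worked example of the E1 offset logic in fillSamplingDescription() above (numbers are illustrative):
// for encodedSpace.matrixSize.y = 256 and kspace_encoding_step_1 limits with center = 70, minimum = 0,
// maximum = 139, space_matrix_offset_E1 = 256/2 - 70 = 58. The stored sampling limits then become
// min_ = 0 + 58 = 58 and max_ = 139 + 58 = 197, i.e. the acquired lines are re-expressed relative to a
// k-space centre placed at encoded_matrix_[1]/2 = 128, which is what the matrixSize checks assume.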
int WhiteNoiseInjectorGadget::process(GadgetContainerMessage<ISMRMRD::AcquisitionHeader>* m1, GadgetContainerMessage< hoNDArray< std::complex<float> > >* m2) { bool is_noise = ISMRMRD::FlagBit(ISMRMRD::ISMRMRD_ACQ_IS_NOISE_MEASUREMENT).isSet(m1->getObjectPtr()->flags); bool is_scc_correction = ISMRMRD::FlagBit(ISMRMRD::ISMRMRD_ACQ_IS_SURFACECOILCORRECTIONSCAN_DATA).isSet(m1->getObjectPtr()->flags); bool is_ref = ISMRMRD::FlagBit(ISMRMRD::ISMRMRD_ACQ_IS_PARALLEL_CALIBRATION).isSet(m1->getObjectPtr()->flags); bool is_ref_kspace = ISMRMRD::FlagBit(ISMRMRD::ISMRMRD_ACQ_IS_PARALLEL_CALIBRATION_AND_IMAGING).isSet(m1->getObjectPtr()->flags); size_t channels = m1->getObjectPtr()->active_channels; size_t samples = m1->getObjectPtr()->number_of_samples; if (!is_noise && !is_scc_correction ) { bool add_noise = true; if ( is_ref && !is_ref_kspace && (is_seperate_||is_external_) ) { add_noise = add_noise_ref_; if ( !add_noise ) { GDEBUG_STREAM("WhiteNoiseInjectorGadget, noise is not added to the ref acquisitions ... "); } } if ( add_noise ) { if ( !noise_.dimensions_equal(m2->getObjectPtr()) ) { noise_.create(m2->getObjectPtr()->get_dimensions()); noise_fl_.create(m2->getObjectPtr()->get_dimensions()); } if ( !randn_->gen(noise_) ) { GERROR_STREAM("WhiteNoiseInjectorGadget, randn_->gen(noise_) failed ... "); return GADGET_FAIL; } if ( !noise_fl_.copyFrom(noise_) ) { GERROR_STREAM("WhiteNoiseInjectorGadget, noise_fl_.copyFrom(noise_) failed ... "); return GADGET_FAIL; } try { Gadgetron::add(*m2->getObjectPtr(), noise_fl_, *m2->getObjectPtr()); } catch(...) { GERROR_STREAM("WhiteNoiseInjectorGadget, Gadgetron::add(*m2->getObjectPtr(), noise_, *m2->getObjectPtr()) failed ... "); return GADGET_FAIL; } } } if (this->next()->putq(m1) == -1) { GERROR("WhiteNoiseInjectorGadget::process, passing data on to next gadget"); return -1; } return GADGET_OK; }
bool prepOpenMP() { try { GDEBUG_STREAM("--> OpenMP info <--"); GDEBUG_STREAM("--------------------------------------------------------"); int numOpenMPProcs = omp_get_num_procs(); GDEBUG_STREAM("GtPlusRecon, numOpenMPProcs : " << numOpenMPProcs); #ifndef WIN32 int maxOpenMPLevels = omp_get_max_active_levels(); GDEBUG_STREAM("GtPlusRecon, maxOpenMPLevels : " << maxOpenMPLevels); #endif // WIN32 int maxOpenMPThreads = omp_get_max_threads(); GDEBUG_STREAM("GtPlusRecon, maxOpenMPThreads : " << maxOpenMPThreads); if ( numOpenMPProcs != maxOpenMPThreads ) { GDEBUG_STREAM("GtPlusRecon, numOpenMPProcs != maxOpenMPThreads , hyperthreading must be disabled ... "); omp_set_num_threads(numOpenMPProcs); } // omp_set_nested(1); int allowOpenMPNested = omp_get_nested(); GDEBUG_STREAM("GtPlusRecon, allowOpenMPNested : " << allowOpenMPNested); #ifdef WIN32 GDEBUG_STREAM("----------------------------------"); GDEBUG_STREAM("GtPlus, set thread affinity ... "); /// lock the threads #pragma omp parallel default(shared) { int tid = omp_get_thread_num(); DWORD_PTR mask = (1 << tid); GDEBUG_STREAM("thread id : " << tid << " - mask : " << mask); SetThreadAffinityMask( GetCurrentThread(), mask ); } #endif // WIN32 GDEBUG_STREAM("--------------------------------------------------------"); } catch(...) { GERROR_STREAM("Errors in GtPlus prepOpenMP() ... "); return false; } return true; }
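// Note on the WIN32 affinity code above: (1 << tid) builds a one-hot mask, so OpenMP thread 0 is pinned to
// logical CPU 0 (mask 0x1), thread 3 to logical CPU 3 (mask 0x8), and so on; this assumes the thread count
// does not exceed the number of bits in DWORD_PTR.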
int DistributeGadget::process(ACE_Message_Block* m) { int node_index = this->node_index(m); if (single_package_mode.value()) { node_index = ++started_nodes_; } if (node_index < 0) { GERROR("Negative node index received"); return GADGET_FAIL; } //If we are not supposed to use this node for compute, add one to make sure we are not on node 0 //if (!use_this_node_for_compute.value()) { // node_index = node_index+1; //} // instead of sending down the stream, processing is done by making connections //if (node_index == 0) { //process locally // if (this->next()->putq(m) == -1) { // m->release(); // GERROR("DistributeGadget::process, passing data on to next gadget\n"); // return GADGET_FAIL; // } // return GADGET_OK; //} //At this point, the node index is positive, so we need to find a suitable connector. mtx_.acquire(); auto n = node_map_.find(node_index); mtx_.release(); GadgetronConnector* con = 0; if (n != node_map_.end()) { //We have a suitable connection already. con = n->second; } else { std::vector<GadgetronNodeInfo> nl; CloudBus::instance()->get_node_info(nl); GDEBUG("Number of network nodes found: %d\n", nl.size()); GadgetronNodeInfo me; me.address = "127.0.0.1";//We may have to update this me.port = CloudBus::instance()->port(); me.uuid = CloudBus::instance()->uuid(); me.active_reconstructions = CloudBus::instance()->active_reconstructions(); //This would give the current node the lowest possible priority if (!use_this_node_for_compute.value()) { me.active_reconstructions = UINT32_MAX; } for (auto it = nl.begin(); it != nl.end(); it++) { if (it->active_reconstructions < me.active_reconstructions) { me = *it; } //Is this a free node if (me.active_reconstructions == 0) break; } // first job, send to current node if required if (use_this_node_for_compute.value() && node_index==0) { size_t num_of_ip = local_address_.size(); for (auto it = nl.begin(); it != nl.end(); it++) { for (size_t ii=0; ii<num_of_ip; ii++) { if (it->address == local_address_[ii]) { me = *it; } } } GDEBUG_STREAM("Send first job to current node : " << me.address); } con = new DistributionConnector(this); GadgetronXML::GadgetStreamConfiguration cfg; try { deserialize(node_xml_config_.c_str(), cfg); } catch (const std::runtime_error& e) { GERROR("Failed to parse Node Gadget Stream Configuration: %s\n", e.what()); return GADGET_FAIL; } //Configuration of readers for (auto i = cfg.reader.begin(); i != cfg.reader.end(); ++i) { GadgetMessageReader* r = controller_->load_dll_component<GadgetMessageReader>(i->dll.c_str(), i->classname.c_str()); if (!r) { GERROR("Failed to load GadgetMessageReader from DLL\n"); return GADGET_FAIL; } con->register_reader(i->slot, r); } for (auto i = cfg.writer.begin(); i != cfg.writer.end(); ++i) { GadgetMessageWriter* w = controller_->load_dll_component<GadgetMessageWriter>(i->dll.c_str(), i->classname.c_str()); if (!w) { GERROR("Failed to load GadgetMessageWriter from DLL\n"); return GADGET_FAIL; } con->register_writer(i->slot, w); } char buffer[10]; sprintf(buffer,"%d",me.port); if (con->open(me.address,std::string(buffer)) != 0) { GERROR("Failed to open connection to node %s : %d\n", me.address.c_str(), me.port); return GADGET_FAIL; } if (con->send_gadgetron_configuration_script(node_xml_config_) != 0) { GERROR("Failed to send XML configuration to compute node\n"); return GADGET_FAIL; } if (con->send_gadgetron_parameters(node_parameters_) != 0) { GERROR("Failed to send XML parameters to compute node\n"); return GADGET_FAIL; } mtx_.acquire(); node_map_[node_index] = con; mtx_.release(); } if 
(!con) {
        //Zero pointer for the connection means that either a) connection creation failed or b) using local chain.
        //Either way, we will send it down the chain.
        if (!use_this_node_for_compute.value()) {
            GERROR("This node cannot be used for computing and no other node is available\n");
            m->release();
            return GADGET_FAIL;
        }

        if (this->next()->putq(m) == -1) {
            m->release();
            GERROR("DistributeGadget::process, passing data on to next gadget\n");
            return GADGET_FAIL;
        } else {
            return GADGET_OK;
        }
    } else {
        //Let's make sure that we did not send a close message to this connector already
        auto c = std::find(closed_connectors_.begin(),closed_connectors_.end(),con);
        if (c != closed_connectors_.end()) {
            //This is a bad situation, we need to bail out.
            m->release();
            GERROR("The valid connection for incoming data has already been closed. Distribute Gadget is not configured properly for this type of data\n");
            return GADGET_FAIL;
        }

        //If nodes receive their data sequentially (default), check whether the previous connection should be closed
        if (nodes_used_sequentially.value() && !single_package_mode.value()) {
            //Is this a new connection, if so, send previous one a close
            if (prev_connector_ && prev_connector_ != con) {
                GDEBUG("Sending close to previous connector, not expecting any more data for this one\n");
                auto mc = new GadgetContainerMessage<GadgetMessageIdentifier>();
                mc->getObjectPtr()->id = GADGET_MESSAGE_CLOSE;
                if (prev_connector_->putq(mc) == -1) {
                    GERROR("Unable to put CLOSE package on queue of previous connection\n");
                    return -1;
                }
                closed_connectors_.push_back(prev_connector_);
            }
        }

        //Update previous connection
        prev_connector_ = con;

        //We have a valid connector
        auto m1 = new GadgetContainerMessage<GadgetMessageIdentifier>();
        m1->getObjectPtr()->id = message_id(m);
        m1->cont(m);

        if (con->putq(m1) == -1) {
            GERROR("Unable to put package on connector queue\n");
            m1->release();
            return GADGET_FAIL;
        }

        if (single_package_mode.value()) {
            auto m2 = new GadgetContainerMessage<GadgetMessageIdentifier>();
            m2->getObjectPtr()->id = GADGET_MESSAGE_CLOSE;
            if (con->putq(m2) == -1) {
                GERROR("Unable to put CLOSE package on queue\n");
                return -1;
            }
            closed_connectors_.push_back(con);
        }
    }

    return 0;
}
int GenericReconFieldOfViewAdjustmentGadget::adjust_FOV(IsmrmrdImageArray& recon_res) { try { size_t RO = recon_res.data_.get_size(0); size_t E1 = recon_res.data_.get_size(1); size_t E2 = recon_res.data_.get_size(2); double encodingFOV_RO = recon_res.meta_[0].as_double("encoding_FOV", 0); double encodingFOV_E1 = recon_res.meta_[0].as_double("encoding_FOV", 1); double encodingFOV_E2 = recon_res.meta_[0].as_double("encoding_FOV", 2); double reconFOV_RO = recon_res.meta_[0].as_double("recon_FOV", 0); double reconFOV_E1 = recon_res.meta_[0].as_double("recon_FOV", 1); double reconFOV_E2 = recon_res.meta_[0].as_double("recon_FOV", 2); long encoding = recon_res.meta_[0].as_long("encoding", 0); size_t reconSizeRO = recon_size_[encoding][0]; size_t reconSizeE1 = recon_size_[encoding][1]; size_t reconSizeE2 = recon_size_[encoding][2]; // if 2D reconstruction, no need to process along E2 if (E2 <= 1) { reconSizeE2 = E2; reconFOV_E2 = encodingFOV_E2; } // if encoded FOV are the same as recon FOV if ((std::abs(encodingFOV_RO / 2 - reconFOV_RO)<0.1) && (std::abs(encodingFOV_E1 - reconFOV_E1)<0.1) && (std::abs(encodingFOV_E2 - reconFOV_E2)<0.1)) { if (RO <= reconSizeRO && E1 <= reconSizeE1 && E2 <= reconSizeE2) { Gadgetron::zero_pad_resize(recon_res.data_, reconSizeRO, reconSizeE1, reconSizeE2, res_); } else if (RO >= reconSizeRO && E1 >= reconSizeE1 && E2 >= reconSizeE2) { this->perform_fft(E2, recon_res.data_, kspace_buf_); Gadgetron::crop(reconSizeRO, reconSizeE1, reconSizeE2, &kspace_buf_, &res_); this->perform_ifft(E2, res_, recon_res.data_); } else { GDEBUG_STREAM("Inconsistent image size [" << RO << " " << E1 << " " << E2 << "]; recon image size [" << reconSizeRO << " " << reconSizeE1 << " " << reconSizeE2 << "] ... "); return GADGET_FAIL; } } else if ((encodingFOV_E1 >= reconFOV_E1) && (encodingFOV_E2 >= reconFOV_E2)) { size_t encodingE1 = reconSizeE1; if (encodingFOV_E1 > reconFOV_E1) { double spacingE1 = reconFOV_E1 / reconSizeE1; encodingE1 = (size_t)std::floor(encodingFOV_E1 / spacingE1 + 0.5); } size_t encodingE2 = reconSizeE2; if (encodingFOV_E2 > reconFOV_E2) { double spacingE2 = reconFOV_E2 / reconSizeE2; encodingE2 = (size_t)std::floor(encodingFOV_E2 / spacingE2 + 0.5); } hoNDArray< std::complex<float> >* pSrc = &recon_res.data_; hoNDArray< std::complex<float> >* pDst = &res_; hoNDArray< std::complex<float> >* pTmp; // adjust E1 if (encodingE1 >= E1 + 1) { Gadgetron::zero_pad_resize(*pSrc, RO, encodingE1, E2, *pDst); pTmp = pSrc; pSrc = pDst; pDst = pTmp; } else if (encodingE1 <= E1 - 1) { this->perform_fft(E2, *pSrc, kspace_buf_); Gadgetron::crop(RO, encodingE1, E2, &kspace_buf_, pDst); this->perform_ifft(E2, *pDst, *pDst); pTmp = pSrc; pSrc = pDst; pDst = pTmp; } // adjust E2 if (encodingE2 >= E2 + 1) { Gadgetron::zero_pad_resize(*pSrc, RO, pSrc->get_size(1), encodingE2, *pDst); pTmp = pSrc; pSrc = pDst; pDst = pTmp; } else if (encodingE2 <= E2 - 1) { this->perform_fft(E2, *pSrc, kspace_buf_); Gadgetron::crop(RO, pSrc->get_size(1), encodingE2, &kspace_buf_, pDst); this->perform_ifft(E2, *pDst, *pDst); pTmp = pSrc; pSrc = pDst; pDst = pTmp; } //adjust RO if (RO < reconSizeRO) { Gadgetron::zero_pad_resize(*pSrc, reconSizeRO, pSrc->get_size(1), pSrc->get_size(2), *pDst); pTmp = pSrc; pSrc = pDst; pDst = pTmp; } else if (RO > reconSizeRO) { this->perform_fft(E2, *pSrc, kspace_buf_); Gadgetron::crop(reconSizeRO, pSrc->get_size(1), pSrc->get_size(2), &kspace_buf_, pDst); this->perform_ifft(E2, *pDst, *pDst); pTmp = pSrc; pSrc = pDst; pDst = pTmp; } // final cut on image 
GADGET_CHECK_EXCEPTION_RETURN_FALSE(Gadgetron::crop(reconSizeRO, reconSizeE1, reconSizeE2, pSrc, pDst)); if (pDst != &recon_res.data_) { recon_res.data_ = *pDst; } } } catch (...) { GERROR_STREAM("Errors in GenericReconFieldOfViewAdjustmentGadget::adjust_FOV(IsmrmrdImageArray& data) ... "); return GADGET_FAIL; } return GADGET_OK; }
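// Worked example of the E1 branch in adjust_FOV() above (numbers are illustrative, and this assumes
// Gadgetron::zero_pad_resize performs k-space zero-padding interpolation over an unchanged FOV): with
// reconFOV_E1 = 250 mm, reconSizeE1 = 200 (1.25 mm spacing) and encodingFOV_E1 = 300 mm, encodingE1 becomes
// floor(300 / 1.25 + 0.5) = 240. An incoming E1 of 150 is then interpolated to 240 points, so the encoded
// 300 mm FOV is expressed at the recon pixel spacing, and the final crop to reconSizeE1 = 200 keeps the
// central 250 mm requested by the recon FOV.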
int GenericReconEigenChannelGadget::process_config(ACE_Message_Block* mb)
{
    ISMRMRD::IsmrmrdHeader h;
    try { deserialize(mb->rd_ptr(), h); }
    catch (...) { GDEBUG("Error parsing ISMRMRD Header"); }

    if (!h.acquisitionSystemInformation) {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    calib_mode_.resize(NE, ISMRMRD_noacceleration);

    KLT_.resize(NE);

    for (size_t e = 0; e < h.encoding.size(); e++) {
        ISMRMRD::EncodingSpace e_space = h.encoding[e].encodedSpace;
        ISMRMRD::EncodingSpace r_space = h.encoding[e].reconSpace;
        ISMRMRD::EncodingLimits e_limits = h.encoding[e].encodingLimits;

        if (!h.encoding[e].parallelImaging) {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
        }
        else {
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            std::string calib = *p_imaging.calibrationMode;

            bool separate = (calib.compare("separate") == 0);
            bool embedded = (calib.compare("embedded") == 0);
            bool external = (calib.compare("external") == 0);
            bool interleaved = (calib.compare("interleaved") == 0);
            bool other = (calib.compare("other") == 0);

            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (p_imaging.accelerationFactor.kspace_encoding_step_1 > 1 || p_imaging.accelerationFactor.kspace_encoding_step_2 > 1) {
                if (interleaved) calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                else if (embedded) calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                else if (separate) calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                else if (external) calib_mode_[e] = Gadgetron::ISMRMRD_external;
                else if (other) calib_mode_[e] = Gadgetron::ISMRMRD_other;
            }
        }
    }

    // ---------------------------------------------------------------------------------------------------------

    /*if (!debug_folder.value().empty()) { Gadgetron::get_debug_folder_path(debug_folder.value(), debug_folder_full_path_); GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is " << debug_folder_full_path_); } else { GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is not set ... "); }*/

    return GADGET_OK;
}
int WhiteNoiseInjectorGadget::process_config(ACE_Message_Block* mb) { noise_mean_ = noise_mean.value(); noise_std_ = noise_std.value(); add_noise_ref_ = add_noise_ref.value(); GDEBUG_STREAM("noise mean is " << noise_mean_); GDEBUG_STREAM("noise std is " << noise_std_); GDEBUG_STREAM("add_noise_ref is " << add_noise_ref_); randn_->setPara(noise_mean_, noise_std_); // get the current time and generate a seed time_t rawtime; struct tm * timeinfo; time ( &rawtime ); timeinfo = localtime ( &rawtime ); long long seed = (long long)(1e10*(timeinfo->tm_year+1900) + 1e8*(timeinfo->tm_mon+1) + 1e6*timeinfo->tm_mday + 1e4*timeinfo->tm_hour + 1e2*timeinfo->tm_min + timeinfo->tm_sec + std::rand()); std::array<unsigned int, 10> sequence; sequence[0] = (unsigned int)(1e10*(timeinfo->tm_year+1900)); sequence[1] = (unsigned int)(1e8*(timeinfo->tm_mon+1)); sequence[2] = (unsigned int)(1e6*timeinfo->tm_mday); sequence[3] = (unsigned int)(1e4*timeinfo->tm_hour); sequence[4] = (unsigned int)(1e2*timeinfo->tm_min); sequence[5] = (unsigned int)(timeinfo->tm_sec); std::srand( (unsigned int)seed ); sequence[6] = (unsigned int)(std::rand()); sequence[7] = (unsigned int)(std::rand()); sequence[8] = (unsigned int)(std::rand()); sequence[9] = (unsigned int)(std::rand()); std::seed_seq seedSeq(sequence.begin(), sequence.end()); randn_->getRandomer().seed(seedSeq); randn_->seed( (unsigned long)seed ); // --------------------------------------------------------------------------------------------------------- ISMRMRD::IsmrmrdHeader h; try { deserialize(mb->rd_ptr(),h); } catch (...) { GDEBUG("Error parsing ISMRMRD Header"); throw; return GADGET_FAIL; } if( h.encoding.size() != 1) { GDEBUG("Number of encoding spaces: %d\n", h.encoding.size()); GDEBUG("This simple WhiteNoiseInjectorGadget only supports one encoding space\n"); return GADGET_FAIL; } if (!h.encoding[0].parallelImaging) { GDEBUG("Parallel Imaging section not found in header"); return GADGET_FAIL; } ISMRMRD::ParallelImaging p_imaging = *h.encoding[0].parallelImaging; acceFactorE1_ = (double)(p_imaging.accelerationFactor.kspace_encoding_step_1); acceFactorE2_ = (double)(p_imaging.accelerationFactor.kspace_encoding_step_2); GDEBUG_STREAM("acceFactorE1_ is " << acceFactorE1_); GDEBUG_STREAM("acceFactorE2_ is " << acceFactorE2_); if ( !p_imaging.calibrationMode.is_present() ) { GDEBUG("Parallel Imaging calibrationMode not found in header"); return GADGET_FAIL; } std::string calib = *p_imaging.calibrationMode; if ( calib.compare("interleaved") == 0 ) { is_interleaved_ = true; GDEBUG_STREAM("Calibration mode is interleaved"); } else if ( calib.compare("embedded") == 0 ) { is_embeded_ = true; GDEBUG_STREAM("Calibration mode is embedded"); } else if ( calib.compare("separate") == 0 ) { is_seperate_ = true; GDEBUG_STREAM("Calibration mode is separate"); } else if ( calib.compare("external") == 0 ) { is_external_ = true; GDEBUG_STREAM("Calibration mode is external"); } else if ( (calib.compare("other") == 0)) { is_other_ = true; GDEBUG_STREAM("Calibration mode is other"); } else { GDEBUG("Failed to process parallel imaging calibration mode"); return GADGET_FAIL; } return GADGET_OK; }
int GenericReconCartesianGrappaGadget::process(Gadgetron::GadgetContainerMessage<IsmrmrdReconData> *m1)
{
    if (perform_timing.value()) { gt_timer_local_.start("GenericReconCartesianGrappaGadget::process"); }

    process_called_times_++;

    IsmrmrdReconData *recon_bit_ = m1->getObjectPtr();
    if (recon_bit_->rbit_.size() > num_encoding_spaces_) {
        GWARN_STREAM("Incoming recon_bit has more encoding spaces than the protocol : " << recon_bit_->rbit_.size() << " instead of " << num_encoding_spaces_);
    }

    GadgetContainerMessage< std::vector<ISMRMRD::Waveform> > * wav = AsContainerMessage< std::vector<ISMRMRD::Waveform> >(m1->cont());
    if (wav) {
        if (verbose.value()) {
            GDEBUG_STREAM("Incoming recon_bit with " << wav->getObjectPtr()->size() << " wave form samples ");
        }
    }

    // for every encoding space
    for (size_t e = 0; e < recon_bit_->rbit_.size(); e++) {
        std::stringstream os;
        os << "_encoding_" << e << "_" << process_called_times_;

        GDEBUG_CONDITION_STREAM(verbose.value(), "Calling " << process_called_times_ << " , encoding space : " << e);
        GDEBUG_CONDITION_STREAM(verbose.value(), "======================================================================");

        // ---------------------------------------------------------------
        // export incoming data
        if (!debug_folder_full_path_.empty()) {
            gt_exporter_.export_array_complex(recon_bit_->rbit_[e].data_.data_, debug_folder_full_path_ + "data" + os.str());
        }

        if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].data_.trajectory_) {
            if (recon_bit_->rbit_[e].data_.trajectory_->get_number_of_elements() > 0) {
                gt_exporter_.export_array(*(recon_bit_->rbit_[e].data_.trajectory_), debug_folder_full_path_ + "data_traj" + os.str());
            }
        }

        // ---------------------------------------------------------------
        if (recon_bit_->rbit_[e].ref_) {
            if (!debug_folder_full_path_.empty()) {
                gt_exporter_.export_array_complex(recon_bit_->rbit_[e].ref_->data_, debug_folder_full_path_ + "ref" + os.str());
            }

            if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].ref_->trajectory_) {
                if (recon_bit_->rbit_[e].ref_->trajectory_->get_number_of_elements() > 0) {
                    gt_exporter_.export_array(*(recon_bit_->rbit_[e].ref_->trajectory_), debug_folder_full_path_ + "ref_traj" + os.str());
                }
            }

            // ---------------------------------------------------------------
            // after this step, the recon_obj_[e].ref_calib_ and recon_obj_[e].ref_coil_map_ are set
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::make_ref_coil_map"); }
            this->make_ref_coil_map(*recon_bit_->rbit_[e].ref_, *recon_bit_->rbit_[e].data_.data_.get_dimensions(), recon_obj_[e].ref_calib_, recon_obj_[e].ref_coil_map_, e);
            if (perform_timing.value()) { gt_timer_.stop(); }

            // ----------------------------------------------------------
            // export prepared ref for calibration and coil map
            if (!debug_folder_full_path_.empty()) {
                this->gt_exporter_.export_array_complex(recon_obj_[e].ref_calib_, debug_folder_full_path_ + "ref_calib" + os.str());
            }

            if (!debug_folder_full_path_.empty()) {
                this->gt_exporter_.export_array_complex(recon_obj_[e].ref_coil_map_, debug_folder_full_path_ + "ref_coil_map" + os.str());
            }

            // ---------------------------------------------------------------
            // after this step, the recon_obj_[e].ref_calib_dst_ and recon_obj_[e].ref_coil_map_ are modified
            if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::prepare_down_stream_coil_compression_ref_data"); }
            this->prepare_down_stream_coil_compression_ref_data(recon_obj_[e].ref_calib_, recon_obj_[e].ref_coil_map_,
recon_obj_[e].ref_calib_dst_, e); if (perform_timing.value()) { gt_timer_.stop(); } if (!debug_folder_full_path_.empty()) { this->gt_exporter_.export_array_complex(recon_obj_[e].ref_calib_dst_, debug_folder_full_path_ + "ref_calib_dst" + os.str()); } if (!debug_folder_full_path_.empty()) { this->gt_exporter_.export_array_complex(recon_obj_[e].ref_coil_map_, debug_folder_full_path_ + "ref_coil_map_dst" + os.str()); } // --------------------------------------------------------------- // after this step, coil map is computed and stored in recon_obj_[e].coil_map_ if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::perform_coil_map_estimation"); } this->perform_coil_map_estimation(recon_obj_[e].ref_coil_map_, recon_obj_[e].coil_map_, e); if (perform_timing.value()) { gt_timer_.stop(); } // --------------------------------------------------------------- // after this step, recon_obj_[e].kernel_, recon_obj_[e].kernelIm_, recon_obj_[e].unmixing_coeff_ are filled // gfactor is computed too if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::perform_calib"); } this->perform_calib(recon_bit_->rbit_[e], recon_obj_[e], e); if (perform_timing.value()) { gt_timer_.stop(); } // --------------------------------------------------------------- recon_bit_->rbit_[e].ref_->clear(); recon_bit_->rbit_[e].ref_ = boost::none; } if (recon_bit_->rbit_[e].data_.data_.get_number_of_elements() > 0) { if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(recon_bit_->rbit_[e].data_.data_, debug_folder_full_path_ + "data_before_unwrapping" + os.str()); } if (!debug_folder_full_path_.empty() && recon_bit_->rbit_[e].data_.trajectory_) { if (recon_bit_->rbit_[e].data_.trajectory_->get_number_of_elements() > 0) { gt_exporter_.export_array(*(recon_bit_->rbit_[e].data_.trajectory_), debug_folder_full_path_ + "data_before_unwrapping_traj" + os.str()); } } // --------------------------------------------------------------- if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::perform_unwrapping"); } this->perform_unwrapping(recon_bit_->rbit_[e], recon_obj_[e], e); if (perform_timing.value()) { gt_timer_.stop(); } // --------------------------------------------------------------- if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::compute_image_header"); } this->compute_image_header(recon_bit_->rbit_[e], recon_obj_[e].recon_res_, e); if (perform_timing.value()) { gt_timer_.stop(); } // --------------------------------------------------------------- // pass down waveform if(wav) recon_obj_[e].recon_res_.waveform_ = *wav->getObjectPtr(); recon_obj_[e].recon_res_.acq_headers_ = recon_bit_->rbit_[e].data_.headers_; // --------------------------------------------------------------- if (!debug_folder_full_path_.empty()) { this->gt_exporter_.export_array_complex(recon_obj_[e].recon_res_.data_, debug_folder_full_path_ + "recon_res" + os.str()); } if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::send_out_image_array"); } this->send_out_image_array(recon_obj_[e].recon_res_, e, image_series.value() + ((int) e + 1), GADGETRON_IMAGE_REGULAR); if (perform_timing.value()) { gt_timer_.stop(); } // --------------------------------------------------------------- if (send_out_gfactor.value() && recon_obj_[e].gfactor_.get_number_of_elements() > 0 && (acceFactorE1_[e] * acceFactorE2_[e] > 1)) { IsmrmrdImageArray res; Gadgetron::real_to_complex(recon_obj_[e].gfactor_, res.data_); 
res.headers_ = recon_obj_[e].recon_res_.headers_; res.meta_ = recon_obj_[e].recon_res_.meta_; if (perform_timing.value()) { gt_timer_.start("GenericReconCartesianGrappaGadget::send_out_image_array, gfactor"); } this->send_out_image_array(res, e, image_series.value() + 10 * ((int) e + 2), GADGETRON_IMAGE_GFACTOR); if (perform_timing.value()) { gt_timer_.stop(); } } // --------------------------------------------------------------- if (send_out_snr_map.value()) { hoNDArray<std::complex<float> > snr_map; if (calib_mode_[e] == Gadgetron::ISMRMRD_noacceleration) { snr_map = recon_obj_[e].recon_res_.data_; } else { if (recon_obj_[e].gfactor_.get_number_of_elements() > 0) { if (perform_timing.value()) { gt_timer_.start("compute SNR map array"); } this->compute_snr_map(recon_obj_[e], snr_map); if (perform_timing.value()) { gt_timer_.stop(); } } } if (snr_map.get_number_of_elements() > 0) { if (!debug_folder_full_path_.empty()) { this->gt_exporter_.export_array_complex(snr_map, debug_folder_full_path_ + "snr_map" + os.str()); } if (perform_timing.value()) { gt_timer_.start("send out gfactor array, snr map"); } IsmrmrdImageArray res; res.data_ = snr_map; res.headers_ = recon_obj_[e].recon_res_.headers_; res.meta_ = recon_obj_[e].recon_res_.meta_; res.acq_headers_ = recon_bit_->rbit_[e].data_.headers_; this->send_out_image_array(res, e, image_series.value() + 100 * ((int) e + 3), GADGETRON_IMAGE_SNR_MAP); if (perform_timing.value()) { gt_timer_.stop(); } } } } recon_obj_[e].recon_res_.data_.clear(); recon_obj_[e].gfactor_.clear(); recon_obj_[e].recon_res_.headers_.clear(); recon_obj_[e].recon_res_.meta_.clear(); } m1->release(); if (perform_timing.value()) { gt_timer_local_.stop(); } return GADGET_OK; }
void GadgetMessageImageArray::dump() { unsigned int ii; GDEBUG_STREAM("GadgetMessageImageArray" << std::endl); GDEBUG_STREAM("==========================================================" << std::endl); GDEBUG_STREAM("matrix_size : "); for ( ii=0; ii<10; ii++ ) { GDEBUG_STREAM(matrix_size[ii] << " "); } GDEBUG_STREAM(std::endl); GDEBUG_STREAM("----------------------------------------------------------" << std::endl); GDEBUG_STREAM("kSpace_centre_col_no : " << kSpace_centre_col_no << std::endl); GDEBUG_STREAM("kSpace_max_acquired_col_no : " << kSpace_max_acquired_col_no << std::endl); GDEBUG_STREAM("----------------------------------------------------------" << std::endl); GDEBUG_STREAM("kSpace_centre_line_no : " << kSpace_centre_line_no << std::endl); GDEBUG_STREAM("kSpace_max_acquired_line_no : " << kSpace_max_acquired_line_no << std::endl); GDEBUG_STREAM("----------------------------------------------------------" << std::endl); GDEBUG_STREAM("kSpace_centre_partition_no : " << kSpace_centre_partition_no << std::endl); GDEBUG_STREAM("kSpace_max_acquired_partition_no : " << kSpace_max_acquired_partition_no << std::endl); GDEBUG_STREAM("----------------------------------------------------------" << std::endl); if ( imageArray_ ) { int slc, par, eco, phs, rep, set, seg; for ( seg=0; seg<matrix_size[9]; seg++ ) { for ( set=0; set<matrix_size[8]; set++ ) { for ( rep=0; rep<matrix_size[7]; rep++ ) { for ( phs=0; phs<matrix_size[6]; phs++ ) { for ( eco=0; eco<matrix_size[5]; eco++ ) { for ( par=0; par<matrix_size[4]; par++ ) { for ( slc=0; slc<matrix_size[3]; slc++ ) { int offset = get_offset(slc, par, eco, phs, rep, set, seg); std::cout << "[Slice Partition Echo Phase Rep Set Seg] = [" << " " << slc << " " << par << " " << eco << " " << phs << " " << rep << " " << set << " " << seg << "]" << std::endl; imageArray_[offset].dump(); } } } } } } } } GDEBUG_STREAM("==========================================================" << std::endl); }
int GenericReconCartesianReferencePrepGadget::process_config(ACE_Message_Block* mb)
{
    ISMRMRD::IsmrmrdHeader h;
    try { deserialize(mb->rd_ptr(), h); }
    catch (...) { GDEBUG("Error parsing ISMRMRD Header"); }

    if (!h.acquisitionSystemInformation) {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    calib_mode_.resize(NE, ISMRMRD_noacceleration);
    ref_prepared_.resize(NE, false);

    for (size_t e = 0; e < h.encoding.size(); e++) {
        ISMRMRD::EncodingSpace e_space = h.encoding[e].encodedSpace;
        ISMRMRD::EncodingSpace r_space = h.encoding[e].reconSpace;
        ISMRMRD::EncodingLimits e_limits = h.encoding[e].encodingLimits;

        GDEBUG_CONDITION_STREAM(verbose.value(), "---> Encoding space : " << e << " <---");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding matrix size: " << e_space.matrixSize.x << " " << e_space.matrixSize.y << " " << e_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding field_of_view : " << e_space.fieldOfView_mm.x << " " << e_space.fieldOfView_mm.y << " " << e_space.fieldOfView_mm.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon matrix size : " << r_space.matrixSize.x << " " << r_space.matrixSize.y << " " << r_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon field_of_view : " << r_space.fieldOfView_mm.x << " " << r_space.fieldOfView_mm.y << " " << r_space.fieldOfView_mm.z);

        if (!h.encoding[e].parallelImaging) {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
        }
        else {
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << p_imaging.accelerationFactor.kspace_encoding_step_1);
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << p_imaging.accelerationFactor.kspace_encoding_step_2);

            std::string calib = *p_imaging.calibrationMode;

            bool separate = (calib.compare("separate") == 0);
            bool embedded = (calib.compare("embedded") == 0);
            bool external = (calib.compare("external") == 0);
            bool interleaved = (calib.compare("interleaved") == 0);
            bool other = (calib.compare("other") == 0);

            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (p_imaging.accelerationFactor.kspace_encoding_step_1 > 1 || p_imaging.accelerationFactor.kspace_encoding_step_2 > 1) {
                if (interleaved) calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                else if (embedded) calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                else if (separate) calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                else if (external) calib_mode_[e] = Gadgetron::ISMRMRD_external;
                else if (other) calib_mode_[e] = Gadgetron::ISMRMRD_other;
            }
        }
    }

    // ---------------------------------------------------------------------------------------------------------

    return GADGET_OK;
}
int CmrParametricT2MappingGadget::perform_mapping(IsmrmrdImageArray& data, IsmrmrdImageArray& map, IsmrmrdImageArray& para, IsmrmrdImageArray& map_sd, IsmrmrdImageArray& para_sd) { try { if (perform_timing.value()) { gt_timer_.start("CmrParametricT2MappingGadget::perform_mapping"); } GDEBUG_CONDITION_STREAM(verbose.value(), "CmrParametricT2MappingGadget::perform_mapping(...) starts ... "); size_t RO = data.data_.get_size(0); size_t E1 = data.data_.get_size(1); size_t E2 = data.data_.get_size(2); size_t CHA = data.data_.get_size(3); size_t N = data.data_.get_size(4); size_t S = data.data_.get_size(5); size_t SLC = data.data_.get_size(6); size_t ro, e1, s, slc, p; GADGET_CHECK_RETURN(E2 == 1, GADGET_FAIL); GADGET_CHECK_RETURN(CHA == 1, GADGET_FAIL); GADGET_CHECK_RETURN(this->prep_times_.size() >= N, GADGET_FAIL); hoNDArray<float> mag; Gadgetron::abs(data.data_, mag); if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array(mag, debug_folder_full_path_ + "CmrParametricT2Mapping_data_mag"); } bool need_sd_map = send_sd_map.value(); Gadgetron::GadgetronTimer gt_timer(false); // ------------------------------------------------------------- // set mapping parameters Gadgetron::CmrT2Mapping<float> t2_mapper; t2_mapper.fill_holes_in_maps_ = perform_hole_filling.value(); t2_mapper.max_size_of_holes_ = max_size_hole.value(); t2_mapper.compute_SD_maps_ = need_sd_map; t2_mapper.ti_.resize(N, 0); memcpy(&(t2_mapper.ti_)[0], &this->prep_times_[0], sizeof(float)*N); t2_mapper.data_.create(RO, E1, N, S, SLC, mag.begin()); t2_mapper.max_iter_ = max_iter.value(); t2_mapper.thres_fun_ = thres_func.value(); t2_mapper.max_map_value_ = max_T2.value(); t2_mapper.verbose_ = verbose.value(); t2_mapper.debug_folder_ = debug_folder_full_path_; t2_mapper.perform_timing_ = perform_timing.value(); // ------------------------------------------------------------- // compute mask if needed if (mapping_with_masking.value()) { t2_mapper.mask_for_mapping_.create(RO, E1, SLC); // get the image with shortest prep time hoNDArray<float> mag_shortest_TE; mag_shortest_TE.create(RO, E1, SLC); for (slc = 0; slc < SLC; slc++) { size_t ind = 0; float min_te = this->prep_times_[0]; for (size_t n = 1; n < this->prep_times_.size(); n++) { if(this->prep_times_[n]<min_te) { min_te = this->prep_times_[n]; ind = n; } } memcpy(&mag_shortest_TE(0, 0, slc), &mag(0, 0, ind, 0, slc), sizeof(float)*RO*E1); } if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array(mag_shortest_TE, debug_folder_full_path_ + "CmrParametricT2Mapping_mag_shortest_TE"); } double scale_factor = 1.0; if (data.meta_[0].length(GADGETRON_IMAGE_SCALE_RATIO) > 0) { scale_factor = data.meta_[0].as_double(GADGETRON_IMAGE_SCALE_RATIO); } GDEBUG_STREAM("CmrParametricT2MappingGadget, find incoming image has scale factor of " << scale_factor); if (perform_timing.value()) { gt_timer.start("CmrParametricT2MappingGadget::compute_mask_for_mapping"); } this->compute_mask_for_mapping(mag, t2_mapper.mask_for_mapping_, (float)scale_factor); if (perform_timing.value()) { gt_timer.stop(); } if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array(t2_mapper.mask_for_mapping_, debug_folder_full_path_ + "CmrParametricT2Mapping_mask_for_mapping"); } } // ------------------------------------------------------------- // perform mapping if (perform_timing.value()) { gt_timer.start("CmrParametricT2MappingGadget, t2_mapper.perform_parametric_mapping"); } t2_mapper.perform_parametric_mapping(); if (perform_timing.value()) { gt_timer.stop(); } size_t num_para = 
t2_mapper.get_num_of_paras(); // ------------------------------------------------------------- // get the results map.data_.create(RO, E1, E2, CHA, 1, S, SLC); Gadgetron::clear(map.data_); map.headers_.create(1, S, SLC); map.meta_.resize(S*SLC); para.data_.create(RO, E1, E2, CHA, num_para, S, SLC); Gadgetron::clear(para.data_); para.headers_.create(num_para, S, SLC); para.meta_.resize(num_para*S*SLC); if (need_sd_map) { map_sd.data_.create(RO, E1, E2, CHA, 1, S, SLC); Gadgetron::clear(map_sd.data_); map_sd.headers_.create(1, S, SLC); map_sd.meta_.resize(S*SLC); para_sd.data_.create(RO, E1, E2, CHA, num_para, S, SLC); Gadgetron::clear(para_sd.data_); para_sd.headers_.create(num_para, S, SLC); para_sd.meta_.resize(num_para*S*SLC); } for (slc = 0; slc < SLC; slc++) { for (s = 0; s < S; s++) { for (e1 = 0; e1 < E1; e1++) { for (ro = 0; ro < RO; ro++) { map.data_(ro, e1, 0, 0, 0, s, slc) = t2_mapper.map_(ro, e1, s, slc); if (need_sd_map) { map_sd.data_(ro, e1, 0, 0, 0, s, slc) = t2_mapper.sd_map_(ro, e1, s, slc); } for (p = 0; p < num_para; p++) { para.data_(ro, e1, 0, 0, p, s, slc) = t2_mapper.para_(ro, e1, p, s, slc); if (need_sd_map) { para_sd.data_(ro, e1, 0, 0, p, s, slc) = t2_mapper.sd_para_(ro, e1, p, s, slc); } } } } size_t slc_ind = data.headers_(0, s, slc).slice; map.headers_(0, s, slc) = data.headers_(0, s, slc); map.headers_(0, s, slc).image_index = 1 + slc_ind; map.headers_(0, s, slc).image_series_index = 11; map.meta_[s+slc*S] = data.meta_[s + slc*S]; map.meta_[s + slc*S].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_T2MAP); map.meta_[s + slc*S].append(GADGETRON_SEQUENCEDESCRIPTION, GADGETRON_IMAGE_T2MAP); map.meta_[s + slc*S].append(GADGETRON_IMAGEPROCESSINGHISTORY, GADGETRON_IMAGE_T2MAP); if (need_sd_map) { /* the SD arrays are only created when need_sd_map is true, so only touch them in that case */ map_sd.headers_(0, s, slc) = data.headers_(0, s, slc); map_sd.headers_(0, s, slc).image_index = 1 + slc_ind; map_sd.headers_(0, s, slc).image_series_index = 12; map_sd.meta_[s + slc*S] = data.meta_[s + slc*S]; map_sd.meta_[s + slc*S].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_T2SDMAP); map_sd.meta_[s + slc*S].append(GADGETRON_SEQUENCEDESCRIPTION, GADGETRON_IMAGE_T2SDMAP); map_sd.meta_[s + slc*S].append(GADGETRON_IMAGEPROCESSINGHISTORY, GADGETRON_IMAGE_T2SDMAP); } for (p = 0; p < num_para; p++) { /* parameter headers are always filled; the SD copies only when requested */ para.headers_(p, s, slc) = data.headers_(0, s, slc); para.headers_(p, s, slc).image_index = 1 + p + slc_ind*num_para; para.meta_[p + s*num_para + slc*num_para*S] = data.meta_[s + slc*S]; if (need_sd_map) { para_sd.headers_(p, s, slc) = data.headers_(0, s, slc); para_sd.headers_(p, s, slc).image_index = 1 + p + slc_ind*num_para; para_sd.meta_[p + s*num_para + slc*num_para*S] = data.meta_[s + slc*S]; } } } } // ------------------------------------------------------------- if (perform_timing.value()) { gt_timer_.stop(); } } catch (...) { GERROR_STREAM("Exceptions happened in CmrParametricT2MappingGadget::perform_mapping(...) ... "); return GADGET_FAIL; } return GADGET_OK; }
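// Conceptual sketch only: the CmrT2Mapping engine used above performs a regularized nonlinear
// per-pixel fit with hole filling and SD maps; the helper below merely shows the underlying
// signal model S(TE) = A * exp(-TE / T2) fitted log-linearly for a single pixel. The function
// name is hypothetical and the code is not part of the gadget.
#include <algorithm>
#include <cmath>
#include <vector>

static float fit_t2_loglinear_example(const std::vector<float>& prep_times_ms, const std::vector<float>& mag)
{
    const size_t n = std::min(prep_times_ms.size(), mag.size());
    double sx = 0, sy = 0, sxx = 0, sxy = 0; size_t m = 0;
    for (size_t i = 0; i < n; i++)
    {
        if (mag[i] <= 0) continue;                       // log is undefined for non-positive samples
        double x = prep_times_ms[i], y = std::log(mag[i]);
        sx += x; sy += y; sxx += x * x; sxy += x * y; m++;
    }
    if (m < 2) return 0.0f;
    double denom = m * sxx - sx * sx;
    if (std::abs(denom) < 1e-12) return 0.0f;
    double slope = (m * sxy - sx * sy) / denom;          // slope estimates -1/T2
    return (slope < 0) ? (float)(-1.0 / slope) : 0.0f;   // T2 in ms, 0 if the fit is degenerate
}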
int MultiChannelCartesianGrappaReconGadget::send_out_image_array(IsmrmrdReconBit& recon_bit, IsmrmrdImageArray& res, size_t encoding, int series_num, const std::string& data_role) { try { size_t RO = res.data_.get_size(0); size_t E1 = res.data_.get_size(1); size_t E2 = res.data_.get_size(2); size_t CHA = res.data_.get_size(3); size_t N = res.data_.get_size(4); size_t S = res.data_.get_size(5); size_t SLC = res.data_.get_size(6); GDEBUG_CONDITION_STREAM(true, "sending out image array, acquisition boundary [RO E1 E2 CHA N S SLC] = [" << RO << " " << E1 << " " << E2 << " " << CHA << " " << N << " " << S << " " << SLC << "] "); // compute image numbers and fill the image meta size_t n, s, slc; for (slc = 0; slc < SLC; slc++) { for (s = 0; s < S; s++) { for (n = 0; n < N; n++) { ISMRMRD::ImageHeader header = res.headers_(n, s, slc); if (header.measurement_uid == 0) continue; res.headers_(n, s, slc).image_index = (uint16_t)this->compute_image_number(res.headers_(n, s, slc), encoding, CHA, 0, E2); res.headers_(n, s, slc).image_series_index = series_num; size_t offset = n + s*N + slc*N*S; res.meta_[offset].set(GADGETRON_IMAGENUMBER, (long)res.headers_(n, s, slc).image_index); res.meta_[offset].set(GADGETRON_IMAGEPROCESSINGHISTORY, "GT"); if (data_role == GADGETRON_IMAGE_REGULAR) { res.headers_(n, s, slc).image_type = ISMRMRD::ISMRMRD_IMTYPE_MAGNITUDE; res.meta_[offset].append(GADGETRON_IMAGECOMMENT, "GT"); res.meta_[offset].append(GADGETRON_SEQUENCEDESCRIPTION, "_GT"); res.meta_[offset].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_REGULAR); } else if (data_role == GADGETRON_IMAGE_GFACTOR) { res.headers_(n, s, slc).image_type = ISMRMRD::ISMRMRD_IMTYPE_MAGNITUDE; res.meta_[offset].append(GADGETRON_IMAGECOMMENT, GADGETRON_IMAGE_GFACTOR); res.meta_[offset].append(GADGETRON_SEQUENCEDESCRIPTION, GADGETRON_IMAGE_GFACTOR); res.meta_[offset].set(GADGETRON_DATA_ROLE, GADGETRON_IMAGE_GFACTOR); // set the skip processing flag, so gfactor map will not be processed during e.g. partial fourier handling or kspace filter gadgets res.meta_[offset].set(GADGETRON_SKIP_PROCESSING_AFTER_RECON, (long)1); // set the flag to use dedicated scaling factor res.meta_[offset].set(GADGETRON_USE_DEDICATED_SCALING_FACTOR, (long)1); } if (verbose.value()) { for (size_t cha = 0; cha < CHA; cha++) { GDEBUG_STREAM("sending out " << data_role << " image [CHA SLC CON PHS REP SET AVE] = [" << cha << " "<< res.headers_(n, s, slc).slice << " " << res.headers_(n, s, slc).contrast << " "<< res.headers_(n, s, slc).phase << " " << res.headers_(n, s, slc).repetition << " " << res.headers_(n, s, slc).set << " " << res.headers_(n, s, slc).average << " " << "] "<< " -- Image number -- " << res.headers_(n, s, slc).image_index); } } } } } // send out the images Gadgetron::GadgetContainerMessage<IsmrmrdImageArray>* cm1 = new Gadgetron::GadgetContainerMessage<IsmrmrdImageArray>(); *(cm1->getObjectPtr()) = res; if (this->next()->putq(cm1) < 0) { GERROR_STREAM("Put image array to Q failed ... "); return GADGET_FAIL; } } catch (...) { GERROR_STREAM("Errors in MultiChannelCartesianGrappaReconGadget::send_out_image_array(...) ... "); return GADGET_FAIL; } return GADGET_OK; }
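// Hypothetical illustration of image numbering: compute_image_number() is not reproduced here;
// this sketch only shows the general idea of packing the acquisition counters into a unique,
// 1-based image_index so downstream consumers can sort images deterministically.
#include <cstdint>

static uint16_t pack_image_number_example(uint16_t slice, uint16_t phase, uint16_t repetition, uint16_t set,
                                          uint16_t num_phase, uint16_t num_rep, uint16_t num_set)
{
    uint32_t idx = (uint32_t)slice * num_phase * num_rep * num_set
                 + (uint32_t)phase * num_rep * num_set
                 + (uint32_t)repetition * num_set
                 + set;
    return (uint16_t)(idx + 1); // image numbers are conventionally 1-based
}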
void GenericReconCartesianGrappaGadget::perform_unwrapping(IsmrmrdReconBit &recon_bit, ReconObjType &recon_obj, size_t e) { typedef std::complex<float> T; typedef std::complex<float> T; size_t RO = recon_bit.data_.data_.get_size(0); size_t E1 = recon_bit.data_.data_.get_size(1); size_t E2 = recon_bit.data_.data_.get_size(2); size_t dstCHA = recon_bit.data_.data_.get_size(3); size_t N = recon_bit.data_.data_.get_size(4); size_t S = recon_bit.data_.data_.get_size(5); size_t SLC = recon_bit.data_.data_.get_size(6); hoNDArray<std::complex<float> > &src = recon_obj.ref_calib_; size_t ref_RO = src.get_size(0); size_t ref_E1 = src.get_size(1); size_t ref_E2 = src.get_size(2); size_t srcCHA = src.get_size(3); size_t ref_N = src.get_size(4); size_t ref_S = src.get_size(5); size_t ref_SLC = src.get_size(6); size_t unmixingCoeff_CHA = recon_obj.unmixing_coeff_.get_size(3); size_t convkRO = recon_obj.kernel_.get_size(0); size_t convkE1 = recon_obj.kernel_.get_size(1); size_t convkE2 = recon_obj.kernel_.get_size(2); recon_obj.recon_res_.data_.create(RO, E1, E2, 1, N, S, SLC); if (!debug_folder_full_path_.empty()) { std::stringstream os; os << "encoding_" << e; std::string suffix = os.str(); gt_exporter_.export_array_complex(recon_bit.data_.data_, debug_folder_full_path_ + "data_src_" + suffix); } // compute aliased images data_recon_buf_.create(RO, E1, E2, dstCHA, N, S, SLC); if (E2 > 1) { Gadgetron::hoNDFFT<float>::instance()->ifft3c(recon_bit.data_.data_, complex_im_recon_buf_, data_recon_buf_); } else { Gadgetron::hoNDFFT<float>::instance()->ifft2c(recon_bit.data_.data_, complex_im_recon_buf_, data_recon_buf_); } // SNR unit scaling float effective_acce_factor(1), snr_scaling_ratio(1); this->compute_snr_scaling_factor(recon_bit, effective_acce_factor, snr_scaling_ratio); if (effective_acce_factor > 1) { // since the grappa in gadgetron is doing signal preserving scaling, to perserve noise level, we need this compensation factor double grappaKernelCompensationFactor = 1.0 / (acceFactorE1_[e] * acceFactorE2_[e]); Gadgetron::scal((float) (grappaKernelCompensationFactor * snr_scaling_ratio), complex_im_recon_buf_); if (this->verbose.value()) GDEBUG_STREAM( "GenericReconCartesianGrappaGadget, grappaKernelCompensationFactor*snr_scaling_ratio : " << grappaKernelCompensationFactor * snr_scaling_ratio); } if (!debug_folder_full_path_.empty()) { std::stringstream os; os << "encoding_" << e; std::string suffix = os.str(); gt_exporter_.export_array_complex(complex_im_recon_buf_, debug_folder_full_path_ + "aliasedIm_" + suffix); } // unwrapping long long num = N * S * SLC; long long ii; #pragma omp parallel default(none) private(ii) shared(num, N, S, RO, E1, E2, srcCHA, convkRO, convkE1, convkE2, ref_N, ref_S, recon_obj, dstCHA, unmixingCoeff_CHA, e) if(num>1) { #pragma omp for for (ii = 0; ii < num; ii++) { size_t slc = ii / (N * S); size_t s = (ii - slc * N * S) / N; size_t n = ii - slc * N * S - s * N; // combined channels T *pIm = &(complex_im_recon_buf_(0, 0, 0, 0, n, s, slc)); size_t usedN = n; if (n >= ref_N) usedN = ref_N - 1; size_t usedS = s; if (s >= ref_S) usedS = ref_S - 1; T *pUnmix = &(recon_obj.unmixing_coeff_(0, 0, 0, 0, usedN, usedS, slc)); T *pRes = &(recon_obj.recon_res_.data_(0, 0, 0, 0, n, s, slc)); hoNDArray<std::complex<float> > res(RO, E1, E2, 1, pRes); hoNDArray<std::complex<float> > unmixing(RO, E1, E2, unmixingCoeff_CHA, pUnmix); hoNDArray<std::complex<float> > aliasedIm(RO, E1, E2, ((unmixingCoeff_CHA <= srcCHA) ? 
unmixingCoeff_CHA : srcCHA), 1, pIm); Gadgetron::apply_unmix_coeff_aliased_image_3D(aliasedIm, unmixing, res); } } if (!debug_folder_full_path_.empty()) { std::stringstream os; os << "encoding_" << e; std::string suffix = os.str(); gt_exporter_.export_array_complex(recon_obj.recon_res_.data_, debug_folder_full_path_ + "unwrappedIm_" + suffix); } }
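// Sketch of the channel combination performed by apply_unmix_coeff_aliased_image(_3D) above:
// the unaliased, coil-combined pixel is the channel-wise dot product of the aliased
// multi-channel image with the GRAPPA unmixing coefficients at that pixel. A flat
// [npix x ncha] memory layout is assumed purely for illustration.
#include <complex>
#include <vector>

static void apply_unmixing_example(const std::vector<std::complex<float> >& aliased, // npix * ncha
                                   const std::vector<std::complex<float> >& unmix,   // npix * ncha
                                   size_t npix, size_t ncha,
                                   std::vector<std::complex<float> >& combined)      // npix
{
    combined.assign(npix, std::complex<float>(0, 0));
    for (size_t cha = 0; cha < ncha; cha++)
        for (size_t p = 0; p < npix; p++)
            combined[p] += unmix[p + cha * npix] * aliased[p + cha * npix];
}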
int MultiChannelCartesianGrappaReconGadget::process_config(ACE_Message_Block* mb) { ISMRMRD::IsmrmrdHeader h; try { deserialize(mb->rd_ptr(), h); } catch (...) { GDEBUG("Error parsing ISMRMRD Header"); } if (!h.acquisitionSystemInformation) { GDEBUG("acquisitionSystemInformation not found in header. Bailing out"); return GADGET_FAIL; } // ------------------------------------------------- size_t NE = h.encoding.size(); num_encoding_spaces_ = NE; GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE); meas_max_idx_.resize(NE); acceFactorE1_.resize(NE, 1); acceFactorE2_.resize(NE, 1); calib_mode_.resize(NE, ISMRMRD_noacceleration); recon_obj_.resize(NE); size_t e; for (e = 0; e < h.encoding.size(); e++) { ISMRMRD::EncodingSpace e_space = h.encoding[e].encodedSpace; ISMRMRD::EncodingSpace r_space = h.encoding[e].reconSpace; ISMRMRD::EncodingLimits e_limits = h.encoding[e].encodingLimits; GDEBUG_CONDITION_STREAM(verbose.value(), "---> Encoding space : " << e << " <---"); GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding matrix size: " << e_space.matrixSize.x << " " << e_space.matrixSize.y << " " << e_space.matrixSize.z); GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding field_of_view : " << e_space.fieldOfView_mm.x << " " << e_space.fieldOfView_mm.y << " " << e_space.fieldOfView_mm.z); GDEBUG_CONDITION_STREAM(verbose.value(), "Recon matrix size : " << r_space.matrixSize.x << " " << r_space.matrixSize.y << " " << r_space.matrixSize.z); GDEBUG_CONDITION_STREAM(verbose.value(), "Recon field_of_view : " << r_space.fieldOfView_mm.x << " " << r_space.fieldOfView_mm.y << " " << r_space.fieldOfView_mm.z); meas_max_idx_[e].kspace_encode_step_1 = (uint16_t)e_space.matrixSize.y - 1; meas_max_idx_[e].set = (e_limits.set && (e_limits.set->maximum > 0)) ? e_limits.set->maximum : 0; meas_max_idx_[e].phase = (e_limits.phase && (e_limits.phase->maximum > 0)) ? e_limits.phase->maximum : 0; meas_max_idx_[e].kspace_encode_step_2 = (uint16_t)e_space.matrixSize.z - 1; meas_max_idx_[e].contrast = (e_limits.contrast && (e_limits.contrast->maximum > 0)) ? e_limits.contrast->maximum : 0; meas_max_idx_[e].slice = (e_limits.slice && (e_limits.slice->maximum > 0)) ? e_limits.slice->maximum : 0; meas_max_idx_[e].repetition = e_limits.repetition ? e_limits.repetition->maximum : 0; meas_max_idx_[e].average = e_limits.average ? e_limits.average->maximum : 0; meas_max_idx_[e].segment = 0; if (!h.encoding[e].parallelImaging) { GDEBUG_STREAM("Parallel Imaging section not found in header"); calib_mode_[e] = ISMRMRD_noacceleration; acceFactorE1_[e] = 1; acceFactorE2_[e] = 1; } else { ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging; acceFactorE1_[e] = p_imaging.accelerationFactor.kspace_encoding_step_1; acceFactorE2_[e] = p_imaging.accelerationFactor.kspace_encoding_step_2; GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << acceFactorE1_[e]); GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << acceFactorE2_[e]); std::string calib = *p_imaging.calibrationMode; bool separate = (calib.compare("separate") == 0); bool embedded = (calib.compare("embedded") == 0); bool external = (calib.compare("external") == 0); bool interleaved = (calib.compare("interleaved") == 0); bool other = (calib.compare("other") == 0); calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration; if (acceFactorE1_[e] > 1 || acceFactorE2_[e] > 1) { if (interleaved) calib_mode_[e] = Gadgetron::ISMRMRD_interleaved; else if (embedded) calib_mode_[e] = Gadgetron::ISMRMRD_embedded; else if (separate) calib_mode_[e] = Gadgetron::ISMRMRD_separate; else if (external) calib_mode_[e] = Gadgetron::ISMRMRD_external; else if (other) calib_mode_[e] = Gadgetron::ISMRMRD_other; } } } // --------------------------------------------------------------------------------------------------------- /*if (!debug_folder.value().empty()) { Gadgetron::get_debug_folder_path(debug_folder.value(), debug_folder_full_path_); GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is " << debug_folder_full_path_); } else { GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is not set ... "); }*/ return GADGET_OK; }
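// Minimal sketch of the defaulting rules used when filling meas_max_idx_ above: optional ISMRMRD
// encoding limits fall back to 0 when absent or zero, while the encoding-step limits default to
// matrixSize - 1. The boolean/uint16_t pair stands in for the ISMRMRD optional limit type and
// the function names are illustrative only.
#include <cstdint>

static uint16_t limit_or_zero_example(bool limit_present, uint16_t limit_maximum)
{
    return (limit_present && limit_maximum > 0) ? limit_maximum : 0;
}

static uint16_t encode_step_limit_example(uint16_t matrix_size)
{
    return (matrix_size > 0) ? (uint16_t)(matrix_size - 1) : 0;
}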
int GenericReconEigenChannelGadget::process(Gadgetron::GadgetContainerMessage< IsmrmrdReconData >* m1) { if (perform_timing.value()) { gt_timer_.start("GenericReconEigenChannelGadget::process"); } process_called_times_++; IsmrmrdReconData* recon_bit_ = m1->getObjectPtr(); if (recon_bit_->rbit_.size() > num_encoding_spaces_) { GWARN_STREAM("Incoming recon_bit has more encoding spaces than the protocol : " << recon_bit_->rbit_.size() << " instead of " << num_encoding_spaces_); } // for every encoding space, prepare the recon_bit_->rbit_[e].ref_ size_t e, n, s, slc; for (e = 0; e < recon_bit_->rbit_.size(); e++) { auto & rbit = recon_bit_->rbit_[e]; std::stringstream os; os << "_encoding_" << e; hoNDArray< std::complex<float> >& data = recon_bit_->rbit_[e].data_.data_; size_t RO = data.get_size(0); size_t E1 = data.get_size(1); size_t E2 = data.get_size(2); size_t CHA = data.get_size(3); size_t N = data.get_size(4); size_t S = data.get_size(5); size_t SLC = data.get_size(6); GDEBUG_CONDITION_STREAM(verbose.value(), "GenericReconEigenChannelGadget - incoming data array : [RO E1 E2 CHA N S SLC] - [" << RO << " " << E1 << " " << E2 << " " << CHA << " " << N << " " << S << " " << SLC << "]"); // whether it is needed to update coefficients bool recompute_coeff = false; if ( (KLT_[e].size()!=SLC) || update_eigen_channel_coefficients.value() ) { recompute_coeff = true; } else { if(KLT_[e].size() == SLC) { for (slc = 0; slc < SLC; slc++) { if (KLT_[e][slc].size() != S) { recompute_coeff = true; break; } else { for (s = 0; s < S; s++) { if (KLT_[e][slc][s].size() != N) { recompute_coeff = true; break; } } } } } } if(recompute_coeff) { bool average_N = average_all_ref_N.value(); bool average_S = average_all_ref_S.value(); if(rbit.ref_) { // use ref to compute coefficients Gadgetron::compute_eigen_channel_coefficients(rbit.ref_->data_, average_N, average_S, (calib_mode_[e] == Gadgetron::ISMRMRD_interleaved), N, S, upstream_coil_compression_thres.value(), upstream_coil_compression_num_modesKept.value(), KLT_[e]); } else { // use data to compute coefficients Gadgetron::compute_eigen_channel_coefficients(rbit.data_.data_, average_N, average_S, (calib_mode_[e] == Gadgetron::ISMRMRD_interleaved), N, S, upstream_coil_compression_thres.value(), upstream_coil_compression_num_modesKept.value(), KLT_[e]); } if (verbose.value()) { hoNDArray< std::complex<float> > E; for (slc = 0; slc < SLC; slc++) { for (s = 0; s < S; s++) { for (n = 0; n < N; n++) { KLT_[e][slc][s][n].eigen_value(E); GDEBUG_STREAM("Number of modes kept: " << KLT_[e][slc][s][n].output_length() << "; Eigen value, slc - " << slc << ", S - " << s << ", N - " << n << " : ["); for (size_t c = 0; c < E.get_size(0); c++) { GDEBUG_STREAM(" " << E(c)); } GDEBUG_STREAM("]"); } } } } } /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(rbit.data_.data_, debug_folder_full_path_ + "data_before_KLT" + os.str()); }*/ // apply KL coefficients Gadgetron::apply_eigen_channel_coefficients(KLT_[e], rbit.data_.data_); /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(rbit.data_.data_, debug_folder_full_path_ + "data_after_KLT" + os.str()); }*/ if (rbit.ref_) { /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(rbit.ref_->data_, debug_folder_full_path_ + "ref_before_KLT" + os.str()); }*/ Gadgetron::apply_eigen_channel_coefficients(KLT_[e], rbit.ref_->data_); /*if (!debug_folder_full_path_.empty()) { gt_exporter_.exportArrayComplex(rbit.ref_->data_, debug_folder_full_path_ + "ref_after_KLT" + 
os.str()); }*/ } } if (perform_timing.value()) { gt_timer_.stop(); } if (this->next()->putq(m1) < 0) { GERROR_STREAM("Put IsmrmrdReconData to Q failed ... "); return GADGET_FAIL; } return GADGET_OK; }
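// Sketch of the mode-selection rule behind the eigen-channel (KLT) coil compression used above:
// given channel eigenvalues sorted in descending order, keep either a fixed number of modes or
// all modes whose energy relative to the dominant mode exceeds a threshold. This mirrors the
// intent of the thres / num_modesKept parameters but is not the Gadgetron implementation.
#include <vector>

static size_t modes_to_keep_example(const std::vector<float>& eigen_values_desc, double thres, int num_modes_kept)
{
    if (num_modes_kept > 0) return (size_t)num_modes_kept;          // explicit mode count wins
    if (eigen_values_desc.empty() || thres <= 0) return eigen_values_desc.size();
    size_t kept = 1;
    for (size_t i = 1; i < eigen_values_desc.size(); i++)
        if (eigen_values_desc[i] / eigen_values_desc[0] > thres) kept = i + 1;
    return kept;                                                    // always keep at least one mode
}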
void GenericReconCartesianNonLinearSpirit2DTGadget::perform_nonlinear_spirit_unwrapping(hoNDArray< std::complex<float> >& kspace, hoNDArray< std::complex<float> >& kerIm, hoNDArray< std::complex<float> >& ref2DT, hoNDArray< std::complex<float> >& coilMap2DT, hoNDArray< std::complex<float> >& res, size_t e) { try { bool print_iter = this->spirit_print_iter.value(); size_t RO = kspace.get_size(0); size_t E1 = kspace.get_size(1); size_t E2 = kspace.get_size(2); size_t CHA = kspace.get_size(3); size_t N = kspace.get_size(4); size_t S = kspace.get_size(5); size_t SLC = kspace.get_size(6); size_t ref_N = kerIm.get_size(4); size_t ref_S = kerIm.get_size(5); hoNDArray< std::complex<float> > kspaceLinear(kspace); res = kspace; // detect whether random sampling is used bool use_random_sampling = false; std::vector<long long> sampled_step_size; long long n, e1; for (n=0; n<(long long)N; n++) { long long prev_sampled_line = -1; for (e1=0; e1<(long long)E1; e1++) { if(std::abs(kspace(RO/2, e1, 0, 0, 0, 0, 0))>0 && std::abs(kspace(RO/2, e1, 0, CHA-1, 0, 0, 0))>0) { if(prev_sampled_line>0) { sampled_step_size.push_back(e1 - prev_sampled_line); } prev_sampled_line = e1; } } } if(sampled_step_size.size()>4) { size_t s; for (s=2; s<sampled_step_size.size()-1; s++) { if(sampled_step_size[s]!=sampled_step_size[s-1]) { use_random_sampling = true; break; } } } if(use_random_sampling) { GDEBUG_STREAM("SPIRIT Non linear, random sampling is detected ... "); } Gadgetron::GadgetronTimer timer(false); // compute linear solution as the initialization if(use_random_sampling) { if (this->perform_timing.value()) timer.start("SPIRIT Non linear, perform linear spirit recon ... "); this->perform_spirit_unwrapping(kspace, kerIm, kspaceLinear); if (this->perform_timing.value()) timer.stop(); } else { if (this->perform_timing.value()) timer.start("SPIRIT Non linear, perform linear recon ... 
"); size_t ref2DT_RO = ref2DT.get_size(0); size_t ref2DT_E1 = ref2DT.get_size(1); // mean over N hoNDArray< std::complex<float> > meanKSpace; Gadgetron::sum_over_dimension(ref2DT, meanKSpace, 4); // if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(meanKSpace, debug_folder_full_path_ + "spirit_nl_2DT_meanKSpace"); } hoNDArray< std::complex<float> > acsSrc(ref2DT_RO, ref2DT_E1, CHA, meanKSpace.begin()); hoNDArray< std::complex<float> > acsDst(ref2DT_RO, ref2DT_E1, CHA, meanKSpace.begin()); double grappa_reg_lamda = 0.0005; size_t kRO = 5; size_t kE1 = 4; hoNDArray< std::complex<float> > convKer; hoNDArray< std::complex<float> > kIm(RO, E1, CHA, CHA); Gadgetron::grappa2d_calib_convolution_kernel(acsSrc, acsDst, (size_t)this->acceFactorE1_[e], grappa_reg_lamda, kRO, kE1, convKer); Gadgetron::grappa2d_image_domain_kernel(convKer, RO, E1, kIm); // if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(kIm, debug_folder_full_path_ + "spirit_nl_2DT_kIm"); Gadgetron::hoNDFFT<float>::instance()->ifft2c(kspace, complex_im_recon_buf_); // if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(complex_im_recon_buf_, debug_folder_full_path_ + "spirit_nl_2DT_aliasedImage"); hoNDArray< std::complex<float> > resKSpace(RO, E1, CHA, N); hoNDArray< std::complex<float> > aliasedImage(RO, E1, CHA, N, complex_im_recon_buf_.begin()); Gadgetron::grappa2d_image_domain_unwrapping_aliased_image(aliasedImage, kIm, resKSpace); // if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(resKSpace, debug_folder_full_path_ + "spirit_nl_2DT_linearImage"); Gadgetron::hoNDFFT<float>::instance()->fft2c(resKSpace); memcpy(kspaceLinear.begin(), resKSpace.begin(), resKSpace.get_number_of_bytes()); if (this->perform_timing.value()) timer.stop(); } // if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(kspaceLinear, debug_folder_full_path_ + "spirit_nl_2DT_kspaceLinear"); // perform nonlinear reconstruction { boost::shared_ptr< hoNDArray< std::complex<float> > > coilMap; bool hasCoilMap = false; if (coilMap2DT.get_size(0) == RO && coilMap2DT.get_size(1) == E1 && coilMap2DT.get_size(3)==CHA) { if (ref_N < N) { coilMap = boost::shared_ptr< hoNDArray< std::complex<float> > >(new hoNDArray< std::complex<float> >(RO, E1, CHA, coilMap2DT.begin())); } else { coilMap = boost::shared_ptr< hoNDArray< std::complex<float> > >(new hoNDArray< std::complex<float> >(RO, E1, CHA, ref_N, coilMap2DT.begin())); } hasCoilMap = true; } boost::shared_ptr<hoNDArray< std::complex<float> > > ker(new hoNDArray< std::complex<float> >(RO, E1, CHA, CHA, ref_N, kerIm.begin())); boost::shared_ptr<hoNDArray< std::complex<float> > > acq(new hoNDArray< std::complex<float> >(RO, E1, CHA, N, kspace.begin())); hoNDArray< std::complex<float> > kspaceInitial(RO, E1, CHA, N, kspaceLinear.begin()); hoNDArray< std::complex<float> > res2DT(RO, E1, CHA, N, res.begin()); if (this->spirit_data_fidelity_lamda.value() > 0) { GDEBUG_STREAM("Start the NL SPIRIT data fidelity iteration - regularization strength : " << this->spirit_image_reg_lamda.value() << " - number of iteration : " << this->spirit_nl_iter_max.value() << " - proximity across cha : " << this->spirit_reg_proximity_across_cha.value() << " - redundant dimension weighting ratio : " << this->spirit_reg_N_weighting_ratio.value() << " - using coil sen map : " << this->spirit_reg_use_coil_sen_map.value() << " - iter thres : " << this->spirit_nl_iter_thres.value()); typedef hoGdSolver< hoNDArray< std::complex<float> >, 
hoWavelet2DTOperator< std::complex<float> > > SolverType; SolverType solver; solver.iterations_ = this->spirit_nl_iter_max.value(); solver.set_output_mode(this->spirit_print_iter.value() ? SolverType::OUTPUT_VERBOSE : SolverType::OUTPUT_SILENT); solver.grad_thres_ = this->spirit_nl_iter_thres.value(); solver.proximal_strength_ratio_ = this->spirit_image_reg_lamda.value(); boost::shared_ptr< hoNDArray< std::complex<float> > > x0 = boost::make_shared< hoNDArray< std::complex<float> > >(kspaceInitial); solver.set_x0(x0); // parallel imaging term std::vector<size_t> dims; acq->get_dimensions(dims); hoSPIRIT2DTDataFidelityOperator< std::complex<float> > spirit(&dims); spirit.set_forward_kernel(*ker, false); spirit.set_acquired_points(*acq); // image reg term hoWavelet2DTOperator< std::complex<float> > wav3DOperator(&dims); wav3DOperator.set_acquired_points(*acq); wav3DOperator.scale_factor_first_dimension_ = this->spirit_reg_RO_weighting_ratio.value(); wav3DOperator.scale_factor_second_dimension_ = this->spirit_reg_E1_weighting_ratio.value(); wav3DOperator.scale_factor_third_dimension_ = this->spirit_reg_N_weighting_ratio.value(); wav3DOperator.with_approx_coeff_ = !this->spirit_reg_keep_approx_coeff.value(); wav3DOperator.change_coeffcients_third_dimension_boundary_ = !this->spirit_reg_keep_redundant_dimension_coeff.value(); wav3DOperator.proximity_across_cha_ = this->spirit_reg_proximity_across_cha.value(); wav3DOperator.no_null_space_ = true; wav3DOperator.input_in_kspace_ = true; if (this->spirit_reg_use_coil_sen_map.value() && hasCoilMap) { wav3DOperator.coil_map_ = *coilMap; } // set operators solver.oper_system_ = &spirit; solver.oper_reg_ = &wav3DOperator; if (this->perform_timing.value()) timer.start("NonLinear SPIRIT solver for 2DT with data fidelity ... "); solver.solve(*acq, res2DT); if (this->perform_timing.value()) timer.stop(); // if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(res2DT, debug_folder_full_path_ + "spirit_nl_2DT_data_fidelity_res"); } else { GDEBUG_STREAM("Start the NL SPIRIT iteration with regularization strength : " << this->spirit_image_reg_lamda.value() << " - number of iteration : " << this->spirit_nl_iter_max.value() << " - proximity across cha : " << this->spirit_reg_proximity_across_cha.value() << " - redundant dimension weighting ratio : " << this->spirit_reg_N_weighting_ratio.value() << " - using coil sen map : " << this->spirit_reg_use_coil_sen_map.value() << " - iter thres : " << this->spirit_nl_iter_thres.value()); typedef hoGdSolver< hoNDArray< std::complex<float> >, hoWavelet2DTOperator< std::complex<float> > > SolverType; SolverType solver; solver.iterations_ = this->spirit_nl_iter_max.value(); solver.set_output_mode(this->spirit_print_iter.value() ? 
SolverType::OUTPUT_VERBOSE : SolverType::OUTPUT_SILENT); solver.grad_thres_ = this->spirit_nl_iter_thres.value(); solver.proximal_strength_ratio_ = this->spirit_image_reg_lamda.value(); boost::shared_ptr< hoNDArray< std::complex<float> > > x0 = boost::make_shared< hoNDArray< std::complex<float> > >(kspaceInitial); solver.set_x0(x0); // parallel imaging term std::vector<size_t> dims; acq->get_dimensions(dims); hoSPIRIT2DTOperator< std::complex<float> > spirit(&dims); spirit.set_forward_kernel(*ker, false); spirit.set_acquired_points(*acq); spirit.no_null_space_ = true; spirit.use_non_centered_fft_ = false; // image reg term std::vector<size_t> dim; acq->get_dimensions(dim); hoWavelet2DTOperator< std::complex<float> > wav3DOperator(&dim); wav3DOperator.set_acquired_points(*acq); wav3DOperator.scale_factor_first_dimension_ = this->spirit_reg_RO_weighting_ratio.value(); wav3DOperator.scale_factor_second_dimension_ = this->spirit_reg_E1_weighting_ratio.value(); wav3DOperator.scale_factor_third_dimension_ = this->spirit_reg_N_weighting_ratio.value(); wav3DOperator.with_approx_coeff_ = !this->spirit_reg_keep_approx_coeff.value(); wav3DOperator.change_coeffcients_third_dimension_boundary_ = !this->spirit_reg_keep_redundant_dimension_coeff.value(); wav3DOperator.proximity_across_cha_ = this->spirit_reg_proximity_across_cha.value(); wav3DOperator.no_null_space_ = true; wav3DOperator.input_in_kspace_ = true; if (this->spirit_reg_use_coil_sen_map.value() && hasCoilMap) { wav3DOperator.coil_map_ = *coilMap; } // set operators solver.oper_system_ = &spirit; solver.oper_reg_ = &wav3DOperator; // set call back solverCallBack cb; cb.solver_ = &solver; solver.call_back_ = &cb; hoNDArray< std::complex<float> > b(kspaceInitial); Gadgetron::clear(b); if (this->perform_timing.value()) timer.start("NonLinear SPIRIT solver for 2DT ... "); solver.solve(b, res2DT); if (this->perform_timing.value()) timer.stop(); // if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(res2DT, debug_folder_full_path_ + "spirit_nl_2DT_res"); spirit.restore_acquired_kspace(kspace, res2DT); // if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(res2DT, debug_folder_full_path_ + "spirit_nl_2DT_res_restored"); } } } catch (...) { GADGET_THROW("Errors happened in GenericReconCartesianNonLinearSpirit2DTGadget::perform_nonlinear_spirit_unwrapping(...) ... "); } }
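// Conceptual sketch of the proximal-gradient iteration that hoGdSolver performs with the wavelet
// operator above: x <- shrink(x - step * grad(x), step * lambda). The real solver thresholds
// wavelet coefficients (optionally jointly across channels) rather than image samples; this
// element-wise version only shows the structure of the update and is not the Gadgetron solver.
#include <cmath>
#include <complex>
#include <functional>
#include <vector>

static void proximal_gradient_example(std::vector<std::complex<float> >& x,
                                      const std::function<void(const std::vector<std::complex<float> >&,
                                                               std::vector<std::complex<float> >&)>& grad,
                                      float step, float lambda, size_t iterations)
{
    std::vector<std::complex<float> > g(x.size());
    for (size_t it = 0; it < iterations; it++)
    {
        grad(x, g);                                                    // data-fidelity gradient
        for (size_t i = 0; i < x.size(); i++)
        {
            std::complex<float> v = x[i] - step * g[i];
            float m = std::abs(v);
            x[i] = (m > step * lambda) ? v * ((m - step * lambda) / m) // soft-threshold the magnitude
                                       : std::complex<float>(0, 0);
        }
    }
}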
void GenericReconCartesianNonLinearSpirit2DTGadget::perform_nonlinear_spirit_unwrapping(hoNDArray< std::complex<float> >& kspace, hoNDArray< std::complex<float> >& kerIm, hoNDArray< std::complex<float> >& ref2DT, hoNDArray< std::complex<float> >& coilMap2DT, hoNDArray< std::complex<float> >& res, size_t e) { try { bool print_iter = this->spirit_print_iter.value(); size_t RO = kspace.get_size(0); size_t E1 = kspace.get_size(1); size_t E2 = kspace.get_size(2); size_t CHA = kspace.get_size(3); size_t N = kspace.get_size(4); size_t S = kspace.get_size(5); size_t SLC = kspace.get_size(6); size_t ref_N = kerIm.get_size(4); size_t ref_S = kerIm.get_size(5); hoNDArray< std::complex<float> > kspaceLinear(kspace); res = kspace; // detect whether random sampling is used bool use_random_sampling = false; std::vector<long long> sampled_step_size; long long n, e1; for (n=0; n<(long long)N; n++) { long long prev_sampled_line = -1; for (e1=0; e1<(long long)E1; e1++) { if(std::abs(kspace(RO/2, e1, 0, 0, 0, 0, 0))>0 && std::abs(kspace(RO/2, e1, 0, CHA-1, 0, 0, 0))>0) { if(prev_sampled_line>0) { sampled_step_size.push_back(e1 - prev_sampled_line); } prev_sampled_line = e1; } } } if(sampled_step_size.size()>4) { size_t s; for (s=2; s<sampled_step_size.size()-1; s++) { if(sampled_step_size[s]!=sampled_step_size[s-1]) { use_random_sampling = true; break; } } } if(use_random_sampling) { GDEBUG_STREAM("SPIRIT Non linear, random sampling is detected ... "); } Gadgetron::GadgetronTimer timer(false); boost::shared_ptr< hoNDArray< std::complex<float> > > coilMap; bool hasCoilMap = false; if (coilMap2DT.get_size(0) == RO && coilMap2DT.get_size(1) == E1 && coilMap2DT.get_size(3)==CHA) { if (ref_N < N) { coilMap = boost::shared_ptr< hoNDArray< std::complex<float> > >(new hoNDArray< std::complex<float> >(RO, E1, CHA, coilMap2DT.begin())); } else { coilMap = boost::shared_ptr< hoNDArray< std::complex<float> > >(new hoNDArray< std::complex<float> >(RO, E1, CHA, ref_N, coilMap2DT.begin())); } hasCoilMap = true; } hoNDArray<float> gFactor; float gfactorMedian = 0; float smallest_eigen_value(0); // ----------------------------------------------------- // estimate gfactor // ----------------------------------------------------- // mean over N hoNDArray< std::complex<float> > meanKSpace; if(calib_mode_[e]==ISMRMRD_interleaved) { Gadgetron::compute_averaged_data_N_S(kspace, true, true, true, meanKSpace); } else { Gadgetron::compute_averaged_data_N_S(ref2DT, true, true, true, meanKSpace); } if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(meanKSpace, debug_folder_full_path_ + "spirit_nl_2DT_meanKSpace"); } hoNDArray< std::complex<float> > acsSrc(meanKSpace.get_size(0), meanKSpace.get_size(1), CHA, meanKSpace.begin()); hoNDArray< std::complex<float> > acsDst(meanKSpace.get_size(0), meanKSpace.get_size(1), CHA, meanKSpace.begin()); double grappa_reg_lamda = 0.0005; size_t kRO = 5; size_t kE1 = 4; hoNDArray< std::complex<float> > convKer; hoNDArray< std::complex<float> > kIm(RO, E1, CHA, CHA); Gadgetron::grappa2d_calib_convolution_kernel(acsSrc, acsDst, (size_t)this->acceFactorE1_[e], grappa_reg_lamda, kRO, kE1, convKer); Gadgetron::grappa2d_image_domain_kernel(convKer, RO, E1, kIm); hoNDArray< std::complex<float> > unmixC; if(hasCoilMap) { Gadgetron::grappa2d_unmixing_coeff(kIm, *coilMap, (size_t)acceFactorE1_[e], unmixC, gFactor); if (!debug_folder_full_path_.empty()) gt_exporter_.export_array(gFactor, debug_folder_full_path_ + "spirit_nl_2DT_gFactor"); hoNDArray<float> gfactorSorted(gFactor); 
std::sort(gfactorSorted.begin(), gfactorSorted.begin()+RO*E1); gfactorMedian = gfactorSorted((RO*E1 / 2)); GDEBUG_STREAM("SPIRIT Non linear, the median gfactor is found to be : " << gfactorMedian); } if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(kIm, debug_folder_full_path_ + "spirit_nl_2DT_kIm"); hoNDArray< std::complex<float> > complexIm; // compute linear solution as the initialization if(use_random_sampling) { if (this->perform_timing.value()) timer.start("SPIRIT Non linear, perform linear spirit recon ... "); this->perform_spirit_unwrapping(kspace, kerIm, kspaceLinear); if (this->perform_timing.value()) timer.stop(); } else { if (this->perform_timing.value()) timer.start("SPIRIT Non linear, perform linear recon ... "); //size_t ref2DT_RO = ref2DT.get_size(0); //size_t ref2DT_E1 = ref2DT.get_size(1); //// mean over N //hoNDArray< std::complex<float> > meanKSpace; //Gadgetron::sum_over_dimension(ref2DT, meanKSpace, 4); //if (!debug_folder_full_path_.empty()) { gt_exporter_.export_array_complex(meanKSpace, debug_folder_full_path_ + "spirit_nl_2DT_meanKSpace"); } //hoNDArray< std::complex<float> > acsSrc(ref2DT_RO, ref2DT_E1, CHA, meanKSpace.begin()); //hoNDArray< std::complex<float> > acsDst(ref2DT_RO, ref2DT_E1, CHA, meanKSpace.begin()); //double grappa_reg_lamda = 0.0005; //size_t kRO = 5; //size_t kE1 = 4; //hoNDArray< std::complex<float> > convKer; //hoNDArray< std::complex<float> > kIm(RO, E1, CHA, CHA); //Gadgetron::grappa2d_calib_convolution_kernel(acsSrc, acsDst, (size_t)this->acceFactorE1_[e], grappa_reg_lamda, kRO, kE1, convKer); //Gadgetron::grappa2d_image_domain_kernel(convKer, RO, E1, kIm); //if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(kIm, debug_folder_full_path_ + "spirit_nl_2DT_kIm"); Gadgetron::hoNDFFT<float>::instance()->ifft2c(kspace, complex_im_recon_buf_); if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(complex_im_recon_buf_, debug_folder_full_path_ + "spirit_nl_2DT_aliasedImage"); hoNDArray< std::complex<float> > resKSpace(RO, E1, CHA, N); hoNDArray< std::complex<float> > aliasedImage(RO, E1, CHA, N, complex_im_recon_buf_.begin()); Gadgetron::grappa2d_image_domain_unwrapping_aliased_image(aliasedImage, kIm, resKSpace); if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(resKSpace, debug_folder_full_path_ + "spirit_nl_2DT_linearImage"); Gadgetron::hoNDFFT<float>::instance()->fft2c(resKSpace); memcpy(kspaceLinear.begin(), resKSpace.begin(), resKSpace.get_number_of_bytes()); Gadgetron::apply_unmix_coeff_aliased_image(aliasedImage, unmixC, complexIm); if (this->perform_timing.value()) timer.stop(); } if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(kspaceLinear, debug_folder_full_path_ + "spirit_nl_2DT_kspaceLinear"); if(hasCoilMap) { if(N>=spirit_reg_minimal_num_images_for_noise_floor.value()) { // estimate the noise level if(use_random_sampling) { Gadgetron::hoNDFFT<float>::instance()->ifft2c(kspaceLinear, complex_im_recon_buf_); hoNDArray< std::complex<float> > complexLinearImage(RO, E1, CHA, N, complex_im_recon_buf_.begin()); Gadgetron::coil_combine(complexLinearImage, *coilMap, 2, complexIm); } if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(complexIm, debug_folder_full_path_ + "spirit_nl_2DT_linearImage_complexIm"); // if N is sufficiently large, we can estimate the noise floor by the smallest eigen value hoMatrix< std::complex<float> > data; data.createMatrix(RO*E1, N, complexIm.begin(), false); hoNDArray< std::complex<float> 
> eigenVectors, eigenValues, eigenVectorsPruned; // compute eigen hoNDKLT< std::complex<float> > klt; klt.prepare(data, (size_t)1, (size_t)0); klt.eigen_value(eigenValues); if (this->verbose.value()) { GDEBUG_STREAM("SPIRIT Non linear, computes eigen values for all 2D kspaces ... "); eigenValues.print(std::cout); for (size_t i = 0; i<eigenValues.get_size(0); i++) { GDEBUG_STREAM(i << " = " << eigenValues(i)); } } smallest_eigen_value = std::sqrt( std::abs(eigenValues(N - 1).real()) / (RO*E1) ); GDEBUG_STREAM("SPIRIT Non linear, the smallest eigen value is : " << smallest_eigen_value); } } // perform nonlinear reconstruction { boost::shared_ptr<hoNDArray< std::complex<float> > > ker(new hoNDArray< std::complex<float> >(RO, E1, CHA, CHA, ref_N, kerIm.begin())); boost::shared_ptr<hoNDArray< std::complex<float> > > acq(new hoNDArray< std::complex<float> >(RO, E1, CHA, N, kspace.begin())); hoNDArray< std::complex<float> > kspaceInitial(RO, E1, CHA, N, kspaceLinear.begin()); hoNDArray< std::complex<float> > res2DT(RO, E1, CHA, N, res.begin()); if (this->spirit_data_fidelity_lamda.value() > 0) { GDEBUG_STREAM("Start the NL SPIRIT data fidelity iteration - regularization strength : " << this->spirit_image_reg_lamda.value() << " - number of iteration : " << this->spirit_nl_iter_max.value() << " - proximity across cha : " << this->spirit_reg_proximity_across_cha.value() << " - redundant dimension weighting ratio : " << this->spirit_reg_N_weighting_ratio.value() << " - using coil sen map : " << this->spirit_reg_use_coil_sen_map.value() << " - iter thres : " << this->spirit_nl_iter_thres.value() << " - wavelet name : " << this->spirit_reg_name.value() ); typedef hoGdSolver< hoNDArray< std::complex<float> >, hoWavelet2DTOperator< std::complex<float> > > SolverType; SolverType solver; solver.iterations_ = this->spirit_nl_iter_max.value(); solver.set_output_mode(this->spirit_print_iter.value() ? 
SolverType::OUTPUT_VERBOSE : SolverType::OUTPUT_SILENT); solver.grad_thres_ = this->spirit_nl_iter_thres.value(); if(spirit_reg_estimate_noise_floor.value() && std::abs(smallest_eigen_value)>0) { solver.scale_factor_ = smallest_eigen_value; solver.proximal_strength_ratio_ = this->spirit_image_reg_lamda.value() * gfactorMedian; GDEBUG_STREAM("SPIRIT Non linear, eigen value is used to derive the regularization strength : " << solver.proximal_strength_ratio_ << " - smallest eigen value : " << solver.scale_factor_); } else { solver.proximal_strength_ratio_ = this->spirit_image_reg_lamda.value(); } boost::shared_ptr< hoNDArray< std::complex<float> > > x0 = boost::make_shared< hoNDArray< std::complex<float> > >(kspaceInitial); solver.set_x0(x0); // parallel imaging term std::vector<size_t> dims; acq->get_dimensions(dims); hoSPIRIT2DTDataFidelityOperator< std::complex<float> > spirit(&dims); spirit.set_forward_kernel(*ker, false); spirit.set_acquired_points(*acq); // image reg term hoWavelet2DTOperator< std::complex<float> > wav3DOperator(&dims); wav3DOperator.set_acquired_points(*acq); wav3DOperator.scale_factor_first_dimension_ = this->spirit_reg_RO_weighting_ratio.value(); wav3DOperator.scale_factor_second_dimension_ = this->spirit_reg_E1_weighting_ratio.value(); wav3DOperator.scale_factor_third_dimension_ = this->spirit_reg_N_weighting_ratio.value(); wav3DOperator.with_approx_coeff_ = !this->spirit_reg_keep_approx_coeff.value(); wav3DOperator.change_coeffcients_third_dimension_boundary_ = !this->spirit_reg_keep_redundant_dimension_coeff.value(); wav3DOperator.proximity_across_cha_ = this->spirit_reg_proximity_across_cha.value(); wav3DOperator.no_null_space_ = true; wav3DOperator.input_in_kspace_ = true; wav3DOperator.select_wavelet(this->spirit_reg_name.value()); if (this->spirit_reg_use_coil_sen_map.value() && hasCoilMap) { wav3DOperator.coil_map_ = *coilMap; } // set operators solver.oper_system_ = &spirit; solver.oper_reg_ = &wav3DOperator; if (this->perform_timing.value()) timer.start("NonLinear SPIRIT solver for 2DT with data fidelity ... "); solver.solve(*acq, res2DT); if (this->perform_timing.value()) timer.stop(); if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(res2DT, debug_folder_full_path_ + "spirit_nl_2DT_data_fidelity_res"); } else { GDEBUG_STREAM("Start the NL SPIRIT iteration with regularization strength : "<< this->spirit_image_reg_lamda.value() << " - number of iteration : " << this->spirit_nl_iter_max.value() << " - proximity across cha : " << this->spirit_reg_proximity_across_cha.value() << " - redundant dimension weighting ratio : " << this->spirit_reg_N_weighting_ratio.value() << " - using coil sen map : " << this->spirit_reg_use_coil_sen_map.value() << " - iter thres : " << this->spirit_nl_iter_thres.value() << " - wavelet name : " << this->spirit_reg_name.value() ); typedef hoGdSolver< hoNDArray< std::complex<float> >, hoWavelet2DTOperator< std::complex<float> > > SolverType; SolverType solver; solver.iterations_ = this->spirit_nl_iter_max.value(); solver.set_output_mode(this->spirit_print_iter.value() ? 
SolverType::OUTPUT_VERBOSE : SolverType::OUTPUT_SILENT); solver.grad_thres_ = this->spirit_nl_iter_thres.value(); if(spirit_reg_estimate_noise_floor.value() && std::abs(smallest_eigen_value)>0) { solver.scale_factor_ = smallest_eigen_value; solver.proximal_strength_ratio_ = this->spirit_image_reg_lamda.value() * gfactorMedian; GDEBUG_STREAM("SPIRIT Non linear, eigen value is used to derive the regularization strength : " << solver.proximal_strength_ratio_ << " - smallest eigen value : " << solver.scale_factor_); } else { solver.proximal_strength_ratio_ = this->spirit_image_reg_lamda.value(); } boost::shared_ptr< hoNDArray< std::complex<float> > > x0 = boost::make_shared< hoNDArray< std::complex<float> > >(kspaceInitial); solver.set_x0(x0); // parallel imaging term std::vector<size_t> dims; acq->get_dimensions(dims); hoSPIRIT2DTOperator< std::complex<float> > spirit(&dims); spirit.set_forward_kernel(*ker, false); spirit.set_acquired_points(*acq); spirit.no_null_space_ = true; spirit.use_non_centered_fft_ = false; // image reg term std::vector<size_t> dim; acq->get_dimensions(dim); hoWavelet2DTOperator< std::complex<float> > wav3DOperator(&dim); wav3DOperator.set_acquired_points(*acq); wav3DOperator.scale_factor_first_dimension_ = this->spirit_reg_RO_weighting_ratio.value(); wav3DOperator.scale_factor_second_dimension_ = this->spirit_reg_E1_weighting_ratio.value(); wav3DOperator.scale_factor_third_dimension_ = this->spirit_reg_N_weighting_ratio.value(); wav3DOperator.with_approx_coeff_ = !this->spirit_reg_keep_approx_coeff.value(); wav3DOperator.change_coeffcients_third_dimension_boundary_ = !this->spirit_reg_keep_redundant_dimension_coeff.value(); wav3DOperator.proximity_across_cha_ = this->spirit_reg_proximity_across_cha.value(); wav3DOperator.no_null_space_ = true; wav3DOperator.input_in_kspace_ = true; wav3DOperator.select_wavelet(this->spirit_reg_name.value()); if (this->spirit_reg_use_coil_sen_map.value() && hasCoilMap) { wav3DOperator.coil_map_ = *coilMap; } // set operators solver.oper_system_ = &spirit; solver.oper_reg_ = &wav3DOperator; // set call back solverCallBack cb; cb.solver_ = &solver; solver.call_back_ = &cb; hoNDArray< std::complex<float> > b(kspaceInitial); Gadgetron::clear(b); if (this->perform_timing.value()) timer.start("NonLinear SPIRIT solver for 2DT ... "); solver.solve(b, res2DT); if (this->perform_timing.value()) timer.stop(); if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(res2DT, debug_folder_full_path_ + "spirit_nl_2DT_res"); spirit.restore_acquired_kspace(kspace, res2DT); if (!debug_folder_full_path_.empty()) gt_exporter_.export_array_complex(res2DT, debug_folder_full_path_ + "spirit_nl_2DT_res_restored"); } } } catch (...) { GADGET_THROW("Errors happened in GenericReconCartesianNonLinearSpirit2DTGadget::perform_nonlinear_spirit_unwrapping(...) ... "); } }
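// Sketch of the two scalars derived before the nonlinear solve above: the median g-factor over
// one frame (std::nth_element avoids a full sort) and the noise floor estimated from the smallest
// eigenvalue of the [RO*E1 x N] coil-combined image matrix, sigma = sqrt(|lambda_min| / (RO*E1)).
// With spirit_reg_estimate_noise_floor enabled, the solver then uses proximal_strength_ratio_ =
// spirit_image_reg_lamda * median g-factor and scale_factor_ = sigma. Function names are illustrative.
#include <algorithm>
#include <cmath>
#include <vector>

static float median_gfactor_example(std::vector<float> gfactor) // by value: nth_element reorders
{
    if (gfactor.empty()) return 1.0f;
    std::nth_element(gfactor.begin(), gfactor.begin() + gfactor.size() / 2, gfactor.end());
    return gfactor[gfactor.size() / 2];
}

static float noise_floor_example(float lambda_min, size_t num_pixels)
{
    return std::sqrt(std::abs(lambda_min) / (float)num_pixels);
}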
int DistributeGadget::process_config(ACE_Message_Block* m) { started_nodes_ = 0; node_parameters_ = std::string(m->rd_ptr()); //Grab the original XML configuration std::string xml = controller_->get_xml_configuration(); GadgetronXML::GadgetStreamConfiguration cfg; GadgetronXML::deserialize(xml.c_str(),cfg); //Delete Gadgets up to this Gadget std::vector<GadgetronXML::Gadget>::iterator it = cfg.gadget.begin(); while ((it != cfg.gadget.end()) && (it->name != std::string(this->module()->name()))) it++; if (it != cfg.gadget.end()) it++; cfg.gadget.erase(cfg.gadget.begin(),it); //Delete Gadgets after collector it = cfg.gadget.begin(); while ((it != cfg.gadget.end()) && (it->name != collector.value())) it++; if (it != cfg.gadget.end()) it++; cfg.gadget.erase(it,cfg.gadget.end()); std::stringstream o; GadgetronXML::serialize(cfg,o); node_xml_config_ = o.str(); Gadget* tmp = this; while (tmp->next()) { if (std::string(tmp->module()->name()) == collector.value()) break; tmp = dynamic_cast<Gadget*>(tmp->next()); } collect_gadget_ = tmp; if (!collect_gadget_) { GERROR("Failed to locate collector Gadget with name %s\n", collector.value().c_str()); return GADGET_FAIL; } else { collect_gadget_->set_parameter("pass_through_mode","true"); } // get current node ip addresses ACE_INET_Addr* the_addr_array = NULL; size_t num_of_ip = 0; int rc = ACE::get_ip_interfaces (num_of_ip, the_addr_array); if (rc != 0) { GERROR_STREAM("Retrieving local ip addresses failed ... "); num_of_ip = 0; } for (size_t ii=0; ii<num_of_ip; ii++) { std::string ip = std::string(the_addr_array[ii].get_host_addr()); local_address_.push_back(ip); GDEBUG_STREAM("--> Local address : " << ip); } // release the address array only after its entries have been read if (the_addr_array != NULL) delete [] the_addr_array; return GADGET_OK; }
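// Conceptual sketch of the chain trimming done in process_config above: keep only the gadgets
// strictly after this DistributeGadget up to and including the collector, which is the sub-chain
// serialized and forwarded to the worker nodes. Plain strings stand in for GadgetronXML::Gadget
// entries; the function name is illustrative only.
#include <algorithm>
#include <string>
#include <vector>

static std::vector<std::string> node_chain_example(const std::vector<std::string>& chain,
                                                   const std::string& self,
                                                   const std::string& collector)
{
    std::vector<std::string>::const_iterator first = std::find(chain.begin(), chain.end(), self);
    if (first != chain.end()) ++first;                       // start after this gadget
    std::vector<std::string>::const_iterator last = std::find(first, chain.end(), collector);
    if (last != chain.end()) ++last;                         // include the collector itself
    return std::vector<std::string>(first, last);
}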