int ImageWriterGadget<T> :: process( GadgetContainerMessage< ISMRMRD::ImageHeader>* m1, GadgetContainerMessage< hoNDArray< T > >* m2)
{
    GDEBUG("Writing image\n");

    // Pick a file extension from the element size of T.
    // NOTE(review): sizeof() only distinguishes element width, not the actual
    // type -- any 8-byte element maps to ".cplx". This mirrors the original
    // mapping (8 -> complex float, 4 -> float, 2 -> unsigned short).
    const char* extension;
    switch (sizeof(T)) {
    case 8:  extension = "cplx";  break;
    case 4:  extension = "real";  break;
    case 2:  extension = "short"; break;
    default: extension = "cplx";  break;
    }

    // snprintf instead of sprintf: never overruns the buffer.
    char filename[1024];
    snprintf(filename, sizeof(filename), "out_%05d.%s", (int)this->calls_, extension);

    std::ofstream outfile(filename, std::ios::out | std::ios::binary);
    if (!outfile) {
        // Previously a failed open was silently ignored and the writes were no-ops.
        GDEBUG("Failed to open %s for writing\n", filename);
        return GADGET_FAIL;
    }

    // Header layout: int ndim, then ndim ints (sizes), then the raw samples.
    int ndim = (int)m2->getObjectPtr()->get_number_of_dimensions();
    std::vector<int> dims(ndim);  // std::vector instead of raw new[]/delete[]
    size_t elements = 1;
    for (int d = 0; d < ndim; d++) {
        dims[d] = (int)m2->getObjectPtr()->get_size(d);
        elements *= dims[d];
    }

    outfile.write(reinterpret_cast<char*>(&ndim), sizeof(int));
    outfile.write(reinterpret_cast<char*>(dims.data()), sizeof(int) * ndim);
    outfile.write(reinterpret_cast<char*>(m2->getObjectPtr()->get_data_ptr()), sizeof(T) * elements);
    outfile.close();

    this->calls_++;
    return this->next()->putq(m1);
}
/* Allocate and initialize a new ConnectionObject.
 * Returns a new reference, or NULL (with an exception set) on failure. */
PyObject* pydrizzle_connect(PyObject *obj, PyObject *args, PyObject *kwargs)
{
    ConnectionObject *con;

    con = (ConnectionObject *)PyObject_NEW(ConnectionObject, &ConnectionObjectType);
    if (con == NULL) {
        return NULL;
    }
    GDEBUG("alloc ConnectionObject %p", con);

    if (ConnectionObject_init(con, args, kwargs) < 0) {
        /* con is guaranteed non-NULL here; the redundant inner NULL check
         * before Py_DECREF was removed. */
        Py_DECREF(con);
        return NULL;
    }
    return (PyObject*)con;
}
int GenericReconCartesianGrappaGadget::process_config(ACE_Message_Block *mb)
{
    GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL);

    // -------------------------------------------------

    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        // Previously the parse error was only logged and processing continued
        // with an empty header; fail explicitly instead so downstream code
        // never sees a bogus zero-encoding configuration.
        GDEBUG("Error parsing ISMRMRD Header");
        return GADGET_FAIL;
    }

    // One reconstruction object per encoding space.
    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    recon_obj_.resize(NE);

    return GADGET_OK;
}
/* Set up the inotify "move" test: create a file under test->dir1 and rename
 * it into test->dir2, so watch1 observes the move-from event and watch2 the
 * move-to event. Returns TRUE unconditionally. */
static gboolean test_inotify_move_init( TestInotify* test)
{
    TestInotifyMove* move = G_CAST(test, TestInotifyMove, test);
    char* src;
    move->fname = "test";
    /* Source path lives in dir1, destination in dir2 (same file name). */
    src = g_strconcat(test->dir1, G_DIR_SEPARATOR_S, move->fname, NULL);
    move->dest = g_strconcat(test->dir2, G_DIR_SEPARATOR_S, move->fname, NULL);
    /* Register the handlers before touching the filesystem so the events
     * generated below are not missed. */
    move->id1 = gutil_inotify_watch_add_handler(test->watch1, test_inotify_move_from, move);
    move->id2 = gutil_inotify_watch_add_handler(test->watch2, test_inotify_move_to, move);
    /* NOTE(review): the return values of g_file_set_contents() and rename()
     * are ignored; a failure here would only surface later as a missing
     * inotify event in the test. */
    g_file_set_contents(src, "contents", -1, NULL);
    GDEBUG("%s -> %s", src, move->dest);
    rename(src, move->dest);
    g_free(src);
    return TRUE;
}
/* tp_init slot for SemaphoreObject: parse the optional "value" keyword
 * (default 1) and create a fresh waiter set.
 * Returns 0 on success, -1 on failure (the tp_init contract). */
static int SemaphoreObject_init(SemaphoreObject *self, PyObject *args, PyObject *kwargs)
{
    int value = 1;
    static char *keywords[] = {"value", NULL};

    GDEBUG("self:%p", self);
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i:__init__", keywords, &value)) {
        return -1;
    }
    self->counter = value;
    /* Drop any waiter set left over from a previous __init__ call.
     * NOTE(review): assumes tp_alloc zeroed the struct so self->waiters is
     * NULL or a valid object here -- confirm against the type's tp_new. */
    Py_CLEAR(self->waiters);
    self->waiters = PySet_New(NULL);
    if (self->waiters == NULL) {
        return -1;
    }
    DEBUG("self:%p counter:%d waiters:%p", self, self->counter, self->waiters);
    /* Fix: tp_init must return 0 on success; the original returned 1, which
     * only worked because CPython tests the result with "< 0". */
    return 0;
}
bool NoiseAdjustGadget::loadNoiseCovariance() { std::ifstream infile; infile.open (full_name_stored_noise_dependency_.c_str(), std::ios::in|std::ios::binary); if (infile.good()) { //Read the XML header of the noise scan uint32_t xml_length; infile.read( reinterpret_cast<char*>(&xml_length), 4); std::string xml_str(xml_length,'\0'); infile.read(const_cast<char*>(xml_str.c_str()), xml_length); ISMRMRD::deserialize(xml_str.c_str(), noise_ismrmrd_header_); infile.read( reinterpret_cast<char*>(&noise_dwell_time_us_), sizeof(float)); size_t len; infile.read( reinterpret_cast<char*>(&len), sizeof(size_t)); char* buf = new char[len]; if ( buf == NULL ) return false; infile.read(buf, len); if ( !noise_covariance_matrixf_.deserialize(buf, len) ) { delete [] buf; return false; } delete [] buf; infile.close(); } else { GDEBUG("Noise prewhitener file is not found. Proceeding without stored noise\n"); return false; } return true; }
/* Create a new GC-tracked TimerObject firing 'seconds' from now (0 means
 * "immediately eligible"). Takes new references on callback/args/kwargs.
 * Returns NULL (with an exception set) on allocation failure. */
TimerObject* TimerObject_new(long seconds, PyObject *callback, PyObject *args, PyObject *kwargs)
{
    TimerObject *self;

    self = PyObject_GC_New(TimerObject, &TimerObjectType);
    if (self == NULL) {
        return NULL;
    }

    /* Absolute deadline; 0 is the sentinel for "no delay". */
    if (seconds > 0) {
        self->seconds = time(NULL) + seconds;
    } else {
        self->seconds = 0;
    }

    Py_XINCREF(callback);
    Py_XINCREF(args);
    Py_XINCREF(kwargs);
    self->callback = callback;
    if (args != NULL) {
        self->args = args;
    } else {
        /* No args supplied: store an empty tuple so callers can always
         * unpack self->args. Fix: the PyTuple_New result is now checked. */
        PyObject *empty = PyTuple_New(0);
        if (empty == NULL) {
            /* Undo the references taken above and free the half-built,
             * not-yet-tracked object. */
            Py_XDECREF(callback);
            Py_XDECREF(kwargs);
            PyObject_GC_Del(self);
            return NULL;
        }
        self->args = empty;
    }
    self->kwargs = kwargs;
    self->called = 0;

    PyObject_GC_Track(self);
    GDEBUG("self:%p", self);
    return self;
}
int AcquisitionAccumulateTriggerGadget ::process(GadgetContainerMessage<ISMRMRD::AcquisitionHeader>* m1, GadgetContainerMessage< hoNDArray< std::complex<float> > >* m2) { //Ignore noise scans if (m1->getObjectPtr()->isFlagSet(ISMRMRD::ISMRMRD_ACQ_IS_NOISE_MEASUREMENT)) { m1->release(); return GADGET_OK; } //It is enough to put the first one, since they are linked unsigned short sorting_index = 0; switch (sort_) { case KSPACE_ENCODE_STEP_1: sorting_index = m1->getObjectPtr()->idx.kspace_encode_step_1; break; case KSPACE_ENCODE_STEP_2: sorting_index = m1->getObjectPtr()->idx.kspace_encode_step_2; break; case AVERAGE: sorting_index = m1->getObjectPtr()->idx.average; break; case SLICE: sorting_index = m1->getObjectPtr()->idx.slice; break; case CONTRAST: sorting_index = m1->getObjectPtr()->idx.contrast; break; case PHASE: sorting_index = m1->getObjectPtr()->idx.phase; break; case REPETITION: sorting_index = m1->getObjectPtr()->idx.repetition; break; case SET: sorting_index = m1->getObjectPtr()->idx.set; break; case SEGMENT: sorting_index = m1->getObjectPtr()->idx.segment; break; case USER_0: sorting_index = m1->getObjectPtr()->idx.user[0]; break; case USER_1: sorting_index = m1->getObjectPtr()->idx.user[1]; break; case USER_2: sorting_index = m1->getObjectPtr()->idx.user[2]; break; case USER_3: sorting_index = m1->getObjectPtr()->idx.user[3]; break; case USER_4: sorting_index = m1->getObjectPtr()->idx.user[4]; break; case USER_5: sorting_index = m1->getObjectPtr()->idx.user[5]; break; case USER_6: sorting_index = m1->getObjectPtr()->idx.user[6]; break; case USER_7: sorting_index = m1->getObjectPtr()->idx.user[7]; break; case NONE: sorting_index = 0; break; default: GDEBUG("Unknown sorting condition %d\n", sort_); m1->release(); return GADGET_FAIL; } //Create the data structure that will go in the bucket IsmrmrdAcquisitionData d(m1,m2,AsContainerMessage< hoNDArray<float> >(m2->cont())); //Now let's figure out if a trigger condition has occurred. 
if (prev_.head_) { //Make sure this is not the first acquisition we are receiving switch (trigger_) { case KSPACE_ENCODE_STEP_1: if (prev_.head_->getObjectPtr()->idx.kspace_encode_step_1 != d.head_->getObjectPtr()->idx.kspace_encode_step_1) { trigger(); } break; case KSPACE_ENCODE_STEP_2: if (prev_.head_->getObjectPtr()->idx.kspace_encode_step_2 != d.head_->getObjectPtr()->idx.kspace_encode_step_2) { trigger(); } break; case AVERAGE: if (prev_.head_->getObjectPtr()->idx.average != d.head_->getObjectPtr()->idx.average) { trigger(); } break; case SLICE: if (prev_.head_->getObjectPtr()->idx.slice != d.head_->getObjectPtr()->idx.slice) { trigger(); } break; case CONTRAST: if (prev_.head_->getObjectPtr()->idx.contrast != d.head_->getObjectPtr()->idx.contrast) { trigger(); } break; case PHASE: if (prev_.head_->getObjectPtr()->idx.phase != d.head_->getObjectPtr()->idx.phase) { trigger(); } break; case REPETITION: if (prev_.head_->getObjectPtr()->idx.repetition != d.head_->getObjectPtr()->idx.repetition) { trigger(); } break; case SET: if (prev_.head_->getObjectPtr()->idx.set != d.head_->getObjectPtr()->idx.set) { trigger(); } break; case SEGMENT: if (prev_.head_->getObjectPtr()->idx.segment != d.head_->getObjectPtr()->idx.segment) { trigger(); } break; case USER_0: if (prev_.head_->getObjectPtr()->idx.user[0] != d.head_->getObjectPtr()->idx.user[0]) { trigger(); } break; case USER_1: if (prev_.head_->getObjectPtr()->idx.user[1] != d.head_->getObjectPtr()->idx.user[1]) { trigger(); } break; case USER_2: if (prev_.head_->getObjectPtr()->idx.user[2] != d.head_->getObjectPtr()->idx.user[2]) { trigger(); } break; case USER_3: if (prev_.head_->getObjectPtr()->idx.user[3] != d.head_->getObjectPtr()->idx.user[3]) { trigger(); } break; case USER_4: if (prev_.head_->getObjectPtr()->idx.user[4] != d.head_->getObjectPtr()->idx.user[4]) { trigger(); } break; case USER_5: if (prev_.head_->getObjectPtr()->idx.user[5] != d.head_->getObjectPtr()->idx.user[5]) { trigger(); } break; case 
USER_6: if (prev_.head_->getObjectPtr()->idx.user[6] != d.head_->getObjectPtr()->idx.user[6]) { trigger(); } break; case USER_7: if (prev_.head_->getObjectPtr()->idx.user[7] != d.head_->getObjectPtr()->idx.user[7]) { trigger(); } break; case NONE: break; default: GDEBUG("Unknown trigger condition %d\n", trigger_); return GADGET_FAIL; } } //Now we can update the previous data item that we store for //purposes of determining if trigger condition has occurred. prev_ = d; //Find the bucket the data should go in map_type_::iterator it = buckets_.find(sorting_index); if (it == buckets_.end()) { //Bucket does not exist, create it buckets_[sorting_index] = new GadgetContainerMessage<IsmrmrdAcquisitionBucket>; } IsmrmrdAcquisitionBucket* bucket = buckets_[sorting_index]->getObjectPtr(); uint16_t espace = m1->getObjectPtr()->encoding_space_ref; if (use_calib_as_data.value() ||!ISMRMRD::FlagBit(ISMRMRD::ISMRMRD_ACQ_IS_PARALLEL_CALIBRATION).isSet(m1->getObjectPtr()->flags)) { bucket->data_.push_back(d); if (bucket->datastats_.size() < (espace+1)) { bucket->datastats_.resize(espace+1); } bucket->datastats_[espace].kspace_encode_step_1.insert(m1->getObjectPtr()->idx.kspace_encode_step_1); bucket->datastats_[espace].kspace_encode_step_2.insert(m1->getObjectPtr()->idx.kspace_encode_step_2); bucket->datastats_[espace].slice.insert(m1->getObjectPtr()->idx.slice); bucket->datastats_[espace].phase.insert(m1->getObjectPtr()->idx.phase); bucket->datastats_[espace].contrast.insert(m1->getObjectPtr()->idx.contrast); bucket->datastats_[espace].set.insert(m1->getObjectPtr()->idx.set); bucket->datastats_[espace].segment.insert(m1->getObjectPtr()->idx.segment); bucket->datastats_[espace].average.insert(m1->getObjectPtr()->idx.average); bucket->datastats_[espace].repetition.insert(m1->getObjectPtr()->idx.repetition); } if ( ISMRMRD::FlagBit(ISMRMRD::ISMRMRD_ACQ_IS_PARALLEL_CALIBRATION).isSet(m1->getObjectPtr()->flags) || 
ISMRMRD::FlagBit(ISMRMRD::ISMRMRD_ACQ_IS_PARALLEL_CALIBRATION_AND_IMAGING).isSet(m1->getObjectPtr()->flags) ) { bucket->ref_.push_back(d); if (bucket->refstats_.size() < (espace+1)) { bucket->refstats_.resize(espace+1); } bucket->refstats_[espace].kspace_encode_step_1.insert(m1->getObjectPtr()->idx.kspace_encode_step_1); bucket->refstats_[espace].kspace_encode_step_2.insert(m1->getObjectPtr()->idx.kspace_encode_step_2); bucket->refstats_[espace].slice.insert(m1->getObjectPtr()->idx.slice); bucket->refstats_[espace].phase.insert(m1->getObjectPtr()->idx.phase); bucket->refstats_[espace].contrast.insert(m1->getObjectPtr()->idx.contrast); bucket->refstats_[espace].set.insert(m1->getObjectPtr()->idx.set); bucket->refstats_[espace].segment.insert(m1->getObjectPtr()->idx.segment); bucket->refstats_[espace].average.insert(m1->getObjectPtr()->idx.average); bucket->refstats_[espace].repetition.insert(m1->getObjectPtr()->idx.repetition); } //We can release the data now. It is reference counted and counter have been incremented through operations above. m1->release(); //TODO: // At this point it would make sense to check the data flags for trigger conditions. return GADGET_OK; }
void MainWindow::handleGeneratePDFs() { QString savedir (QDir(Global::getPath()).canonicalPath()); GDEBUG("Writing out PDFs to [%s]", qPrintable(savedir)); Global::generatePDFs(savedir); }
int gpuBufferSensePrepGadget::process( GadgetContainerMessage<IsmrmrdReconData>* m1) { IsmrmrdReconData* recondata= m1->getObjectPtr(); if (recondata->rbit_.size() != 1){ throw std::runtime_error("gpuBufferSensePrepGadget only support a single encoding space"); } IsmrmrdReconBit& reconbit = recondata->rbit_[0]; GenericReconJob job; IsmrmrdDataBuffered* buffer = &reconbit.data_; //Use reference data if available. if (reconbit.ref_){ GDEBUG("Using Reference data for CSM estimation\n"); buffer = reconbit.ref_.get_ptr(); } size_t ncoils = buffer->headers_[0].active_channels; std::vector<size_t> new_order = {0,1,2,4,5,6,3}; boost::shared_ptr<cuNDArray<float>> dcw; boost::shared_ptr<cuNDArray<floatd2>> traj; if (buffer->trajectory_){ auto & trajectory = *buffer->trajectory_; if (buffer->headers_[0].trajectory_dimensions == 3){ auto traj_dcw = separate_traj_and_dcw(&trajectory); dcw = boost::make_shared<cuNDArray<float>>(std::get<1>(traj_dcw).get()); traj = boost::make_shared<cuNDArray<floatd2>>(std::get<0>(traj_dcw).get()); } else if (buffer->headers_[0].trajectory_dimensions == 2){ auto old_traj_dims = *trajectory.get_dimensions(); std::vector<size_t> traj_dims (old_traj_dims.begin()+1,old_traj_dims.end()); //Remove first element hoNDArray<floatd2> tmp_traj(traj_dims,(floatd2*)trajectory.get_data_ptr()); traj = boost::make_shared<cuNDArray<floatd2>>(tmp_traj); } else { throw std::runtime_error("Unsupported number of trajectory dimensions"); } } { auto tmpdim = *buffer->data_.get_dimensions(); for (auto dim : tmpdim) std::cout << dim << " "; std::cout << std::endl; auto permuted = permute((hoNDArray<float_complext>*)&buffer->data_,&new_order); cuNDArray<float_complext> data(*permuted); if (dcw){ float scale_factor = float(prod(image_dims_recon_os_))/asum(dcw.get()); *dcw *= scale_factor; } auto reg_images = reconstruct_regularization(&data,traj.get(),dcw.get(),ncoils); //reg_images->squeeze(); auto csm = estimate_b1_map<float,2>(reg_images.get()); *reg_images *= *csm; 
auto combined = sum(reg_images.get(),reg_images->get_number_of_dimensions()-1); auto tmp_combined = abs(reg_images.get()); auto tmpcsm = abs(csm.get()); job.csm_host_ = csm->to_host(); job.reg_host_ = combined->to_host(); } IsmrmrdDataBuffered* mainbuffer = &reconbit.data_; //Permute as Sensegadgets expect last dimension to be coils. *Sigh* job.dat_host_ =permute((hoNDArray<float_complext>*)&mainbuffer->data_,&new_order); if (mainbuffer->trajectory_){ auto & trajectory = *mainbuffer->trajectory_; if (mainbuffer->headers_[0].trajectory_dimensions >2 ){ auto traj_dcw = separate_traj_and_dcw(&trajectory); job.tra_host_ = std::get<0>(traj_dcw); job.dcw_host_ = std::get<1>(traj_dcw); } else if (mainbuffer->headers_[0].trajectory_dimensions == 2){ auto old_traj_dims = *trajectory.get_dimensions(); std::vector<size_t> traj_dims (old_traj_dims.begin()+1,old_traj_dims.end()); //Remove first element hoNDArray<floatd2> tmp_traj(traj_dims,(floatd2*)trajectory.get_data_ptr()); job.tra_host_ = boost::make_shared<hoNDArray<floatd2>>(tmp_traj); auto host_dcw = boost::make_shared<hoNDArray<float>>(traj_dims); fill(host_dcw.get(),1.0f); job.dcw_host_ = host_dcw; } else { throw std::runtime_error("Unsupported number of trajectory dimensions"); } } { float scale_factor = float(prod(image_dims_recon_os_))/asum(job.dcw_host_.get()); *job.dcw_host_ *= scale_factor; } auto data_dims = *job.dat_host_->get_dimensions(); //Sense gadgets expect only 1 dimension for encoding, so collapse the first size_t elements = std::accumulate(data_dims.begin(),data_dims.end()-1,1,std::multiplies<size_t>()); std::vector<size_t> new_data_dims = {elements,data_dims.back()}; job.dat_host_->reshape(&new_data_dims); size_t traj_elements = job.tra_host_->get_number_of_elements(); auto traj_dims = *job.tra_host_->get_dimensions(); size_t kpoints_per_frame = traj_dims[0]*profiles_per_frame_; if (traj_elements%kpoints_per_frame){ std::stringstream ss; ss << "Profiles per frame (" << profiles_per_frame_ << ") must 
be a divisor of total number of profiles (" << traj_elements/traj_dims[0] << ")"; throw std::runtime_error(ss.str()); } std::vector<size_t> new_traj_dims ={kpoints_per_frame,traj_elements/kpoints_per_frame}; job.tra_host_->reshape(&new_traj_dims); job.dcw_host_->reshape(&new_traj_dims); //Let's invent some image headers! size_t total_frames = profiles_per_frame_ > 0 ? mainbuffer->headers_.get_number_of_elements()/profiles_per_frame_ : 1 ; job.image_headers_ = boost::shared_array<ISMRMRD::ImageHeader>(new ISMRMRD::ImageHeader[total_frames]); for (size_t i = 0; i < total_frames; i++){ job.image_headers_[i] = create_image_header(mainbuffer->headers_[i*profiles_per_frame_],mainbuffer->sampling_,i,total_frames); } m1->release(); //We be done with everything now. auto header_message = new GadgetContainerMessage<ISMRMRD::ImageHeader>(job.image_headers_[0]); auto job_message = new GadgetContainerMessage<GenericReconJob>(job); header_message->cont(job_message); if (!this->next()->putq(header_message)){ GDEBUG("Failed to put message on que"); return GADGET_FAIL; } else return GADGET_OK; //cuNDArray<float_complext> reg_images = reconstruct_regularization(reconbit.data_); }
// Build a cuSenseData message from an acquisition bucket: upload data and
// trajectory to the GPU (splitting off the first skip_lines_ lines as
// frequency calibration when configured), compute the CSM from the
// reference data (or the main data when no reference was acquired), and
// forward the message.
// Fix vs. original: the cuSenseData container message is now released when
// putq() fails instead of being leaked.
int gpuCSICoilEstimationGadget::process(GadgetContainerMessage<IsmrmrdAcquisitionBucket>* m1)
{
    IsmrmrdAcquisitionBucket* bucket = m1->getObjectPtr();
    auto cm1 = new GadgetContainerMessage<cuSenseData>();
    auto senseData = cm1->getObjectPtr();

    coils = bucket->data_.front().head_->getObjectPtr()->active_channels;
    GDEBUG("Active channels %i \n", coils);

    {
        hoNDArray<std::complex<float>>* ho_data;
        hoNDArray<float>* ho_traj;
        std::tie(ho_data, ho_traj) = combine_data(bucket->data_);

        if (skip_lines_ > 0) {
            // The first skip_lines_ lines are used as frequency calibration data.
            auto cal_dims = *ho_data->get_dimensions();
            cal_dims.back() = skip_lines_;
            auto data_dims = *ho_data->get_dimensions();
            data_dims.back() -= skip_lines_;
            hoNDArray<float_complext> cal_view(cal_dims, (float_complext*)ho_data->get_data_ptr());
            senseData->freq_calibration = boost::make_shared<cuNDArray<float_complext>>(cal_view);
            senseData->freq_calibration->squeeze();
            hoNDArray<float_complext> data_view(data_dims, (float_complext*)ho_data->get_data_ptr() + cal_view.get_number_of_elements());
            senseData->data = boost::make_shared<cuNDArray<float_complext>>(data_view);
        } else {
            senseData->data = boost::make_shared<cuNDArray<float_complext>>(reinterpret_cast<hoNDArray<float_complext>*>(ho_data));
        }

        if (ho_traj->get_size(0) > 2) {
            // Trajectory includes density compensation weights.
            auto traj_dcw = separate_traj_and_dcw(ho_traj);
            senseData->traj = boost::make_shared<cuNDArray<floatd2>>(*std::get<0>(traj_dcw));
            senseData->dcw = boost::make_shared<cuNDArray<float>>(*std::get<1>(traj_dcw));
        } else {
            std::vector<size_t> tdims = *ho_traj->get_dimensions();
            std::vector<size_t> tmp_dim(tdims.begin() + 1, tdims.end());
            hoNDArray<floatd2> tmp(tmp_dim, reinterpret_cast<floatd2*>(ho_traj->get_data_ptr()));
            senseData->traj = boost::make_shared<cuNDArray<floatd2>>(tmp);
        }
        delete ho_data;
        delete ho_traj;
    }

    // Reference data for CSM estimation; fall back to the main data when no
    // separate reference was acquired.
    boost::shared_ptr<cuNDArray<float_complext>> ref_data;
    boost::shared_ptr<cuNDArray<floatd2>> ref_traj;
    boost::shared_ptr<cuNDArray<float>> ref_dcw;

    if (bucket->ref_.empty()) {
        ref_data = senseData->data;
        ref_traj = senseData->traj;
        ref_dcw = senseData->dcw;
    } else {
        hoNDArray<std::complex<float>>* ho_data;
        hoNDArray<float>* ho_traj;
        std::tie(ho_data, ho_traj) = combine_data(bucket->ref_);

        ref_data = boost::make_shared<cuNDArray<float_complext>>(reinterpret_cast<hoNDArray<float_complext>*>(ho_data));
        if (ho_traj->get_size(0) > 2) {
            auto traj_dcw = separate_traj_and_dcw(ho_traj);
            ref_traj = boost::make_shared<cuNDArray<floatd2>>(*std::get<0>(traj_dcw));
            ref_dcw = boost::make_shared<cuNDArray<float>>(*std::get<1>(traj_dcw));
        } else {
            std::vector<size_t> tdims = *ho_traj->get_dimensions();
            std::vector<size_t> tmp_dim(tdims.begin() + 1, tdims.end());
            hoNDArray<floatd2> tmp(tmp_dim, reinterpret_cast<floatd2*>(ho_traj->get_data_ptr()));
            ref_traj = boost::make_shared<cuNDArray<floatd2>>(tmp);
        }
        delete ho_data;
        delete ho_traj;
    }

    senseData->csm = calculate_CSM(ref_data.get(), ref_traj.get(), ref_dcw.get());

    if (this->next()->putq(cm1) == GADGET_FAIL) {
        GERROR("Failed to put message on que\n");
        cm1->release(); // fix: don't leak the message on queue failure
        return GADGET_FAIL;
    }
    return GADGET_OK;
}
// Render one grayscale PDF report per student into 'dirname', one page per
// exam page, annotated with the student's identity and per-question grades.
// Aborts (with a dialog) if a student has no name, a question has no page,
// or a grade is missing.
// Fix vs. original: every early-abort path now clears the progress status
// label, which previously kept showing stale "Generating PDF ..." text.
void Global::generatePDFs(QString dirname)
{
    size_t pageCount = 0;

    for (size_t s = 0; s < getNumStudents(); s++) {
        Student& student = db()->getStudent(s);

        // Use the student name to form the file name for the report.
        QString clean = student.getStudentName();
        // Convert all non alpha/num chars into an underscore.
        for (QString::iterator i = clean.begin(); i != clean.end(); i++) {
            if (!i->isLetterOrNumber())
                *i = '_';
        }
        if (clean.length() == 0) {
            GINFODIALOG(QString("Cannot render PDF because student %1 does not have a name assigned").arg(s+1));
            Global::getStatusLabel()->setText(""); // clear progress text on abort
            return;
        }

        QString pdfname (dirname + "/report-" + clean + ".pdf");
        GDEBUG ("Generating PDF [%s] for student [%s]", qPrintable(pdfname), qPrintable(student.getStudentId()));

        QPrinter printer (QPrinter::HighResolution);
        printer.setOutputFormat (QPrinter::PdfFormat);
        printer.setOutputFileName (pdfname);
        printer.setPageSize(QPrinter::Letter);
        printer.setResolution(150); // DPI for the printing
        printer.setColorMode(QPrinter::GrayScale);

        QPainter painter;
        if (!painter.begin(&printer)) // Check for errors here
            GFATAL("Failed to do QPainter begin()");

        // Can use this code to change the text color, but causes larger PDF files
        // since it must use a color output format instead.
        //QPen penColor(QColor("#000090")); // Change text to dark blue
        //painter.setPen(penColor);

        for (size_t p = 0; p < getNumPagesPerStudent(); p++) {
            pageCount++;
            // Add spaces at the end so the widget can resize into the reserved space without a re-layout
            Global::getStatusLabel()->setText(QString("Generating PDF for student %1 of %2, page %3 of %4 (%5 percent) ").arg(s+1).arg(getNumStudents()).arg(p+1).arg(getNumPagesPerStudent()).arg(rint(0.5+100.0*pageCount/(1.0*getNumPages()))));
            // Flush out Qt events so that the UI update occurs inside this handler
            Global::getQApplication()->processEvents();
            GDEBUG ("Printing page %zu of %zu for report [%s]", p+1, getNumPagesPerStudent(), qPrintable(pdfname));

            QPixmap pix = getPages()->getQPixmap(p+s*getNumPagesPerStudent());
            // Scale the pixmap to fit the printer
            pix = pix.scaled(printer.pageRect().width(), printer.pageRect().height(), Qt::KeepAspectRatio);
            // Draw the pixmap to the printer
            painter.drawPixmap (0, 0, pix);

            // Print out the student details at the top of the page
            QString title = QString("Name: %1 ID: %2 Page: %3 of %4 Final Grade: %5 of %6").arg(student.getStudentName()).arg(student.getStudentId()).arg(p+1).arg(getNumPagesPerStudent()).arg(student.getTotal()).arg(db()->getTotalMaximum());
            painter.drawText(0, 0, title);

            // Build up a results string to print onto the page
            QString grades ("Results:");
            size_t pageTotal = 0;
            size_t pageMax = 0;
            for (size_t q = 0; q < getNumQuestions(); q++) {
                // See if the question is on this page
                GASSERT(Global::db()->getQuestionPage(q) != 0, "Cannot have page 0 assigned for question %zu", q);
                if (Global::db()->getQuestionPage(q) < 0) {
                    GINFODIALOG(QString("Cannot render PDF because question %1 does not have a page assigned").arg(q+1));
                    Global::getStatusLabel()->setText(""); // clear progress text on abort
                    return;
                }
                if (Global::db()->getQuestionPage(q) == ((int)p+1)) {
                    if (student.getGrade(q) < 0) {
                        GINFODIALOG(QString("Cannot render PDF for student [%1] because question %2 has no grade assigned").arg(student.getStudentName()).arg(q+1));
                        Global::getStatusLabel()->setText(""); // clear progress text on abort
                        return;
                    }
                    pageTotal += student.getGrade(q);
                    pageMax += Global::db()->getQuestionMaximum(q);
                    grades += QString(" Q%1 = %2/%3").arg(q+1).arg(student.getGrade(q)).arg(Global::db()->getQuestionMaximum(q));
                    if (student.getFeedback(q) != "")
                        grades += QString(" [%1]").arg(student.getFeedback(q));
                }
            }
            grades += QString(" Totals = %1/%2").arg(pageTotal).arg(pageMax);
            if (pageMax == 0)
                grades = QString("No Results For This Page");

            // Wrap the text to fit a bounding box that is the width of the page, align to the bottom of the page
            painter.drawText(0, 30, printer.pageRect().width(), printer.pageRect().height()-30, Qt::TextWordWrap | Qt::AlignBottom, grades);

            // Insert a new page except on the last one
            if (p < getNumPagesPerStudent()-1)
                if (!printer.newPage()) // Check for errors here
                    GFATAL("Failed to do newPage() call");
        }
        painter.end();
    }
    Global::getStatusLabel()->setText("");
}
// Parse the ISMRMRD header and determine the calibration mode for every
// encoding space.
// Fixes vs. original: (1) inside the per-encoding loop the parallel-imaging
// section was always read from h.encoding[0], so every encoding space
// inherited the first one's acceleration settings -- it now reads
// h.encoding[e]; (2) the optional calibrationMode is checked before being
// dereferenced.
int GenericReconCartesianReferencePrepGadget::process_config(ACE_Message_Block* mb)
{
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    calib_mode_.resize(NE, ISMRMRD_noacceleration);
    ref_prepared_.resize(NE, false);

    for (size_t e = 0; e < h.encoding.size(); e++)
    {
        ISMRMRD::EncodingSpace e_space = h.encoding[e].encodedSpace;
        ISMRMRD::EncodingSpace r_space = h.encoding[e].reconSpace;
        ISMRMRD::EncodingLimits e_limits = h.encoding[e].encodingLimits;

        GDEBUG_CONDITION_STREAM(verbose.value(), "---> Encoding space : " << e << " <---");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding matrix size: " << e_space.matrixSize.x << " " << e_space.matrixSize.y << " " << e_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding field_of_view : " << e_space.fieldOfView_mm.x << " " << e_space.fieldOfView_mm.y << " " << e_space.fieldOfView_mm.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon matrix size : " << r_space.matrixSize.x << " " << r_space.matrixSize.y << " " << r_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon field_of_view : " << r_space.fieldOfView_mm.x << " " << r_space.fieldOfView_mm.y << " " << r_space.fieldOfView_mm.z);

        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
        }
        else
        {
            // Fix: read the current encoding space, not h.encoding[0].
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << p_imaging.accelerationFactor.kspace_encoding_step_1);
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << p_imaging.accelerationFactor.kspace_encoding_step_2);

            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (p_imaging.accelerationFactor.kspace_encoding_step_1 > 1 || p_imaging.accelerationFactor.kspace_encoding_step_2 > 1)
            {
                // calibrationMode is optional in the schema; guard the dereference.
                if (p_imaging.calibrationMode)
                {
                    const std::string& calib = *p_imaging.calibrationMode;
                    if (calib.compare("interleaved") == 0)
                        calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                    else if (calib.compare("embedded") == 0)
                        calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                    else if (calib.compare("separate") == 0)
                        calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                    else if (calib.compare("external") == 0)
                        calib_mode_[e] = Gadgetron::ISMRMRD_external;
                    else if (calib.compare("other") == 0)
                        calib_mode_[e] = Gadgetron::ISMRMRD_other;
                }
            }
        }
    }

    // ---------------------------------------------------------------------------------------------------------

    return GADGET_OK;
}
int SpiralToGenericGadget:: process(GadgetContainerMessage<ISMRMRD::AcquisitionHeader> *m1, GadgetContainerMessage< hoNDArray< std::complex<float> > > *m2) { // Noise should have been consumed by the noise adjust, but just in case... // bool is_noise = m1->getObjectPtr()->isFlagSet(ISMRMRD::ISMRMRD_ACQ_IS_NOISE_MEASUREMENT); if (is_noise) { m1->release(); return GADGET_OK; } // Delete previously attached trajectories if (m2->cont()) { m2->cont()->release(); } // Compute hoNDArray of trajectory and weights at first pass // if (!prepared_) { int nfov = 1; /* number of fov coefficients. */ int ngmax = 1e5; /* maximum number of gradient samples */ double *xgrad; /* x-component of gradient. */ double *ygrad; /* y-component of gradient. */ double *x_trajectory; double *y_trajectory; double *weighting; int ngrad; double sample_time = (1.0*Tsamp_ns_) * 1e-9; // Calculate gradients calc_vds(smax_,gmax_,sample_time,sample_time,Nints_,&fov_,nfov,krmax_,ngmax,&xgrad,&ygrad,&ngrad); samples_per_interleave_ = std::min(ngrad,static_cast<int>(m1->getObjectPtr()->number_of_samples)); GDEBUG("Using %d samples per interleave\n", samples_per_interleave_); // Calculate the trajectory and weights calc_traj(xgrad, ygrad, samples_per_interleave_, Nints_, sample_time, krmax_, &x_trajectory, &y_trajectory, &weighting); std::vector<size_t> trajectory_dimensions; trajectory_dimensions.push_back(3); trajectory_dimensions.push_back(samples_per_interleave_*Nints_); host_traj_ = boost::shared_ptr< hoNDArray<float> >(new hoNDArray<float>(&trajectory_dimensions)); { float* co_ptr = reinterpret_cast<float*>(host_traj_->get_data_ptr()); for (int i = 0; i < (samples_per_interleave_*Nints_); i++) { co_ptr[i*3+0] = -x_trajectory[i]/2; co_ptr[i*3+1] = -y_trajectory[i]/2; co_ptr[i*3+2] = weighting[i]; } } delete [] xgrad; delete [] ygrad; delete [] x_trajectory; delete [] y_trajectory; delete [] weighting; prepared_ = true; } // Adjustments based in the incoming data // if (samples_to_skip_end_ == -1) { 
samples_to_skip_end_ = m1->getObjectPtr()->number_of_samples-samples_per_interleave_; GDEBUG("Adjusting samples_to_skip_end_ = %d\n", samples_to_skip_end_); } // Define some utility variables // unsigned int samples_to_copy = m1->getObjectPtr()->number_of_samples-samples_to_skip_end_; unsigned int interleave = m1->getObjectPtr()->idx.kspace_encode_step_1; // Prepare for a new array continuation for the trajectory/weights of the incoming profile // std::vector<size_t> trajectory_dimensions; trajectory_dimensions.push_back(3); trajectory_dimensions.push_back(samples_per_interleave_); hoNDArray<float> *traj_source = new hoNDArray<float> (&trajectory_dimensions, host_traj_->get_data_ptr()+3*samples_per_interleave_*interleave); // Make a new array as continuation of m1, and pass along // GadgetContainerMessage< hoNDArray<float> > *cont = new GadgetContainerMessage< hoNDArray<float> >(); *(cont->getObjectPtr()) = *traj_source; m2->cont(cont); //We need to make sure that the trajectory dimensions are attached. m1->getObjectPtr()->trajectory_dimensions = 3; if (this->next()->putq(m1) < 0) { GDEBUG("Failed to put job on queue.\n"); return GADGET_FAIL; } return GADGET_OK; }
// Parse the ISMRMRD XML configuration and extract the HargreavesVDS2000
// spiral trajectory parameters into member state used by process().
// Fails (GADGET_FAIL) if the header has more than one encoding space, has no
// trajectory description, uses a different trajectory identifier, or is
// missing any required parameter.
int SpiralToGenericGadget::process_config(ACE_Message_Block* mb)
{
  // Start parsing the ISMRMRD XML header
  //
  ISMRMRD::IsmrmrdHeader h;
  ISMRMRD::deserialize(mb->rd_ptr(),h);

  if (h.encoding.size() != 1) {
    GDEBUG("This Gadget only supports one encoding space\n");
    return GADGET_FAIL;
  }

  // Get the encoding space and trajectory description
  ISMRMRD::EncodingSpace e_space = h.encoding[0].encodedSpace;
  ISMRMRD::EncodingSpace r_space = h.encoding[0].reconSpace;
  ISMRMRD::EncodingLimits e_limits = h.encoding[0].encodingLimits;
  ISMRMRD::TrajectoryDescription traj_desc;

  if (h.encoding[0].trajectoryDescription) {
    traj_desc = *h.encoding[0].trajectoryDescription;
  } else {
    GDEBUG("Trajectory description missing");
    return GADGET_FAIL;
  }

  if (traj_desc.identifier != "HargreavesVDS2000") {
    GDEBUG("Expected trajectory description identifier 'HargreavesVDS2000', not found.");
    return GADGET_FAIL;
  }

  // All parameters start as sentinels (-1 / -1.0); each must be overwritten by
  // a matching user parameter below or the validation check rejects the header.
  long interleaves = -1;
  long fov_coefficients = -1;
  long sampling_time_ns = -1;
  double max_grad = -1.0;
  double max_slew = -1.0;
  double fov_coeff = -1.0;
  double kr_max = -1.0;

  // Integer-valued trajectory parameters.
  for (std::vector<ISMRMRD::UserParameterLong>::iterator i (traj_desc.userParameterLong.begin()); i != traj_desc.userParameterLong.end(); ++i) {
    if (i->name == "interleaves") {
      interleaves = i->value;
    } else if (i->name == "fov_coefficients") {
      fov_coefficients = i->value;
    } else if (i->name == "SamplingTime_ns") {
      sampling_time_ns = i->value;
    } else {
      GDEBUG("WARNING: unused trajectory parameter %s found\n", i->name.c_str());
    }
  }

  // Floating-point trajectory parameters (gradient system limits and FOV/kmax).
  for (std::vector<ISMRMRD::UserParameterDouble>::iterator i (traj_desc.userParameterDouble.begin()); i != traj_desc.userParameterDouble.end(); ++i) {
    if (i->name == "MaxGradient_G_per_cm") {
      max_grad = i->value;
    } else if (i->name == "MaxSlewRate_G_per_cm_per_s") {
      max_slew = i->value;
    } else if (i->name == "FOVCoeff_1_cm") {
      fov_coeff = i->value;
    } else if (i->name == "krmax_per_cm") {
      kr_max= i->value;
    } else {
      GDEBUG("WARNING: unused trajectory parameter %s found\n", i->name.c_str());
    }
  }

  // Any parameter still negative was never supplied.
  if ((interleaves < 0) || (fov_coefficients < 0) || (sampling_time_ns < 0) || (max_grad < 0) || (max_slew < 0) || (fov_coeff < 0) || (kr_max < 0)) {
    GDEBUG("Appropriate parameters for calculating spiral trajectory not found in XML configuration\n");
    return GADGET_FAIL;
  }

  // Stash everything in member state for the lazy trajectory computation in process().
  Tsamp_ns_ = sampling_time_ns;
  Nints_ = interleaves;
  interleaves_ = static_cast<int>(Nints_);

  gmax_ = max_grad;
  smax_ = max_slew;
  krmax_ = kr_max;
  fov_ = fov_coeff;

  samples_to_skip_start_ = 0; //n.get<int>(std::string("samplestoskipstart.value"))[0];
  samples_to_skip_end_ = -1; //n.get<int>(std::string("samplestoskipend.value"))[0];

  GDEBUG("smax: %f\n", smax_);
  GDEBUG("gmax: %f\n", gmax_);
  GDEBUG("Tsamp_ns: %d\n", Tsamp_ns_);
  GDEBUG("Nints: %d\n", Nints_);
  GDEBUG("fov: %f\n", fov_);
  GDEBUG("krmax: %f\n", krmax_);
  GDEBUG("samples_to_skip_start_ : %d\n", samples_to_skip_start_);
  GDEBUG("samples_to_skip_end_ : %d\n", samples_to_skip_end_);

  return GADGET_OK;
}
// Parse the ISMRMRD header and initialize per-encoding-space state:
// maximum encoding indices, acceleration factors and calibration mode.
// Returns GADGET_OK, or GADGET_FAIL when acquisitionSystemInformation is absent.
int MultiChannelCartesianGrappaReconGadget::process_config(ACE_Message_Block* mb)
{
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    meas_max_idx_.resize(NE);
    acceFactorE1_.resize(NE, 1);
    acceFactorE2_.resize(NE, 1);
    calib_mode_.resize(NE, ISMRMRD_noacceleration);

    recon_obj_.resize(NE);

    size_t e;
    for (e = 0; e < h.encoding.size(); e++)
    {
        ISMRMRD::EncodingSpace e_space = h.encoding[e].encodedSpace;
        ISMRMRD::EncodingSpace r_space = h.encoding[e].reconSpace;
        ISMRMRD::EncodingLimits e_limits = h.encoding[e].encodingLimits;

        GDEBUG_CONDITION_STREAM(verbose.value(), "---> Encoding space : " << e << " <---");
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding matrix size: " << e_space.matrixSize.x << " " << e_space.matrixSize.y << " " << e_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Encoding field_of_view : " << e_space.fieldOfView_mm.x << " " << e_space.fieldOfView_mm.y << " " << e_space.fieldOfView_mm.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon matrix size : " << r_space.matrixSize.x << " " << r_space.matrixSize.y << " " << r_space.matrixSize.z);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Recon field_of_view : " << r_space.fieldOfView_mm.x << " " << r_space.fieldOfView_mm.y << " " << r_space.fieldOfView_mm.z);

        // Maximum indices: optional encoding limits fall back to 0 when absent.
        meas_max_idx_[e].kspace_encode_step_1 = (uint16_t)e_space.matrixSize.y - 1;
        meas_max_idx_[e].set = (e_limits.set && (e_limits.set->maximum > 0)) ? e_limits.set->maximum : 0;
        meas_max_idx_[e].phase = (e_limits.phase && (e_limits.phase->maximum > 0)) ? e_limits.phase->maximum : 0;

        meas_max_idx_[e].kspace_encode_step_2 = (uint16_t)e_space.matrixSize.z - 1;

        meas_max_idx_[e].contrast = (e_limits.contrast && (e_limits.contrast->maximum > 0)) ? e_limits.contrast->maximum : 0;
        // NOTE: the original code assigned .slice twice with the same expression; the duplicate was removed.
        meas_max_idx_[e].slice = (e_limits.slice && (e_limits.slice->maximum > 0)) ? e_limits.slice->maximum : 0;
        meas_max_idx_[e].repetition = e_limits.repetition ? e_limits.repetition->maximum : 0;
        meas_max_idx_[e].average = e_limits.average ? e_limits.average->maximum : 0;
        meas_max_idx_[e].segment = 0;

        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
            acceFactorE1_[e] = 1;
            acceFactorE2_[e] = 1;
        }
        else
        {
            // BUG FIX: was *h.encoding[0].parallelImaging — every encoding space
            // silently inherited the acceleration settings of space 0.
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            acceFactorE1_[e] = p_imaging.accelerationFactor.kspace_encoding_step_1;
            acceFactorE2_[e] = p_imaging.accelerationFactor.kspace_encoding_step_2;
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE1 is " << acceFactorE1_[e]);
            GDEBUG_CONDITION_STREAM(verbose.value(), "acceFactorE2 is " << acceFactorE2_[e]);

            // calibrationMode is optional in the schema; guard the dereference
            // (the old code would throw/abort on headers without it).
            std::string calib = p_imaging.calibrationMode ? *p_imaging.calibrationMode : std::string();

            bool separate = (calib.compare("separate") == 0);
            bool embedded = (calib.compare("embedded") == 0);
            bool external = (calib.compare("external") == 0);
            bool interleaved = (calib.compare("interleaved") == 0);
            bool other = (calib.compare("other") == 0);

            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (acceFactorE1_[e] > 1 || acceFactorE2_[e] > 1)
            {
                if (interleaved)
                    calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                else if (embedded)
                    calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                else if (separate)
                    calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                else if (external)
                    calib_mode_[e] = Gadgetron::ISMRMRD_external;
                else if (other)
                    calib_mode_[e] = Gadgetron::ISMRMRD_other;
            }
        }
    }

    // ---------------------------------------------------------------------------------------------------------

    /*if (!debug_folder.value().empty())
    {
        Gadgetron::get_debug_folder_path(debug_folder.value(), debug_folder_full_path_);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is " << debug_folder_full_path_);
    }
    else
    {
        GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is not set ... ");
    }*/

    return GADGET_OK;
}
// Run the conjugate-gradient SPIRiT reconstruction on one GenericReconJob.
// m1: image header used for set/slice routing; m2: the recon job holding
// trajectory, density weights, CSM, data and regularization image on the host.
// Data for other set/slice combinations is passed downstream untouched.
// Returns GADGET_OK / GADGET_FAIL.
int gpuCgSpiritGadget::process(GadgetContainerMessage<ISMRMRD::ImageHeader> *m1, GadgetContainerMessage<GenericReconJob> *m2)
  {
    // Is this data for this gadget's set/slice?
    //
    if( m1->getObjectPtr()->set != set_number_ || m1->getObjectPtr()->slice != slice_number_ ) {
      // No, pass it downstream...
      return this->next()->putq(m1);
    }

    //GDEBUG("gpuCgSpiritGadget::process\n");

    boost::shared_ptr<GPUTimer> process_timer;
    if( output_timing_ )
      process_timer = boost::shared_ptr<GPUTimer>( new GPUTimer("gpuCgSpiritGadget::process()") );

    if (!is_configured_) {
      GDEBUG("Data received before configuration was completed\n");
      return GADGET_FAIL;
    }

    GenericReconJob* j = m2->getObjectPtr();

    // Some basic validation of the incoming Spirit job
    if (!j->csm_host_.get() || !j->dat_host_.get() || !j->tra_host_.get() || !j->dcw_host_.get() || !j->reg_host_.get()) {
      GDEBUG("Received an incomplete Spirit job\n");
      return GADGET_FAIL;
    }

    // Data layout: dat_host_ is (samples, channels); tra_host_ holds k-space
    // coordinates for one "rotation"; several rotations per dataset are allowed
    // as long as samples is a multiple of the coordinate count.
    unsigned int samples = j->dat_host_->get_size(0);
    unsigned int channels = j->dat_host_->get_size(1);
    unsigned int rotations = samples / j->tra_host_->get_number_of_elements();
    unsigned int frames = j->tra_host_->get_size(1)*rotations;

    if( samples%j->tra_host_->get_number_of_elements() ) {
      GDEBUG("Mismatch between number of samples (%d) and number of k-space coordinates (%d).\nThe first should be a multiplum of the latter.\n",
                    samples, j->tra_host_->get_number_of_elements());
      return GADGET_FAIL;
    }

    // Upload job arrays to the GPU.
    boost::shared_ptr< cuNDArray<floatd2> > traj(new cuNDArray<floatd2> (j->tra_host_.get()));
    boost::shared_ptr< cuNDArray<float> > dcw(new cuNDArray<float> (j->dcw_host_.get()));
    sqrt_inplace(dcw.get()); //Take square root to use for weighting
    boost::shared_ptr< cuNDArray<float_complext> > csm(new cuNDArray<float_complext> (j->csm_host_.get()));
    boost::shared_ptr< cuNDArray<float_complext> > device_samples(new cuNDArray<float_complext> (j->dat_host_.get()));

    cudaDeviceProp deviceProp;
    if( cudaGetDeviceProperties( &deviceProp, device_number_ ) != cudaSuccess) {
      GDEBUG( "Error: unable to query device properties.\n" );
      return GADGET_FAIL;
    }

    unsigned int warp_size = deviceProp.warpSize;

    // Oversampled matrix size is rounded up to a whole number of warps per dim.
    matrix_size_ = uint64d2( j->reg_host_->get_size(0), j->reg_host_->get_size(1) );

    matrix_size_os_ =
      uint64d2(((static_cast<unsigned int>(std::ceil(matrix_size_[0]*oversampling_factor_))+warp_size-1)/warp_size)*warp_size,
              ((static_cast<unsigned int>(std::ceil(matrix_size_[1]*oversampling_factor_))+warp_size-1)/warp_size)*warp_size);

    if( !matrix_size_reported_ ) {
      // NOTE(review): matrix_size_ elements are 64-bit but logged with %d —
      // works for realistic sizes, but formally a format mismatch; confirm.
      GDEBUG("Matrix size  : [%d,%d] \n", matrix_size_[0], matrix_size_[1]);
      GDEBUG("Matrix size OS : [%d,%d] \n", matrix_size_os_[0], matrix_size_os_[1]);
      matrix_size_reported_ = true;
    }

    // Domain: image (x, y, frames, channels); codomain: the sample vector.
    std::vector<size_t> image_dims = to_std_vector(matrix_size_);
    image_dims.push_back(frames);
    image_dims.push_back(channels);
    GDEBUG("Number of coils: %d %d \n",channels,image_dims.size());

    E_->set_domain_dimensions(&image_dims);
    E_->set_codomain_dimensions(device_samples->get_dimensions().get());
    E_->set_dcw(dcw);
    E_->setup( matrix_size_, matrix_size_os_, static_cast<float>(kernel_width_) );
    E_->preprocess(traj.get());

    // SPIRiT calibration operator shares the image-space dimensions.
    boost::shared_ptr< cuNDArray<float_complext> > csm_device( new cuNDArray<float_complext>( csm.get() ));
    S_->set_calibration_kernels(csm_device);
    S_->set_domain_dimensions(&image_dims);
    S_->set_codomain_dimensions(&image_dims);

    /*
    boost::shared_ptr< cuNDArray<float_complext> > reg_image(new cuNDArray<float_complext> (j->reg_host_.get()));
    R_->compute(reg_image.get());

    // Define preconditioning weights
    boost::shared_ptr< cuNDArray<float> > _precon_weights = sum(abs_square(csm.get()).get(), 2);
    boost::shared_ptr<cuNDArray<float> > R_diag = R_->get();
    *R_diag *= float(kappa_);
    *_precon_weights += *R_diag;
    R_diag.reset();
    reciprocal_sqrt_inplace(_precon_weights.get());
    boost::shared_ptr< cuNDArray<float_complext> > precon_weights = real_to_complex<float_complext>( _precon_weights.get() );
    _precon_weights.reset();
    D_->set_weights( precon_weights );
    */

    /*{
      static int counter = 0;
      char filename[256];
      sprintf((char*)filename, "_traj_%d.real", counter);
      write_nd_array<floatd2>( traj->to_host().get(), filename );
      sprintf((char*)filename, "_dcw_%d.real", counter);
      write_nd_array<float>( dcw->to_host().get(), filename );
      sprintf((char*)filename, "_csm_%d.cplx", counter);
      write_nd_array<float_complext>( csm->to_host().get(), filename );
      sprintf((char*)filename, "_samples_%d.cplx", counter);
      write_nd_array<float_complext>( device_samples->to_host().get(), filename );
      sprintf((char*)filename, "_reg_%d.cplx", counter);
      write_nd_array<float_complext>( reg_image->to_host().get(), filename );
      counter++;
    }*/

    // Invoke solver
    //
    boost::shared_ptr< cuNDArray<float_complext> > cgresult;

    {
      boost::shared_ptr<GPUTimer> solve_timer;
      if( output_timing_ )
        solve_timer = boost::shared_ptr<GPUTimer>( new GPUTimer("gpuCgSpiritGadget::solve()") );

      cgresult = cg_.solve(device_samples.get());

      if( output_timing_ )
        solve_timer.reset();
    }

    if (!cgresult.get()) {
      GDEBUG("Iterative_spirit_compute failed\n");
      return GADGET_FAIL;
    }

    /*
    static int counter = 0;
    char filename[256];
    sprintf((char*)filename, "recon_%d.real", counter);
    write_nd_array<float>( abs(cgresult.get())->to_host().get(), filename );
    counter++;
    */

    // If the recon matrix size exceeds the sequence matrix size then crop
    // NOTE(review): matrix_size_seq_ is not assigned anywhere in this function;
    // presumably it is set during configuration — verify before relying on the crop.
    if( matrix_size_seq_ != matrix_size_ )
      cgresult = crop<float_complext,2>( (matrix_size_-matrix_size_seq_)>>1, matrix_size_seq_, cgresult.get() );

    // Combine coil images
    //
    cgresult = real_to_complex<float_complext>(sqrt(sum(abs_square(cgresult.get()).get(), 3).get()).get()); // RSS
    //cgresult = sum(cgresult.get(), 2);

    // Pass on the reconstructed images
    //
    put_frames_on_que(frames,rotations,j,cgresult.get());

    frame_counter_ += frames;

    if( output_timing_ )
      process_timer.reset();

    m1->release();
    return GADGET_OK;
  }
// Prepare the HDF5 dump target: make sure the output folder exists, build a
// file name (prefix [+ measurement id] [+ timestamp] + ".h5"), open the
// ISMRMRD dataset and write the XML configuration into it.
// Returns GADGET_OK / GADGET_FAIL.
int IsmrmrdDumpGadget::process_config(ACE_Message_Block* mb)
{
    ISMRMRD::IsmrmrdHeader header;
    ISMRMRD::deserialize(mb->rd_ptr(), header);

    // Pull the (optional) measurement ID out of the header.
    std::string measurement_id = "";
    if (header.measurementInformation && header.measurementInformation->measurementID) {
        measurement_id = *header.measurementInformation->measurementID;
    }

    GDEBUG("Measurement ID: %s\n", measurement_id.c_str());

    // Ensure the dump folder exists and actually is a directory.
    bf::path dump_dir(folder.value());
    if (exists(dump_dir)) {
        if (!is_directory(dump_dir)) {
            GERROR("Specified path is not a directory\n");
            return GADGET_FAIL;
        }
    } else {
        try {
            bf::create_directory(dump_dir);
        } catch (...) {
            GERROR("Error caught trying to create folder %s\n", folder.value().c_str());
            return GADGET_FAIL;
        }
    }

    // Assemble the file name from prefix and optional suffixes.
    std::string fname = file_prefix.value().empty() ? std::string("ISMRMRD_DUMP")
                                                    : file_prefix.value();

    if (append_id.value() && measurement_id.size()) {
        fname += "_";
        fname += measurement_id;
    }

    if (append_timestamp.value()) {
        fname += "_";
        fname += get_date_time_string();
    }

    fname += ".h5";

    dump_dir /= fname;
    const std::string full_path = dump_dir.string();

    // Open the target dataset and persist the XML header.
    ismrmrd_dataset_ = boost::shared_ptr<ISMRMRD::Dataset>(new ISMRMRD::Dataset(full_path.c_str(), "dataset"));

    std::string xml_config(mb->rd_ptr());
    try {
        ismrmrd_dataset_->writeHeader(xml_config);
    } catch (...) {
        GDEBUG("Failed to write XML header to HDF file\n");
        return GADGET_FAIL;
    }

    return GADGET_OK;
}
// Translate the configured trigger and sorting dimension names into the
// corresponding enum values. An empty name means NONE; an unrecognized name
// logs a warning and also falls back to NONE.
int AcquisitionAccumulateTriggerGadget::process_config(ACE_Message_Block* mb)
{
    const std::string trig_name = trigger_dimension.value();
    const std::string sort_name = sorting_dimension.value();

    // --- trigger dimension ------------------------------------------------
    if (trig_name.empty())                             trigger_ = NONE;
    else if (trig_name == "kspace_encode_step_1")      trigger_ = KSPACE_ENCODE_STEP_1;
    else if (trig_name == "kspace_encode_step_2")      trigger_ = KSPACE_ENCODE_STEP_2;
    else if (trig_name == "average")                   trigger_ = AVERAGE;
    else if (trig_name == "slice")                     trigger_ = SLICE;
    else if (trig_name == "contrast")                  trigger_ = CONTRAST;
    else if (trig_name == "phase")                     trigger_ = PHASE;
    else if (trig_name == "repetition")                trigger_ = REPETITION;
    else if (trig_name == "set")                       trigger_ = SET;
    else if (trig_name == "segment")                   trigger_ = SEGMENT;
    else if (trig_name == "user_0")                    trigger_ = USER_0;
    else if (trig_name == "user_1")                    trigger_ = USER_1;
    else if (trig_name == "user_2")                    trigger_ = USER_2;
    else if (trig_name == "user_3")                    trigger_ = USER_3;
    else if (trig_name == "user_4")                    trigger_ = USER_4;
    else if (trig_name == "user_5")                    trigger_ = USER_5;
    else if (trig_name == "user_6")                    trigger_ = USER_6;
    else if (trig_name == "user_7")                    trigger_ = USER_7;
    else {
        GDEBUG("WARNING: Unknown trigger dimension (%s), trigger condition set to NONE (end of scan)", trig_name.c_str());
        trigger_ = NONE;
    }

    GDEBUG("TRIGGER DIMENSION IS: %s (%d)\n", trig_name.c_str(), trigger_);

    // --- sorting dimension ------------------------------------------------
    if (sort_name.empty())                             sort_ = NONE;
    else if (sort_name == "kspace_encode_step_1")      sort_ = KSPACE_ENCODE_STEP_1;
    else if (sort_name == "kspace_encode_step_2")      sort_ = KSPACE_ENCODE_STEP_2;
    else if (sort_name == "average")                   sort_ = AVERAGE;
    else if (sort_name == "slice")                     sort_ = SLICE;
    else if (sort_name == "contrast")                  sort_ = CONTRAST;
    else if (sort_name == "phase")                     sort_ = PHASE;
    else if (sort_name == "repetition")                sort_ = REPETITION;
    else if (sort_name == "set")                       sort_ = SET;
    else if (sort_name == "segment")                   sort_ = SEGMENT;
    else if (sort_name == "user_0")                    sort_ = USER_0;
    else if (sort_name == "user_1")                    sort_ = USER_1;
    else if (sort_name == "user_2")                    sort_ = USER_2;
    else if (sort_name == "user_3")                    sort_ = USER_3;
    else if (sort_name == "user_4")                    sort_ = USER_4;
    else if (sort_name == "user_5")                    sort_ = USER_5;
    else if (sort_name == "user_6")                    sort_ = USER_6;
    else if (sort_name == "user_7")                    sort_ = USER_7;
    else {
        GDEBUG("WARNING: Unknown sort dimension (%s), sorting set to NONE\n", sort_name.c_str());
        sort_ = NONE;
    }

    GDEBUG("SORTING DIMENSION IS: %s (%d)\n", sort_name.c_str(), sort_);

    trigger_events_ = 0;

    return GADGET_OK;
}
// Configure noise prewhitening: resolve the folder for noise dependency
// files, read gadget properties, parse the ISMRMRD header, try to load a
// stored noise covariance for this measurement's noise dependency, and
// collect the list of "scale only" channels.
// Returns GADGET_OK / GADGET_FAIL.
int NoiseAdjustGadget::process_config(ACE_Message_Block* mb)
{
    // Where noise dependency files are stored (property, else a platform default).
    if ( !workingDirectory.value().empty() )
    {
        noise_dependency_folder_ = workingDirectory.value();
    }
    else
    {
#ifdef _WIN32
        noise_dependency_folder_ = std::string("c:\\temp\\gadgetron\\");
#else
        noise_dependency_folder_ = std::string("/tmp/gadgetron/");
#endif // _WIN32
    }

    GDEBUG("Folder to store noise dependencies is %s\n", noise_dependency_folder_.c_str());

    if ( !noise_dependency_prefix.value().empty() ) noise_dependency_prefix_ = noise_dependency_prefix.value();

    perform_noise_adjust_ = perform_noise_adjust.value();
    GDEBUG("NoiseAdjustGadget::perform_noise_adjust_ is %d\n", perform_noise_adjust_);

    pass_nonconformant_data_ = pass_nonconformant_data.value();
    GDEBUG("NoiseAdjustGadget::pass_nonconformant_data_ is %d\n", pass_nonconformant_data_);

    noise_dwell_time_us_preset_ = noise_dwell_time_us_preset.value();

    ISMRMRD::deserialize(mb->rd_ptr(),current_ismrmrd_header_);

    if ( current_ismrmrd_header_.acquisitionSystemInformation )
    {
        // Fall back to the conventional 0.793 when the header does not specify it.
        receiver_noise_bandwidth_ = (float)(current_ismrmrd_header_.acquisitionSystemInformation->relativeReceiverNoiseBandwidth ?
            *current_ismrmrd_header_.acquisitionSystemInformation->relativeReceiverNoiseBandwidth : 0.793f);

        GDEBUG("receiver_noise_bandwidth_ is %f\n", receiver_noise_bandwidth_);
    }

    // find the measurementID of this scan
    if ( current_ismrmrd_header_.measurementInformation )
    {
        if ( current_ismrmrd_header_.measurementInformation->measurementID )
        {
            measurement_id_ = *current_ismrmrd_header_.measurementInformation->measurementID;
            GDEBUG("Measurement ID is %s\n", measurement_id_.c_str());
        }

        // find the noise depencies if any
        if ( current_ismrmrd_header_.measurementInformation->measurementDependency.size() > 0 )
        {
            measurement_id_of_noise_dependency_.clear();

            std::vector<ISMRMRD::MeasurementDependency>::const_iterator iter =
                current_ismrmrd_header_.measurementInformation->measurementDependency.begin();

            for ( ; iter!= current_ismrmrd_header_.measurementInformation->measurementDependency.end(); iter++ )
            {
                std::string dependencyType = iter->dependencyType;
                std::string dependencyID = iter->measurementID;

                GDEBUG("Found dependency measurement : %s with ID %s\n", dependencyType.c_str(), dependencyID.c_str());

                if ( dependencyType=="Noise" || dependencyType=="noise" )
                {
                    measurement_id_of_noise_dependency_ = dependencyID;
                }
            }

            if ( !measurement_id_of_noise_dependency_.empty() )
            {
                GDEBUG("Measurement ID of noise dependency is %s\n", measurement_id_of_noise_dependency_.c_str());

                full_name_stored_noise_dependency_ =
                    this->generateNoiseDependencyFilename(generateMeasurementIdOfNoiseDependency(measurement_id_of_noise_dependency_));
                GDEBUG("Stored noise dependency is %s\n", full_name_stored_noise_dependency_.c_str());

                // try to load the precomputed noise prewhitener
                if ( !this->loadNoiseCovariance() )
                {
                    GDEBUG("Stored noise dependency is NOT found : %s\n", full_name_stored_noise_dependency_.c_str());
                    noiseCovarianceLoaded_ = false;
                    noise_dwell_time_us_ = -1;
                    noise_covariance_matrixf_.clear();
                }
                else
                {
                    GDEBUG("Stored noise dependency is found : %s\n", full_name_stored_noise_dependency_.c_str());
                    GDEBUG("Stored noise dwell time in us is %f\n", noise_dwell_time_us_);
                    // BUG FIX: get_size() returns size_t — passing it to %d is undefined
                    // behavior in varargs; cast explicitly.
                    GDEBUG("Stored noise channel number is %d\n", (int)noise_covariance_matrixf_.get_size(0));

                    // The stored noise scan is only valid if its coil labels match the current scan.
                    if (noise_ismrmrd_header_.acquisitionSystemInformation)
                    {
                        // BUG FIX: the original dereferenced the current header's optional
                        // acquisitionSystemInformation without checking it exists.
                        if (!current_ismrmrd_header_.acquisitionSystemInformation)
                        {
                            GDEBUG("Noise ismrmrd header has acquisition system information but current header does not\n");
                            return GADGET_FAIL;
                        }

                        if (noise_ismrmrd_header_.acquisitionSystemInformation->coilLabel.size() !=
                            current_ismrmrd_header_.acquisitionSystemInformation->coilLabel.size())
                        {
                            // BUG FIX: added the missing trailing newline for log consistency.
                            GDEBUG("Length of coil label arrays do not match\n");
                            return GADGET_FAIL;
                        }

                        bool labels_match = true;
                        for (size_t l = 0; l < noise_ismrmrd_header_.acquisitionSystemInformation->coilLabel.size(); l++)
                        {
                            if (noise_ismrmrd_header_.acquisitionSystemInformation->coilLabel[l].coilNumber !=
                                current_ismrmrd_header_.acquisitionSystemInformation->coilLabel[l].coilNumber)
                            {
                                labels_match = false;
                                break;
                            }
                            if (noise_ismrmrd_header_.acquisitionSystemInformation->coilLabel[l].coilName !=
                                current_ismrmrd_header_.acquisitionSystemInformation->coilLabel[l].coilName)
                            {
                                labels_match = false;
                                break;
                            }
                        }

                        if (!labels_match)
                        {
                            GDEBUG("Noise and measurement coil labels don't match\n");
                            return GADGET_FAIL;
                        }
                    }
                    else if (current_ismrmrd_header_.acquisitionSystemInformation)
                    {
                        GDEBUG("Noise ismrmrd header does not have acquisition system information but current header does\n");
                        return GADGET_FAIL;
                    }

                    noiseCovarianceLoaded_ = true;
                    number_of_noise_samples_ = 1; //When we load the matrix, it is already scaled.
                }
            }
        }
    }

    //Let's figure out if some channels are "scale_only"
    std::string uncomb_str = scale_only_channels_by_name.value();
    std::vector<std::string> uncomb;
    if (uncomb_str.size())
    {
        GDEBUG("SCALE ONLY: %s\n", uncomb_str.c_str());
        boost::split(uncomb, uncomb_str, boost::is_any_of(","));
        // NOTE: loop variables renamed; the original shadowed the outer "i" with
        // an inner "i", which was correct but error-prone.
        for (unsigned int u = 0; u < uncomb.size(); u++)
        {
            std::string ch = boost::algorithm::trim_copy(uncomb[u]);
            if (current_ismrmrd_header_.acquisitionSystemInformation)
            {
                for (size_t c = 0; c < current_ismrmrd_header_.acquisitionSystemInformation->coilLabel.size(); c++)
                {
                    if (ch == current_ismrmrd_header_.acquisitionSystemInformation->coilLabel[c].coilName)
                    {
                        scale_only_channels_.push_back(c); //This assumes that the channels are sorted in the header
                        break;
                    }
                }
            }
        }
    }

#ifdef USE_OMP
    omp_set_num_threads(1);
#endif // USE_OMP

    return GADGET_OK;
}
static void main_loop(int listen_fd, int event_sub_fd) { int i, nready; int new_fd; int deleted_entry; struct sockaddr_in client_addr; socklen_t client_len; pollfd_add(pfdinfo, listen_fd, POLLIN, NULL); pollfd_add(pfdinfo, event_sub_fd, POLLIN, NULL); /* Main loop */ while(1) { deleted_entry = 0; GDEBUG(1, "Before poll():"); PRINT_POLLFDINFO(pfdinfo); /* Poll */ nready = poll(pfdinfo->pollfd, pfdinfo->nfds, -1); /* There is an error? */ if(nready == -1) { fprintf(stderr, "poll() error: %s; I continue.\n", strerror(errno)); continue; } for(i = 0; i < pfdinfo->nfds; i++) { if(pfdinfo->pollfd[i].revents == 0) continue; GDEBUG(1, "fd = %d is ready for event 0x%X\n", pfdinfo->pollfd[i].fd, pfdinfo->pollfd[i].revents); /* If there is an error, I close the connection */ if( pfdinfo->pollfd[i].revents & POLLERR || pfdinfo->pollfd[i].revents & POLLHUP || pfdinfo->pollfd[i].revents & POLLNVAL) { /* printf("Error, getchar():\n"); getchar(); */ close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } /*********************************************/ /* New Connection/Event Subscribe management */ /*********************************************/ if((pfdinfo->pollfd[i].fd == listen_fd || pfdinfo->pollfd[i].fd == event_sub_fd) && pfdinfo->pollfd[i].revents & POLLIN) { enum client_type type; int size; enum client_state state; if(pfdinfo->pollfd[i].fd == listen_fd) { type = REQ_RESP; size = sizeof(struct handshake); state = WAITING_ARCH; } else { type = EVENT_SUB; size = sizeof(struct rsc_es_hdr); state = CONN_READING_HDR; } bzero(&client_addr, sizeof(client_addr)); client_len = sizeof(client_addr); /* I accept the new connection */ new_fd = accept(pfdinfo->pollfd[i].fd, (struct sockaddr *)&client_addr, &client_len); if(new_fd == -1) { fprintf(stderr, "Accept() error: %s\n", strerror(errno)); } else { /* I create the new client structure */ struct client *new_client; void *data; new_client = create_client(new_fd, type, state); data = malloc(sizeof(struct 
handshake)); if(data == NULL) { close(new_fd); if(--nready < 0) break; continue; } if(new_client == NULL || data == NULL) { fprintf(stderr, "I cannot create a new client struct for fd %d\n", new_fd); if(new_client == NULL) free(new_client); if(data == NULL); free(data); close(new_fd); } else { buff_enq(new_client->rbuf, data, size); GDEBUG(1, "Accepting new connection from "); print_addr_port(new_client->fd); GDEBUG(1, " (fd = %d).\n", new_client->fd); pollfd_add(pfdinfo, new_client->fd, POLLIN, new_client); } } if(--nready <= 0) break; /*************************************************************************/ /* Management of descriptors ready to read of type REQ_RESP or EVENT_SUB */ /*************************************************************************/ } else if(pfdinfo->clients[i]->type == REQ_RESP || pfdinfo->clients[i]->type == EVENT_SUB) { struct client *client = pfdinfo->clients[i]; /***********************************************/ /* POLLIN */ /***********************************************/ if(pfdinfo->pollfd[i].revents & POLLIN) { void *buf; int size, nread; /* If there are data to read, but the read buffer is empty * I create a new message */ if(client->rbuf->first == NULL) { int size = 0; void *data; if(pfdinfo->clients[i]->type == REQ_RESP && pfdinfo->clients[i]->state == CONN_READING_HDR) size = sizeof(struct req_header); else if(pfdinfo->clients[i]->type == EVENT_SUB && pfdinfo->clients[i]->state == CONN_READING_HDR) size = sizeof(struct rsc_es_hdr); if(size != 0) { data = malloc(size); if(data == NULL) { close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } buff_enq(client->rbuf, data, size); } } GDEBUG(1, "There are data ready do be read for fd %d", pfdinfo->pollfd[i].fd); /* I read the data from the first message */ buf = client->rbuf->first->data + client->rbuf->first->n; size = client->rbuf->first->tot - client->rbuf->first->n; nread = read(client->fd, buf, size); if(nread <= 0 ) { /* If there is an error or 
the connection was close, * I close the connection from my side */ close_connection(pfdinfo, i); deleted_entry = 1; if(--nready <= 0) break; continue; } else { client->rbuf->first->n += nread; } /* If I've read all the data, I remove the buffer from client->rbuf * and I process the data */ if(client->rbuf->first->n == client->rbuf->first->tot) { void *read_data = buff_deq(client->rbuf); if(pfdinfo->clients[i]->type == REQ_RESP) { if(client->state == WAITING_ARCH) { /* I read the architecture of the client */ struct handshake *client_arch, *server_arch; client_arch = (struct handshake *)read_data; client->arch = ntohl(client_arch->arch); GDEBUG(1, "Client (%d) architecture is %s\n", client->fd, aconv_arch2str(client->arch)); free(read_data); /* Now I can send my architecture */ client->state = SENDING_ARCH; server_arch = calloc(1, sizeof(struct handshake)); if(server_arch == NULL) { close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } server_arch->arch = htonl(my_arch); buff_enq(client->wbuf, server_arch, sizeof(struct handshake)); pfdinfo->pollfd[i].events |= POLLOUT; client->state = SENDING_ARCH; }else if(client->state == CONN_READING_HDR) { struct req_header *req_hd; struct msg *m; int req_size; void *new_data; /* I've read all the request header, now I've to read all the request body */ client->state = CONN_READING_BODY; req_hd = (struct req_header *)read_data; req_size = rsc_req_msg_size(req_hd); new_data = realloc(read_data, req_size); if(new_data == NULL) { close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } m = buff_enq(client->rbuf, new_data, req_size); /* I've already read the req_header, so I need to update m->n field */ m->n = sizeof(struct req_header); }else if(client->state == CONN_READING_BODY) { /* Now I've read all the request and I can pass it to RSC function */ struct iovec *resp; resp = rscs_manage_request(client->arch, read_data); /* If there is an error, I close the connection */ 
if(resp == NULL) { close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } buff_enq(client->wbuf, resp[0].iov_base, resp[0].iov_len); pfdinfo->pollfd[i].events |= POLLOUT; client->state = CONN_SENDING_RESP; free(read_data); } } else { /* type == EVENT_SUB */ if(client->state == CONN_READING_HDR) { struct rsc_es_hdr *hdr; int size; void *new_data; struct msg *m; hdr = (struct rsc_es_hdr *)read_data; size = rsc_es_msg_size(hdr->type); if(size == -1) { close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } new_data = realloc(read_data, size); if(new_data == NULL) { close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } m = buff_enq(client->rbuf, new_data, size); m->n = sizeof(struct rsc_es_hdr); client->state = CONN_READING_BODY; } else if(client->state == CONN_READING_BODY) { struct rsc_es_ack *ack; ack = rscs_es_manage_msg(client->fd, read_data); free(read_data); /* I take the appropriate action based on ack->response field. * If the response is ACK_FD_REG I've to insert the fd into the * pollfd set. If the response is ACK_FD_DEREG_NOT_READY or ACK_FD_READY, * I remove the fd from the pollfd. 
*/ if(ack->response == ACK_FD_REG) { struct client *c; /* Into the client structure I insert the stream fd and not the * fd to subscribe, In this way I can know where to send data */ c = create_client(client->fd, SUBSCRIBED_FD, CONN_SENDING_RESP); if(c == NULL) { close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } c->esfd_index = i; pollfd_add(pfdinfo, ntohl(ack->fd), ntohl(ack->how), c); } else if(ack->response == ACK_FD_DEREG_NOT_READY || ack->response == ACK_FD_DEREG_READY) { int j; for(j = 0; j < pfdinfo->size; j++) if( pfdinfo->pollfd[j].fd != -1 && pfdinfo->clients[j] != NULL && pfdinfo->clients[j]->type == SUBSCRIBED_FD && pfdinfo->clients[j]->fd == client->fd && pfdinfo->pollfd[j].fd == ntohl(ack->fd) && pfdinfo->pollfd[j].events == ntohl(ack->how)) break; if(j < pfdinfo->size) pollfd_del(pfdinfo, j); } GDEBUG(1, "After rscem_manage_msg:"); PRINT_POLLFDINFO(pfdinfo); /* Now I can send ack back */ buff_enq(client->wbuf, ack, sizeof(struct rsc_es_ack)); pfdinfo->pollfd[i].events |= POLLOUT; /* It's not an error, I don't need to keep trace of the sending state */ client->state = CONN_READING_HDR; } } } /***********************************************/ /* POLLOUT */ /***********************************************/ } else if(pfdinfo->pollfd[i].revents & POLLOUT) { void *buf; int size, nwrite; /* If write buffer is empty, I remove the POLLOUT event and I continue */ if(client->wbuf->first == NULL) { pfdinfo->pollfd[i].events &= (~POLLOUT); if(--nready <= 0) break; continue; } GDEBUG(1, "There are data ready do be written for fd %d", pfdinfo->pollfd[i].fd); buf = client->wbuf->first->data + client->wbuf->first->n; size = client->wbuf->first->tot - client->wbuf->first->n; nwrite = write(client->fd, buf, size); if(nwrite < 0) { close_connection(pfdinfo, i); deleted_entry = 1; if(--nready < 0) break; continue; } else { client->wbuf->first->n += nwrite; } if(client->wbuf->first->n == client->wbuf->first->tot) { /* I remove the message 
from the buffer and I free it */ void *data = buff_deq(client->wbuf); free(data); /* If it's a request/response fd and I've sent an arch or response message, * I change my state to reading header */ if( pfdinfo->clients[i]->type == REQ_RESP && ( client->state == SENDING_ARCH || client->state == CONN_SENDING_RESP) ) client->state = CONN_READING_HDR; /* if client->type is EVENT_SUB there is nothing to do: I need only * to continue to send the buffered data */ } } if(--nready <= 0) break; /*******************************************/ /* An event subscribed fd is waken up */ /*******************************************/ /* The event is occurred, I send back a response I didn't it before */ }else if(pfdinfo->clients[i]->type == SUBSCRIBED_FD) { struct rsc_es_resp *resp; int esfd_index = pfdinfo->clients[i]->esfd_index; resp = rscs_es_event_occurred(pfdinfo->pollfd[esfd_index].fd, pfdinfo->pollfd[i].fd, pfdinfo->pollfd[i].revents); if(resp != NULL) { buff_enq(pfdinfo->clients[esfd_index]->wbuf, resp, sizeof(struct rsc_es_resp)); pfdinfo->pollfd[esfd_index].events |= POLLOUT; } if(--nready <= 0) break; } } /* for(i = 0; i < nready; i++) */ /* If I've deleted a pfdinfo, I compact it */ if(deleted_entry) pollfd_compact(pfdinfo); } /* while(1) */ }
int MaxwellCorrectionGadget:: process(GadgetContainerMessage<ISMRMRD::ImageHeader>* m1, GadgetContainerMessage< hoNDArray< std::complex<float> > >* m2) { if (maxwell_coefficients_present_) { //GDEBUG("Got coefficients\n"); int Nx = m2->getObjectPtr()->get_size(0); int Ny = m2->getObjectPtr()->get_size(1); int Nz = m2->getObjectPtr()->get_size(2); float dx = m1->getObjectPtr()->field_of_view[0] / Nx; float dy = m1->getObjectPtr()->field_of_view[1] / Ny; float dz = m1->getObjectPtr()->field_of_view[2] / Nz; /* GDEBUG("Nx = %d, Ny = %d, Nz = %d\n", Nx, Ny, Nz); GDEBUG("dx = %f, dy = %f, dz = %f\n", dx, dy, dz); GDEBUG("img_pos_x = %f, img_pos_y = %f, img_pos_z = %f\n", m1->getObjectPtr()->position[0], m1->getObjectPtr()->position[1], m1->getObjectPtr()->position[2]); */ std::vector<float> dR(3,0); std::vector<float> dP(3,0); std::vector<float> dS(3,0); std::vector<float> p(3,0); for (int z = 0; z < Nz; z++) { for (int y = 0; y < Ny; y++) { for (int x = 0; x < Nx; x++) { dR[0] = (x-Nx/2+0.5) * dx * m1->getObjectPtr()->read_dir[0]; dR[1] = (x-Nx/2+0.5) * dx * m1->getObjectPtr()->read_dir[1]; dR[2] = (x-Nx/2+0.5) * dx * m1->getObjectPtr()->read_dir[2]; dP[0] = (y-Ny/2+0.5) * dy * m1->getObjectPtr()->phase_dir[0]; dP[1] = (y-Ny/2+0.5) * dy * m1->getObjectPtr()->phase_dir[1]; dP[2] = (y-Ny/2+0.5) * dy * m1->getObjectPtr()->phase_dir[2]; if (Nz > 1) { dS[0] = (z-Nz/2+0.5) * dz * m1->getObjectPtr()->slice_dir[0]; dS[1] = (z-Nz/2+0.5) * dz * m1->getObjectPtr()->slice_dir[1]; dS[2] = (z-Nz/2+0.5) * dz * m1->getObjectPtr()->slice_dir[2]; } p[0] = m1->getObjectPtr()->position[0] + dP[0] + dR[0] + dS[0]; p[1] = m1->getObjectPtr()->position[1] + dP[1] + dR[1] + dS[1]; p[2] = m1->getObjectPtr()->position[2] + dP[2] + dR[2] + dS[2]; //Convert to centimeters p[0] = p[0]/1000.0; p[1] = p[1]/1000.0; p[2] = p[2]/1000.0; float delta_phi = maxwell_coefficients_[0]*p[2]*p[2] + maxwell_coefficients_[1]*(p[0]*p[0] + p[1]*p[1]) + maxwell_coefficients_[2]*p[0]*p[2] + 
maxwell_coefficients_[3]*p[1]*p[2]; long index = z*Ny*Nx+y*Nx+x; std::complex<float>* data_ptr = m2->getObjectPtr()->get_data_ptr(); std::complex<float> correction = std::polar(1.0f,static_cast<float>(2*M_PI*delta_phi)); data_ptr[index] *= correction; } } } } if (this->next()->putq(m1) < 0) { GDEBUG("Unable to put data on next Gadgets Q\n"); return GADGET_FAIL; } return GADGET_OK; }
static int ConditionObject_init(ConditionObject *self, PyObject *args, PyObject *kwargs) { PyObject *lock = NULL, *m = NULL; static char *keywords[] = {"lock", NULL}; DEBUG("self:%p", self); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:__init__", keywords, &lock)) { return -1; } Py_CLEAR(self->lock); if (lock == NULL) { DEBUG("create RLock self:%p", self); lock = PyObject_CallFunctionObjArgs((PyObject*)&RLockObjectType, NULL); if (lock == NULL) { return -1; } self->lock = lock; } else { self->lock = lock; Py_INCREF(self->lock); } DEBUG("self:%p lock:%p", self, self->lock); Py_CLEAR(self->waiters); self->waiters = PyList_New(0); GDEBUG("list:%p", self->waiters); if (self->waiters == NULL) { Py_CLEAR(self->lock); return -1; } m = PyObject_GetAttrString(lock, "acquire"); if (m == NULL) { return -1; } if (PyObject_SetAttrString((PyObject*)self, "acquire", m) == -1) { Py_DECREF(m); return -1; } Py_XDECREF(m); m = PyObject_GetAttrString(lock, "release"); if (m == NULL) { return -1; } if (PyObject_SetAttrString((PyObject*)self, "release", m) == -1) { Py_DECREF(m); return -1; } Py_XDECREF(m); m = PyObject_GetAttrString(lock, "_release_save"); if (m != NULL) { if (PyObject_SetAttrString((PyObject*)self, "_release_save", m) == -1) { Py_DECREF(m); return -1; } DEBUG("import _release_save self:%p lock:%p", self, lock); } PyErr_Clear(); Py_XDECREF(m); m = PyObject_GetAttrString(lock, "_acquire_restore"); if (m != NULL) { if (PyObject_SetAttrString((PyObject*)self, "_acquire_restore", m) == -1) { Py_DECREF(m); return -1; } DEBUG("import _acquire_restore self:%p lock:%p", self, lock); } PyErr_Clear(); Py_XDECREF(m); m = PyObject_GetAttrString(lock, "_is_owned"); if (m != NULL) { if (PyObject_SetAttrString((PyObject*)self, "_is_owned", m) == -1) { Py_DECREF(m); return -1; } DEBUG("import _is_owned self:%p lock:%p", self, lock); } PyErr_Clear(); Py_XDECREF(m); return 1; }
void Global::initDatabase(QString filename) { GASSERT(_db == NULL, "Cannot initialize database twice"); QFile fd (filename); if (!fd.open(QIODevice::ReadOnly)) GEXITDIALOG("Could not open project file for reading " + filename); QTextStream file (&fd); // The first five lines contain the following information // VERSION=%s // IMAGE_PATH=%s // QUESTIONS_PER_STUDENT=%d // PAGES_PER_STUDENT=%d // TOTAL_STUDENTS=%d // SAVE_DATE_TIME=%s // NUM_IMAGES=%d struct _keyvalue { QString key; QString string; size_t number; }; _keyvalue intro[7]; QString header_names[7] = { "VERSION", "IMAGE_PATH", "QUESTIONS_PER_STUDENT", "PAGES_PER_STUDENT", "TOTAL_STUDENTS", "SAVE_DATE_TIME", "NUM_IMAGES" }; bool expect_numbers[7] = { false, false, true, true, true, false, true }; for (size_t i = 0; i < 7; i++) { if (file.atEnd()) GEXITDIALOG(QString("Encountered end of file reading line %1").arg(i)); QString in = file.readLine(); QStringList fields = in.split("="); if (fields.size() != 2) GEXITDIALOG(QString("Could not find key/value pair separated by = in [%1] reading line %2").arg(in).arg(i)); intro[i].key = fields.at(0); if (header_names[i] != fields.at(0)) GEXITDIALOG(QString("Line %1 header was not [%2] but found instead [%3]").arg(i).arg(header_names[i]).arg(intro[i].key)); intro[i].string = fields.at(1); if (fields.at(0).size() == 0) GEXITDIALOG(QString("Key in [%1] was empty reading line %2").arg(in).arg(i)); if (fields.at(1).size() == 0) GEXITDIALOG(QString("Value in [%1] was empty reading line %2").arg(in).arg(i)); intro[i].number = -1; if (expect_numbers[i]) { bool ok; int num = fields.at(1).toInt(&ok); if (!ok) GEXITDIALOG(QString("Value [%1] in [%2] was not an integer").arg(fields.at(1)).arg(in)); if (num <= 0) GEXITDIALOG(QString("Value [%1] in [%2] was not a positive integer").arg(fields.at(1)).arg(in)); intro[i].number = num; } if (expect_numbers[i]) GDEBUG ("Parsed key [%s] = number [%zu]", qPrintable(intro[i].key), intro[i].number); else GDEBUG ("Parsed key [%s] = string 
[%s]", qPrintable(intro[i].key), qPrintable(intro[i].string)); } QString version = intro[0].string; QString image_path = intro[1].string; size_t numQuestions = intro[2].number; size_t numPagesPerStudent = intro[3].number; size_t numStudents = intro[4].number; QString unused_date_time = intro[5].string; size_t numImages = intro[6].number; // Initialize the database since we know all the dimensions now initDatabase(numStudents, numQuestions, numPagesPerStudent); // Check the version is what we are expecting if (version != DB_VERSION) GEXITDIALOG(QString("Found version [%1] when only version [%2] is supported").arg(version).arg(DB_VERSION)); // Image paths should always be . right now, nothing else is supported if (image_path != ".") GEXITDIALOG("Non-empty path found for image loading"); int slash = filename.lastIndexOf("/"); if (slash < 0) GEXITDIALOG("Could not find ending slash in file name"); QString full_image_path = filename.left(slash); GDEBUG("Using image path [%s] from input file [%s]", qPrintable(full_image_path), qPrintable(filename)); initPages(full_image_path); // Check that the number of images loaded matches what we are expecting GDEBUG ("Loaded in %zu images from [%s], input file says %zu images", getPages()->size(), qPrintable(image_path), numImages); if (getPages()->size() != numImages) GEXITDIALOG(QString("Found different number of images than was used for the save")); // Now populate the database. 
Each entry is of the following form: // IDNUM,STUDENTID,STUDENTNAME,TOTAL,Q0,Q1,..,QN,Feed0,Feed1,..,FeedN // All fields will contain something except for Feed0..FeedN for (int student = -2; student < (int)numStudents; student++) { if (file.atEnd()) GEXITDIALOG(QString("Encountered end of file reading student %1 of %2").arg(student+1).arg(numStudents)); QString in = file.readLine(); QStringList fields = in.split("\t"); if (fields.size() != (2*(int)numQuestions + 4)) GEXITDIALOG(QString("Found only %1 columns in [%2] for student %3 of %4 when expected 2x%5+4 columns").arg(fields.size()).arg(in).arg(student+1).arg(numStudents).arg(numQuestions)); for (size_t col = 4; col < numQuestions+4; col++) // Do not search student id/name, or any feedback, since these can be empty if (fields.at(col) == "") GEXITDIALOG(QString("Found empty column %1 for student %3 of %4").arg(col+1).arg(student+1).arg(numStudents)); bool ok; { int num = fields.at(0).toInt(&ok); if ((!ok) || (num != student)) GEXITDIALOG(QString("Expected row number %1 but found [%2] instead for student %3 of %4").arg(student).arg(fields.at(0)).arg(student+1).arg(numStudents)); } if (student == -2) { if ((fields.at(1) != "PAGE") && (fields.at(2) != "PAGE")) GEXITDIALOG(QString("Expected PAGE headers for first row of data, but found [%1] [%2] instead").arg(fields.at(1).arg(fields.at(2)))); for (size_t q = 0; q < numQuestions; q++) { int page = fields.at(q+4).toInt(&ok); if ((!ok) || (page < -1)) GEXITDIALOG(QString("Page value [%1] for question %2 was not valid for maximum values").arg(fields.at(q+4)).arg(q+1)); Global::db()->setQuestionPage(q, page, false); } int total = fields.at(3).toInt(&ok); if ((!ok) || (total != -1)) GEXITDIALOG(QString("Page total in file [%1] was not -1").arg(fields.at(3))); } else if (student == -1) { if ((fields.at(1) != "MAX") && (fields.at(2) != "MAX")) GEXITDIALOG(QString("Expected MAX headers for first row of data, but found [%1] [%2] instead").arg(fields.at(1).arg(fields.at(2)))); 
for (size_t q = 0; q < numQuestions; q++) { int score = fields.at(q+4).toInt(&ok); if ((!ok) || (score < -1)) GEXITDIALOG(QString("Score value [%1] for question %2 was not valid for maximum values").arg(fields.at(q+4)).arg(q+1)); Global::db()->setQuestionMaximum(q, score, false); } int total = fields.at(3).toInt(&ok); if ((!ok) || (total != Global::db()->getTotalMaximum())) GEXITDIALOG(QString("Maximum total in file [%1] did not match calculated maximum total %2").arg(fields.at(3)).arg(Global::db()->getTotalMaximum())); } else { Student &entry = Global::db()->getStudent(student); entry.setStudentId(fields.at(1), false); entry.setStudentName(fields.at(2), false); for (size_t q = 0; q < numQuestions; q++) { int score = fields.at(q+4).toInt(&ok); if ((!ok) || (score < -1) || (score > Global::db()->getQuestionMaximum(q))) GEXITDIALOG(QString("Score value [%1] for question %2 was not valid student %3 of %4").arg(fields.at(q+4)).arg(q+1).arg(student+1).arg(numStudents)); entry.setGrade(q, score, false); } for (size_t q = 0; q < numQuestions; q++) entry.setFeedback(q, fields.at(q+4+numQuestions), false); int total = fields.at(3).toInt(&ok); if ((!ok) || (total != entry.getTotal())) GEXITDIALOG(QString("Total in file [%1] did not match calculated total %2").arg(fields.at(3)).arg(entry.getTotal())); } } }
int NoiseAdjustGadget::process(GadgetContainerMessage<ISMRMRD::AcquisitionHeader>* m1, GadgetContainerMessage< hoNDArray< std::complex<float> > >* m2) { bool is_noise = m1->getObjectPtr()->isFlagSet(ISMRMRD::ISMRMRD_ACQ_IS_NOISE_MEASUREMENT); unsigned int channels = m1->getObjectPtr()->active_channels; unsigned int samples = m1->getObjectPtr()->number_of_samples; //TODO: Remove this if ( measurement_id_.empty() ) { unsigned int muid = m1->getObjectPtr()->measurement_uid; std::ostringstream ostr; ostr << muid; measurement_id_ = ostr.str(); } if ( is_noise ) { if (noiseCovarianceLoaded_) { m1->release(); //Do not accumulate noise when we have a loaded noise covariance return GADGET_OK; } // this noise can be from a noise scan or it can be from the built-in noise if ( number_of_noise_samples_per_acquisition_ == 0 ) { number_of_noise_samples_per_acquisition_ = samples; } if ( noise_dwell_time_us_ < 0 ) { if (noise_dwell_time_us_preset_ > 0.0) { noise_dwell_time_us_ = noise_dwell_time_us_preset_; } else { noise_dwell_time_us_ = m1->getObjectPtr()->sample_time_us; } } //If noise covariance matrix is not allocated if (noise_covariance_matrixf_.get_number_of_elements() != channels*channels) { std::vector<size_t> dims(2, channels); try { noise_covariance_matrixf_.create(&dims); noise_covariance_matrixf_once_.create(&dims); } catch (std::runtime_error& err) { GEXCEPTION(err, "Unable to allocate storage for noise covariance matrix\n" ); return GADGET_FAIL; } Gadgetron::clear(noise_covariance_matrixf_); Gadgetron::clear(noise_covariance_matrixf_once_); number_of_noise_samples_ = 0; } std::complex<float>* cc_ptr = noise_covariance_matrixf_.get_data_ptr(); std::complex<float>* data_ptr = m2->getObjectPtr()->get_data_ptr(); hoNDArray< std::complex<float> > readout(*m2->getObjectPtr()); gemm(noise_covariance_matrixf_once_, readout, true, *m2->getObjectPtr(), false); Gadgetron::add(noise_covariance_matrixf_once_, noise_covariance_matrixf_, noise_covariance_matrixf_); 
number_of_noise_samples_ += samples; m1->release(); return GADGET_OK; } //We should only reach this code if this data is not noise. if ( perform_noise_adjust_ ) { //Calculate the prewhitener if it has not been done if (!noise_decorrelation_calculated_ && (number_of_noise_samples_ > 0)) { if (number_of_noise_samples_ > 1) { //Scale noise_covariance_matrixf_ *= std::complex<float>(1.0/(float)(number_of_noise_samples_-1)); number_of_noise_samples_ = 1; //Scaling has been done } computeNoisePrewhitener(); acquisition_dwell_time_us_ = m1->getObjectPtr()->sample_time_us; if ((noise_dwell_time_us_ == 0.0f) || (acquisition_dwell_time_us_ == 0.0f)) { noise_bw_scale_factor_ = 1.0f; } else { noise_bw_scale_factor_ = (float)std::sqrt(2.0*acquisition_dwell_time_us_/noise_dwell_time_us_*receiver_noise_bandwidth_); } noise_prewhitener_matrixf_ *= std::complex<float>(noise_bw_scale_factor_,0.0); GDEBUG("Noise dwell time: %f\n", noise_dwell_time_us_); GDEBUG("Acquisition dwell time: %f\n", acquisition_dwell_time_us_); GDEBUG("receiver_noise_bandwidth: %f\n", receiver_noise_bandwidth_); GDEBUG("noise_bw_scale_factor: %f", noise_bw_scale_factor_); } if (noise_decorrelation_calculated_) { //Apply prewhitener if ( noise_prewhitener_matrixf_.get_size(0) == m2->getObjectPtr()->get_size(1) ) { hoNDArray<std::complex<float> > tmp(*m2->getObjectPtr()); gemm(*m2->getObjectPtr(), tmp, noise_prewhitener_matrixf_); } else { if (!pass_nonconformant_data_) { m1->release(); GERROR("Number of channels in noise prewhitener %d is incompatible with incoming data %d\n", noise_prewhitener_matrixf_.get_size(0), m2->getObjectPtr()->get_size(1)); return GADGET_FAIL; } } } } if (this->next()->putq(m1) == -1) { GDEBUG("Error passing on data to next gadget\n"); return GADGET_FAIL; } return GADGET_OK; }
int BucketToBufferGadget
::process(GadgetContainerMessage<IsmrmrdAcquisitionBucket>* m1)
{
    // Converts an acquisition bucket into one or more IsmrmrdReconData
    // messages, keyed by getKey(acqhdr.idx). Reference and imaging
    // acquisitions are stuffed into the ref_/data_ chunks of the recon bit
    // for their encoding space; non-empty buffers are sent down the chain.
    size_t key;
    std::map<size_t, GadgetContainerMessage<IsmrmrdReconData>* > recon_data_buffers;
    //GDEBUG("BucketToBufferGadget::process\n");

    //Iterate over the reference data of the bucket
    IsmrmrdDataBuffered* pCurrDataBuffer = NULL;
    for (std::vector<IsmrmrdAcquisitionData>::iterator it = m1->getObjectPtr()->ref_.begin();
         it != m1->getObjectPtr()->ref_.end(); ++it)
    {
        //Get a reference to the header for this acquisition
        ISMRMRD::AcquisitionHeader & acqhdr = *it->head_->getObjectPtr();

        //Generate the key to the corresponding ReconData buffer
        key = getKey(acqhdr.idx);

        //The storage is based on the encoding space
        uint16_t espace = acqhdr.encoding_space_ref;

        //Get some references to simplify the notation
        //the reconstruction bit corresponding to this ReconDataBuffer and encoding space
        IsmrmrdReconBit & rbit = getRBit(recon_data_buffers, key, espace);
        //and the corresponding data buffer for the reference data
        if (!rbit.ref_)
            rbit.ref_ = IsmrmrdDataBuffered();
        IsmrmrdDataBuffered & dataBuffer = *rbit.ref_;
        //this encoding space's xml header info
        ISMRMRD::Encoding & encoding = hdr_.encoding[espace];
        //this bucket's reference stats
        IsmrmrdAcquisitionBucketStats & stats = m1->getObjectPtr()->refstats_[espace];

        //Fill the sampling description for this data buffer, only need to fill the sampling_ once per recon bit
        if (&dataBuffer != pCurrDataBuffer)
        {
            fillSamplingDescription(dataBuffer.sampling_, encoding, stats, acqhdr, true);
            pCurrDataBuffer = &dataBuffer;
        }

        //Make sure that the data storage for this data buffer has been allocated
        //TODO should this check the limits, or should that be done in the stuff function?
        allocateDataArrays(dataBuffer, acqhdr, encoding, stats, true);

        // Stuff the data, header and trajectory into this data buffer
        stuff(it, dataBuffer, encoding, stats, true);
    }

    //Iterate over the imaging data of the bucket
    // this is exactly the same code as for the reference data except for
    // the chunk of the data buffer.
    pCurrDataBuffer = NULL;
    for (std::vector<IsmrmrdAcquisitionData>::iterator it = m1->getObjectPtr()->data_.begin();
         it != m1->getObjectPtr()->data_.end(); ++it)
    {
        //Get a reference to the header for this acquisition
        ISMRMRD::AcquisitionHeader & acqhdr = *it->head_->getObjectPtr();

        //Generate the key to the corresponding ReconData buffer
        key = getKey(acqhdr.idx);

        //The storage is based on the encoding space
        uint16_t espace = acqhdr.encoding_space_ref;

        //Get some references to simplify the notation
        //the reconstruction bit corresponding to this ReconDataBuffer and encoding space
        IsmrmrdReconBit & rbit = getRBit(recon_data_buffers, key, espace);
        //and the corresponding data buffer for the imaging data
        IsmrmrdDataBuffered & dataBuffer = rbit.data_;
        //this encoding space's xml header info
        ISMRMRD::Encoding & encoding = hdr_.encoding[espace];
        //this bucket's imaging data stats
        IsmrmrdAcquisitionBucketStats & stats = m1->getObjectPtr()->datastats_[espace];

        //Fill the sampling description for this data buffer, only need to fill sampling_ once per recon bit
        if (&dataBuffer != pCurrDataBuffer)
        {
            fillSamplingDescription(dataBuffer.sampling_, encoding, stats, acqhdr, false);
            pCurrDataBuffer = &dataBuffer;
        }

        //Make sure that the data storage for this data buffer has been allocated
        //TODO should this check the limits, or should that be done in the stuff function?
        allocateDataArrays(dataBuffer, acqhdr, encoding, stats, false);

        // Stuff the data, header and trajectory into this data buffer
        stuff(it, dataBuffer, encoding, stats, false);
    }

    //Send all the ReconData messages
    // (cast: map::size() returns size_t, the format expects int)
    GDEBUG("End of bucket reached, sending out %d ReconData buffers\n", (int)recon_data_buffers.size());
    for (std::map<size_t, GadgetContainerMessage<IsmrmrdReconData>* >::iterator it = recon_data_buffers.begin();
         it != recon_data_buffers.end(); it++)
    {
        //GDEBUG_STREAM("Sending: " << it->first << std::endl);
        if (it->second)
        {
            size_t num_rbit = it->second->getObjectPtr()->rbit_.size();
            size_t total_data = 0;
            for (size_t r = 0; r < num_rbit; r++)
            {
                total_data += it->second->getObjectPtr()->rbit_[r].data_.data_.get_number_of_elements();
                if (it->second->getObjectPtr()->rbit_[r].ref_)
                    total_data += it->second->getObjectPtr()->rbit_[r].ref_.get().data_.get_number_of_elements();
            }

            if (total_data > 0)
            {
                if (this->next()->putq(it->second) == -1)
                {
                    it->second->release();
                    throw std::runtime_error("Failed to pass bucket down the chain\n");
                }
            }
            else
            {
                // BUGFIX: empty buffers were neither sent nor released,
                // leaking the message. Release them here.
                it->second->release();
            }
        }
    }

    //Clear the recondata buffer map
    recon_data_buffers.clear();  // is this necessary?

    //We can release the incoming bucket now. This will release all of the data it contains.
    m1->release();

    return GADGET_OK;
}
int GenericReconEigenChannelGadget::process_config(ACE_Message_Block* mb)
{
    // Parses the ISMRMRD XML header and records, per encoding space, the
    // calibration mode used by parallel imaging (or noacceleration when no
    // parallel-imaging section / acceleration is present).
    ISMRMRD::IsmrmrdHeader h;
    try
    {
        deserialize(mb->rd_ptr(), h);
    }
    catch (...)
    {
        GDEBUG("Error parsing ISMRMRD Header");
    }

    if (!h.acquisitionSystemInformation)
    {
        GDEBUG("acquisitionSystemInformation not found in header. Bailing out");
        return GADGET_FAIL;
    }

    // -------------------------------------------------

    size_t NE = h.encoding.size();
    num_encoding_spaces_ = NE;
    GDEBUG_CONDITION_STREAM(verbose.value(), "Number of encoding spaces: " << NE);

    calib_mode_.resize(NE, ISMRMRD_noacceleration);
    KLT_.resize(NE);

    for (size_t e = 0; e < h.encoding.size(); e++)
    {
        if (!h.encoding[e].parallelImaging)
        {
            GDEBUG_STREAM("Parallel Imaging section not found in header");
            calib_mode_[e] = ISMRMRD_noacceleration;
        }
        else
        {
            // BUGFIX: the original dereferenced h.encoding[0].parallelImaging
            // inside the loop over e, reading encoding space 0's settings for
            // every space (and dereferencing a possibly-absent optional).
            ISMRMRD::ParallelImaging p_imaging = *h.encoding[e].parallelImaging;

            // calibrationMode is optional in the header; treat a missing one
            // as no acceleration rather than dereferencing an empty optional.
            if (!p_imaging.calibrationMode)
            {
                calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
                continue;
            }
            std::string calib = *p_imaging.calibrationMode;

            bool separate = (calib.compare("separate") == 0);
            bool embedded = (calib.compare("embedded") == 0);
            bool external = (calib.compare("external") == 0);
            bool interleaved = (calib.compare("interleaved") == 0);
            bool other = (calib.compare("other") == 0);

            calib_mode_[e] = Gadgetron::ISMRMRD_noacceleration;
            if (p_imaging.accelerationFactor.kspace_encoding_step_1 > 1
                || p_imaging.accelerationFactor.kspace_encoding_step_2 > 1)
            {
                if (interleaved)
                    calib_mode_[e] = Gadgetron::ISMRMRD_interleaved;
                else if (embedded)
                    calib_mode_[e] = Gadgetron::ISMRMRD_embedded;
                else if (separate)
                    calib_mode_[e] = Gadgetron::ISMRMRD_separate;
                else if (external)
                    calib_mode_[e] = Gadgetron::ISMRMRD_external;
                else if (other)
                    calib_mode_[e] = Gadgetron::ISMRMRD_other;
            }
        }
    }

    // ---------------------------------------------------------------------------------------------------------

    /*if (!debug_folder.value().empty())
    {
        Gadgetron::get_debug_folder_path(debug_folder.value(), debug_folder_full_path_);
        GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is " << debug_folder_full_path_);
    }
    else
    {
        GDEBUG_CONDITION_STREAM(verbose.value(), "Debug folder is not set ... ");
    }*/

    return GADGET_OK;
}
int PartialFourierAdjustROGadget ::process(GadgetContainerMessage<ISMRMRD::AcquisitionHeader>* m1, GadgetContainerMessage< hoNDArray< std::complex<float> > >* m2) { bool is_noise = ISMRMRD::FlagBit(ISMRMRD::ISMRMRD_ACQ_IS_NOISE_MEASUREMENT).isSet(m1->getObjectPtr()->flags); size_t channels = m1->getObjectPtr()->active_channels; size_t samples = m1->getObjectPtr()->number_of_samples; size_t centre_column = m1->getObjectPtr()->center_sample; if (!is_noise) { // adjust the center echo int az = addPrePostZeros(centre_column, samples); if ( az!= 0 && samples < maxRO_ ) { GadgetContainerMessage< hoNDArray< std::complex<float> > >* m3 = new GadgetContainerMessage< hoNDArray< std::complex<float> > >(); if (!m3) { return GADGET_FAIL; } std::vector<size_t> data_out_dims = *m2->getObjectPtr()->get_dimensions(); data_out_dims[0] = maxRO_; try { m3->getObjectPtr()->create(&data_out_dims); } catch(...) { GDEBUG("Unable to create new data array for downsampled data\n"); return GADGET_FAIL; } m3->getObjectPtr()->fill(0); std::complex<float>* pM3 = m3->getObjectPtr()->get_data_ptr(); std::complex<float>* pM2 = m2->getObjectPtr()->get_data_ptr(); size_t c; if ( az == 1 ) // pre zeros { for ( c=0; c<channels; c++ ) { memcpy(pM3+c*maxRO_+maxRO_-samples, pM2+c*samples, sizeof( std::complex<float> )*samples); } } if ( az == 2 ) // post zeros { for ( c=0; c<channels; c++ ) { memcpy(pM3+c*maxRO_, pM2+c*samples, sizeof( std::complex<float> )*samples); } } m2->release(); //We are done with this data m1->cont(m3); m1->getObjectPtr()->number_of_samples = data_out_dims[0]; } if (this->next()->putq(m1) == -1) { GERROR("NoiseAdjustGadget::process, passing data on to next gadget"); return -1; } } else { if (this->next()->putq(m1) == -1) { GERROR("NoiseAdjustGadget::process, passing data on to next gadget"); return -1; } } return GADGET_OK; }
int GenericReconCartesianNonLinearSpirit2DTGadget::process_config(ACE_Message_Block* mb) { GADGET_CHECK_RETURN(BaseClass::process_config(mb) == GADGET_OK, GADGET_FAIL); // ------------------------------------------------- ISMRMRD::IsmrmrdHeader h; try { deserialize(mb->rd_ptr(), h); } catch (...) { GDEBUG("Error parsing ISMRMRD Header"); } // ------------------------------------------------- // check the parameters if(this->spirit_nl_iter_max.value()==0) { this->spirit_nl_iter_max.value(15); GDEBUG_STREAM("spirit_iter_max: " << this->spirit_nl_iter_max.value()); } if (this->spirit_nl_iter_thres.value()<FLT_EPSILON) { this->spirit_nl_iter_thres.value(0.004); GDEBUG_STREAM("spirit_nl_iter_thres: " << this->spirit_nl_iter_thres.value()); } if (this->spirit_image_reg_lamda.value() < FLT_EPSILON) { if(this->spirit_reg_proximity_across_cha.value()) { if(spirit_reg_estimate_noise_floor.value()) { this->spirit_image_reg_lamda.value(0.001); } else { this->spirit_image_reg_lamda.value(0.0002); } } else { if(spirit_reg_estimate_noise_floor.value()) { this->spirit_image_reg_lamda.value(0.002); } else { this->spirit_image_reg_lamda.value(0.00005); } } GDEBUG_STREAM("spirit_image_reg_lamda: " << this->spirit_image_reg_lamda.value()); } if (this->spirit_reg_N_weighting_ratio.value() < FLT_EPSILON) { if(acceFactorE1_[0]<=5) { this->spirit_reg_N_weighting_ratio.value(10.0); } else { this->spirit_reg_N_weighting_ratio.value(20.0); } GDEBUG_STREAM("spirit_reg_N_weighting_ratio: " << this->spirit_reg_N_weighting_ratio.value()); } return GADGET_OK; }
boost::shared_ptr<cuNDArray<float_complext> > gpuCSICoilEstimationGadget::calculate_CSM(
        cuNDArray<float_complext>* data, cuNDArray<floatd2>* traj, cuNDArray<float>* dcw )
{
    // Estimates coil sensitivity maps (B1 maps) from one spiral readout.
    // With density compensation weights a direct NFFT gridding is used;
    // without them an iterative CG-SENSE style reconstruction is run first.
    if (dcw)
    {
        //We have density compensation, so we can get away with gridding
        cuNFFT_plan<float,2> plan(from_std_vector<size_t,2>(img_size), from_std_vector<size_t,2>(img_size)*size_t(2), kernel_width_);
        std::vector<size_t> csm_dims = img_size;
        csm_dims.push_back(coils);
        cuNDArray<float_complext> tmp(csm_dims);
        // cast: get_size returns size_t, %i expects int
        GDEBUG("Coils %i \n\n", (int)tmp.get_size(2));

        std::vector<size_t> spiral_dims{data->get_size(0), data->get_size(1)}; //Trajectories, coils
        // NOTE(review): the "+ ...*0" offset selects spiral index 0 despite the
        // variable name; looks like leftover from choosing a later spiral — confirm.
        cuNDArray<complext<float>> second_spiral(spiral_dims, data->get_data_ptr()+spiral_dims[0]*spiral_dims[1]*0);
        std::vector<size_t> spiral_traj_dims{spiral_dims[0]};
        cuNDArray<floatd2> spiral_traj(spiral_traj_dims, traj->get_data_ptr()+spiral_dims[0]*0);
        cuNDArray<float> spiral_dcw(spiral_traj_dims, dcw->get_data_ptr()+spiral_dims[0]*0);

        GDEBUG("Preprocessing\n\n");
        plan.preprocess(&spiral_traj, cuNFFT_plan<float,2>::NFFT_PREP_NC2C);
        GDEBUG("Computing\n\n");
        plan.compute(&second_spiral, &tmp, &spiral_dcw, cuNFFT_plan<float,2>::NFFT_BACKWARDS_NC2C);

        // (Removed an unused abs(&tmp) result and an unused flat trajectory view.)
        return estimate_b1_map<float,2>(&tmp);
    }
    else
    {
        //No density compensation, we have to do iterative reconstruction.
        std::vector<size_t> csm_dims = img_size;
        csm_dims.push_back(coils);

        auto E = boost::make_shared<cuNFFTOperator<float,2>>();
        E->setup(from_std_vector<size_t,2>(img_size), from_std_vector<size_t,2>(img_size)*size_t(2), kernel_width_);
        E->set_domain_dimensions(&csm_dims);

        cuCgSolver<float_complext> solver;
        solver.set_max_iterations(20);
        solver.set_encoding_operator(E);

        std::vector<size_t> spiral_dims{data->get_size(0), data->get_size(1)}; //Trajectories, coils
        cuNDArray<complext<float>> second_spiral(spiral_dims, data->get_data_ptr()+spiral_dims[0]*spiral_dims[1]*0);
        E->set_codomain_dimensions(&spiral_dims);

        std::vector<size_t> spiral_traj_dims{spiral_dims[0]};
        cuNDArray<floatd2> spiral_traj(spiral_traj_dims, traj->get_data_ptr()+spiral_dims[0]*0);
        E->preprocess(&spiral_traj);

        auto tmp = solver.solve(&second_spiral);
        // (Removed an unused abs(tmp.get()) result and an unused flat trajectory view.)
        auto res = estimate_b1_map<float,2>(tmp.get());
        //fill(res.get(),float_complext(1,0));
        //auto res= boost::make_shared<cuNDArray<float_complext>>(csm_dims);
        //fill(res.get(),float_complext(1,0));
        return res;
    }
}