/**
 * Load an NX log entry, i.e. a group type that has value and time entries.
 * @param file :: A reference to the NeXus file handle opened at the parent group
 * @param entry_name :: The name of the log entry
 * @param entry_class :: The type of the entry
 * @param workspace :: A pointer to the workspace to store the logs
 */
void LoadNexusLogs::loadNXLog(
    ::NeXus::File &file, const std::string &entry_name,
    const std::string &entry_class,
    boost::shared_ptr<API::MatrixWorkspace> workspace) const {
  g_log.debug() << "processing " << entry_name << ":" << entry_class << "\n";

  file.openGroup(entry_name, entry_class);
  // Validate the NX log class.
  std::map<std::string, std::string> entries = file.getEntries();
  if ((entries.find("value") == entries.end()) ||
      (entries.find("time") == entries.end())) {
    g_log.warning() << "Invalid NXlog entry " << entry_name
                    << " found. Did not contain 'value' and 'time'.\n";
    file.closeGroup();
    return;
  }

  // Whether or not to overwrite logs on the workspace
  bool overwritelogs = this->getProperty("OverwriteLogs");
  try {
    if (overwritelogs || !(workspace->run().hasProperty(entry_name))) {
      Kernel::Property *logValue = createTimeSeries(file, entry_name);
      workspace->mutableRun().addProperty(logValue, overwritelogs);
    }
  } catch (::NeXus::Exception &e) {
    g_log.warning() << "NXlog entry " << entry_name
                    << " gave an error when loading: '" << e.what() << "'.\n";
  }
  file.closeGroup();
}
void SaveNXTomo::writeImageKeyValue(const DataObjects::Workspace2D_sptr workspace, ::NeXus::File &nxFile, int thisFileInd) { // Add ImageKey to instrument/image_key if present, use 0 if not try { nxFile.openPath("/entry1/tomo_entry/instrument/detector"); } catch (...) { throw std::runtime_error("Unable to create a valid NXTomo file"); } // Set the default key value for this WS std::vector<double> keyValue; keyValue.push_back(0); if (workspace->run().hasProperty("ImageKey")) { std::string tmpVal = workspace->run().getLogData("ImageKey")->value(); try { keyValue[0] = boost::lexical_cast<double>(tmpVal); } catch (...) { } // Invalid Cast is handled below } nxFile.openData("image_key"); nxFile.putSlab(keyValue, thisFileInd, 1); nxFile.closeData(); nxFile.closeGroup(); }
void SaveNXTomo::writeIntensityValue(const DataObjects::Workspace2D_sptr workspace, ::NeXus::File &nxFile, int thisFileInd) { // Add Intensity to control if present, use 1 if not try { nxFile.openPath("/entry1/tomo_entry/control"); } catch (...) { throw std::runtime_error("Unable to create a valid NXTomo file"); } std::vector<double> intensityValue; intensityValue.push_back(1); if (workspace->run().hasProperty("Intensity")) { std::string tmpVal = workspace->run().getLogData("Intensity")->value(); try { intensityValue[0] = boost::lexical_cast<double>(tmpVal); } catch (...) { } // Invalid Cast is handled below } nxFile.openData("data"); nxFile.putSlab(intensityValue, thisFileInd, 1); nxFile.closeData(); }
/** Load the pulse times, if needed. This sets * thisBankPulseTimes to the right pointer. * */ void LoadBankFromDiskTask::loadPulseTimes(::NeXus::File &file) { try { // First, get info about the event_time_zero field in this bank file.openData("event_time_zero"); } catch (::NeXus::Exception &) { // Field not found error is most likely. // Use the "proton_charge" das logs. thisBankPulseTimes = m_loader.alg->m_allBanksPulseTimes; return; } std::string thisStartTime; size_t thisNumPulses = 0; file.getAttr("offset", thisStartTime); if (!file.getInfo().dims.empty()) thisNumPulses = file.getInfo().dims[0]; file.closeData(); // Now, we look through existing ones to see if it is already loaded // thisBankPulseTimes = NULL; for (auto &bankPulseTime : m_loader.m_bankPulseTimes) { if (bankPulseTime->equals(thisNumPulses, thisStartTime)) { thisBankPulseTimes = bankPulseTime; return; } } // Not found? Need to load and add it thisBankPulseTimes = boost::make_shared<BankPulseTimes>(boost::ref(file), m_framePeriodNumbers); m_loader.m_bankPulseTimes.push_back(thisBankPulseTimes); }
/** Load the event_index field (a list of size of # of pulses giving the index in the event list for that pulse) * @param file :: File handle for the NeXus file * @param event_index :: ref to the vector */ void LoadBankFromDiskTask::loadEventIndex(::NeXus::File &file, std::vector<uint64_t> &event_index) { // Get the event_index (a list of size of # of pulses giving the index in // the event list for that pulse) file.openData("event_index"); // Must be uint64 if (file.getInfo().type == ::NeXus::UINT64) file.getData(event_index); else { m_loader.alg->getLogger().warning() << "Entry " << entry_name << "'s event_index field is not UINT64! It will be skipped.\n"; m_loadError = true; } file.closeData(); // Look for the sign that the bank is empty if (event_index.size() == 1) { if (event_index[0] == 0) { // One entry, only zero. This means NO events in this bank. m_loadError = true; m_loader.alg->getLogger().debug() << "Bank " << entry_name << " is empty.\n"; } } }
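// ---------------------------------------------------------------------------
// Illustrative sketch (not Mantid or NeXus API code): how a cumulative
// event_index, one entry per pulse, partitions a bank's flat event list into
// per-pulse ranges, and what the single-zero "empty bank" signal checked for
// above looks like. All values are invented; the snippet compiles as its own
// tiny program.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const std::vector<uint64_t> event_index = {0, 3, 3, 7}; // 4 pulses
  const uint64_t total_events = 10;

  for (std::size_t pulse = 0; pulse < event_index.size(); ++pulse) {
    const uint64_t begin = event_index[pulse];
    const uint64_t end = (pulse + 1 < event_index.size())
                             ? event_index[pulse + 1]
                             : total_events;
    std::cout << "pulse " << pulse << ": events [" << begin << ", " << end
              << ")\n";
  }
  // An event_index containing a single zero entry would mean the bank holds
  // no events at all, which is the condition loadEventIndex flags as empty.
}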
/**
 * Open the first NXentry of the supplied nexus file.
 *
 * @param handle Object to work on.
 */
void MuonNexusReader::openFirstNXentry(NeXus::File &handle) {
  std::map<string, string> entries = handle.getEntries();
  const auto entry =
      std::find_if(entries.cbegin(), entries.cend(),
                   [](const auto entry) { return entry.second == NXENTRY; });
  if (entry == entries.cend())
    throw std::runtime_error("Failed to find NXentry");
  handle.openGroup(entry->first, NXENTRY);
}
void SaveNXTomo::writeLogValues(const DataObjects::Workspace2D_sptr workspace, ::NeXus::File &nxFile, int thisFileInd) { // Add Log information (minus special values - Rotation, ImageKey, Intensity) // Unable to add multidimensional string data, storing strings as // multidimensional data set of uint8 values try { nxFile.openPath("/entry1/log_info"); } catch (...) { throw std::runtime_error("Unable to create a valid NXTomo file"); } // Loop through all log values, create it if it doesn't exist. Then append // value std::vector<Property *> logVals = workspace->run().getLogData(); for (auto it = logVals.begin(); it != logVals.end(); ++it) { auto prop = *it; if (prop->name() != "ImageKey" && prop->name() != "Rotation" && prop->name() != "Intensity" && prop->name() != "Axis1" && prop->name() != "Axis2") { try { nxFile.openData(prop->name()); } catch (::NeXus::Exception &) { // Create the data entry if it doesn't exist yet, and open. std::vector<int64_t> infDim; infDim.push_back(NX_UNLIMITED); infDim.push_back(NX_UNLIMITED); nxFile.makeData(prop->name(), ::NeXus::UINT8, infDim, true); } size_t strSize = prop->value().length(); char *val = new char[80](); // If log value is from FITS file as it should be, // it won't be greater than this. Otherwise Shorten it if (strSize > 80) strSize = 80; strncpy(val, prop->value().c_str(), strSize); std::vector<int64_t> start, size; start.push_back(thisFileInd); start.push_back(0); size.push_back(1); size.push_back(strSize); // single item nxFile.putSlab(val, start, size); nxFile.closeData(); } } }
/** * Can we get a histogram (non event data) for every monitor? * * @param file :: NeXus file object (open) * @param monitorNames :: names of monitors of interest * @return If there seems to be histograms for all monitors (they have "data") **/ bool LoadNexusMonitors2::allMonitorsHaveHistoData( ::NeXus::File &file, const std::vector<std::string> &monitorNames) { bool res = true; try { for (std::size_t i = 0; i < m_monitor_count; ++i) { file.openGroup(monitorNames[i], "NXmonitor"); file.openData("data"); file.closeData(); file.closeGroup(); } } catch (::NeXus::Exception &) { file.closeGroup(); res = false; } return res; }
/**
 * Read the detector information (IDs, types, delays, geometry, pressure and
 * wall thickness) from a LibISIS NeXus file.
 * @param nxsFile A reference to the open NeXus file. It should be opened at
 * the "full_reference_detector" group
 * @param detInfo A reference to the struct that will hold the data from the file
 */
void LoadDetectorInfo::readLibisisNxs(::NeXus::File &nxsFile,
                                      DetectorInfo &detInfo) const {
  nxsFile.readData<int32_t>("det_no", detInfo.ids);
  nxsFile.readData<int32_t>("det_type", detInfo.codes);
  nxsFile.readData<double>("delay_time", detInfo.delays);
  const size_t numDets = detInfo.ids.size();

  if (m_moveDets) {
    nxsFile.readData<double>("L2", detInfo.l2);
    nxsFile.readData<double>("theta", detInfo.theta);
    nxsFile.readData<double>("phi", detInfo.phi);
  } else {
    // these will get ignored
    detInfo.l2.resize(numDets, -1.0);
    detInfo.theta.resize(numDets, -1.0);
    detInfo.phi.resize(numDets, -1.0);
  }

  // pressure & wall thickness are global here
  double pressure = -1.0;
  double thickness = -1.0;
  nxsFile.openGroup("det_he3", "NXIXTdet_he3");
  nxsFile.readData<double>("gas_pressure", pressure);
  nxsFile.readData<double>("wall_thickness", thickness);
  nxsFile.closeGroup();
  if (pressure <= 0.0) {
    g_log.warning("The data file does not contain a valid He3 pressure, "
                  "the default value of 10 bar is used instead");
    pressure = 10.0;
  }
  if (thickness <= 0.0) {
    g_log.warning("The data file does not contain a valid detector wall "
                  "thickness, the default value of 0.8 mm is used instead");
    thickness = 0.0008;
  }
  detInfo.pressures.resize(numDets, pressure);
  detInfo.thicknesses.resize(numDets, thickness);
}
void LoadNexusLogs::loadNPeriods(
    ::NeXus::File &file,
    boost::shared_ptr<API::MatrixWorkspace> workspace) const {
  int value = 1; // Default to 1 period unless the file says otherwise
  try {
    file.openGroup("periods", "IXperiods");
    file.openData("number");
    file.getData(&value);
    file.closeData();
    file.closeGroup();
  } catch (::NeXus::Exception &) {
    // Likely missing IXperiods.
    return;
  }

  API::Run &run = workspace->mutableRun();
  const std::string nPeriodsLabel = "nperiods";
  if (!run.hasProperty(nPeriodsLabel)) {
    run.addProperty(new PropertyWithValue<int>(nPeriodsLabel, value));
  }
}
/** Load the weights of weighted events, if they exist
 * @param file A NeXus::File object opened at the correct group
 * @returns A new array containing the weights or a nullptr if the weights
 * are not present
 */
std::unique_ptr<float[]>
LoadBankFromDiskTask::loadEventWeights(::NeXus::File &file) {
  try {
    // First, get info about the event_weight field in this bank
    file.openData("event_weight");
  } catch (::NeXus::Exception &) {
    // Field-not-found error is most likely.
    m_have_weight = false;
    return std::unique_ptr<float[]>();
  }
  // OK, we've got them
  m_have_weight = true;

  // Allocate the array
  auto event_weight = Mantid::Kernel::make_unique<float[]>(m_loadSize[0]);

  ::NeXus::Info weight_info = file.getInfo();
  int64_t weight_dim0 = recalculateDataSize(weight_info.dims[0]);
  if (weight_dim0 < m_loadSize[0] + m_loadStart[0]) {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_weight field is too small to load the desired data.\n";
    m_loadError = true;
  }

  // Check that the type is what it is supposed to be
  if (weight_info.type == ::NeXus::FLOAT32)
    file.getSlab(event_weight.get(), m_loadStart, m_loadSize);
  else {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_weight field is not FLOAT32! It will be skipped.\n";
    m_loadError = true;
  }

  if (!m_loadError) {
    file.closeData();
  }
  return event_weight;
}
/** Try to load the "Veto_pulse" field in DASLogs * and convert it to a sample log. * * @param file :: open nexus file at the DASLogs group * @param workspace :: workspace to add to. */ void LoadNexusLogs::loadVetoPulses( ::NeXus::File &file, boost::shared_ptr<API::MatrixWorkspace> workspace) const { try { file.openGroup("Veto_pulse", "NXgroup"); } catch (::NeXus::Exception &) { // No group. This is common in older files return; } file.openData("veto_pulse_time"); // Load the start date/time as ISO8601 string. std::string start_time; file.getAttr("start_time", start_time); DateAndTime start(start_time); // Read the offsets std::vector<double> time_double; file.getData(time_double); // Fake values with zeroes. std::vector<double> values(time_double.size(), 0.0); TimeSeriesProperty<double> *tsp = new TimeSeriesProperty<double>("veto_pulse_time"); tsp->create(start, time_double, values); tsp->setUnits(""); // Add the log workspace->mutableRun().addProperty(tsp); file.closeData(); file.closeGroup(); }
/** Open and load the times-of-flight data * @param file An NeXus::File object opened at the correct group * @returns A new array containing the time of flights for this bank */ std::unique_ptr<float[]> LoadBankFromDiskTask::loadTof(::NeXus::File &file) { // Allocate the array auto event_time_of_flight = Mantid::Kernel::make_unique<float[]>(m_loadSize[0]); // Get the list of event_time_of_flight's std::string key, tof_unit; if (!m_oldNexusFileNames) key = "event_time_offset"; else key = "event_time_of_flight"; file.openData(key); // Check that the required space is there in the file. ::NeXus::Info tof_info = file.getInfo(); int64_t tof_dim0 = recalculateDataSize(tof_info.dims[0]); if (tof_dim0 < m_loadSize[0] + m_loadStart[0]) { m_loader.alg->getLogger().warning() << "Entry " << entry_name << "'s event_time_offset field is too small " "to load the desired data.\n"; m_loadError = true; } // The Nexus standard does not specify if event_time_offset should be float or // integer, so we use the NeXusIOHelper to perform the conversion to float on // the fly. If the data field already contains floats, the conversion is // skipped. auto vec = NeXus::NeXusIOHelper::readNexusSlab<float>(file, key, m_loadStart, m_loadSize); file.getAttr("units", tof_unit); file.closeData(); // Convert Tof to microseconds Kernel::Units::timeConversionVector(vec, tof_unit, "microseconds"); std::copy(vec.begin(), vec.end(), event_time_of_flight.get()); return event_time_of_flight; }
/**
 * Load log entries from the given group
 * @param file :: A reference to the NeXus file handle opened such that the
 * next call can be to open the named group
 * @param entry_name :: The name of the log entry
 * @param entry_class :: The class type of the log entry
 * @param workspace :: A pointer to the workspace to store the logs
 */
void LoadNexusLogs::loadLogs(
    ::NeXus::File &file, const std::string &entry_name,
    const std::string &entry_class,
    boost::shared_ptr<API::MatrixWorkspace> workspace) const {
  file.openGroup(entry_name, entry_class);
  std::map<std::string, std::string> entries = file.getEntries();
  std::map<std::string, std::string>::const_iterator iend = entries.end();
  for (std::map<std::string, std::string>::const_iterator itr = entries.begin();
       itr != iend; ++itr) {
    std::string log_class = itr->second;
    if (log_class == "NXlog" || log_class == "NXpositioner") {
      loadNXLog(file, itr->first, log_class, workspace);
    } else if (log_class == "IXseblock") {
      loadSELog(file, itr->first, workspace);
    } else if (log_class == "NXcollection") {
      // NXcollection entries are recognised but not loaded as logs.
    }
  }
  loadVetoPulses(file, workspace);
  file.closeGroup();
}
/** * Fix the detector numbers if the defaults are not correct. Currently checks * the isis_vms_compat block and reads them from there if possible. * * @param det_ids :: An array of prefilled detector IDs * @param file :: A reference to the NeXus file opened at the root entry * @param spec_ids :: An array of spectrum numbers that the monitors have * @param nmonitors :: The size of the det_ids and spec_ids arrays */ void LoadNexusMonitors2::fixUDets(boost::scoped_array<detid_t> &det_ids, ::NeXus::File &file, const boost::scoped_array<specid_t> &spec_ids, const size_t nmonitors) const { try { file.openGroup("isis_vms_compat", "IXvms"); } catch (::NeXus::Exception &) { return; } // UDET file.openData("UDET"); std::vector<int32_t> udet; file.getData(udet); file.closeData(); // SPEC file.openData("SPEC"); std::vector<int32_t> spec; file.getData(spec); file.closeData(); // This is a little complicated: Each value in the spec_id array is a value // found in the // SPEC block of isis_vms_compat. The index that this value is found at then // corresponds // to the index within the UDET block that holds the detector ID std::vector<int32_t>::const_iterator beg = spec.begin(); for (size_t mon_index = 0; mon_index < nmonitors; ++mon_index) { std::vector<int32_t>::const_iterator itr = std::find(spec.begin(), spec.end(), spec_ids[mon_index]); if (itr == spec.end()) { det_ids[mon_index] = -1; continue; } std::vector<int32_t>::difference_type udet_index = std::distance(beg, itr); det_ids[mon_index] = udet[udet_index]; } file.closeGroup(); }
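// ---------------------------------------------------------------------------
// Illustrative sketch (not Mantid code) of the SPEC/UDET lookup performed in
// fixUDets above: the SPEC block gives the spectrum number stored at each
// position and the UDET block gives the detector ID at the same position, so
// finding a spectrum number in SPEC yields the index of its detector ID in
// UDET. All values are invented; the snippet compiles as its own tiny program.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <vector>

int main() {
  const std::vector<int32_t> spec = {5, 6, 7, 8};
  const std::vector<int32_t> udet = {1001, 1002, 1003, 1004};
  const std::vector<int32_t> monitor_spectra = {7, 5, 99};

  for (const int32_t spec_no : monitor_spectra) {
    const auto it = std::find(spec.begin(), spec.end(), spec_no);
    const int32_t det_id =
        (it == spec.end()) ? -1 : udet[std::distance(spec.begin(), it)];
    std::cout << "spectrum " << spec_no << " -> detector " << det_id << "\n";
  }
  // prints: spectrum 7 -> detector 1003, spectrum 5 -> detector 1001,
  //         spectrum 99 -> detector -1 (not found)
}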
/** Open and load the times-of-flight data */ void LoadBankFromDiskTask::loadTof(::NeXus::File &file) { // Allocate the array auto temp = new float[m_loadSize[0]]; delete[] m_event_time_of_flight; m_event_time_of_flight = temp; // Get the list of event_time_of_flight's if (!m_oldNexusFileNames) file.openData("event_time_offset"); else file.openData("event_time_of_flight"); // Check that the required space is there in the file. ::NeXus::Info tof_info = file.getInfo(); int64_t tof_dim0 = recalculateDataSize(tof_info.dims[0]); if (tof_dim0 < m_loadSize[0] + m_loadStart[0]) { m_loader.alg->getLogger().warning() << "Entry " << entry_name << "'s event_time_offset field is too small " "to load the desired data.\n"; m_loadError = true; } // Check that the type is what it is supposed to be if (tof_info.type == ::NeXus::FLOAT32) file.getSlab(m_event_time_of_flight, m_loadStart, m_loadSize); else { m_loader.alg->getLogger().warning() << "Entry " << entry_name << "'s event_time_offset field is not FLOAT32! It will be skipped.\n"; m_loadError = true; } if (!m_loadError) { std::string units; file.getAttr("units", units); if (units != "microsecond") { m_loader.alg->getLogger().warning() << "Entry " << entry_name << "'s event_time_offset field's units are " "not microsecond. It will be skipped.\n"; m_loadError = true; } file.closeData(); } // no error }
/** Load the event_id field, which has been opened * @param file An NeXus::File object opened at the correct group * @returns A new array containing the event Ids for this bank */ std::unique_ptr<uint32_t[]> LoadBankFromDiskTask::loadEventId(::NeXus::File &file) { // This is the data size ::NeXus::Info id_info = file.getInfo(); int64_t dim0 = recalculateDataSize(id_info.dims[0]); // Now we allocate the required arrays auto event_id = Mantid::Kernel::make_unique<uint32_t[]>(m_loadSize[0]); // Check that the required space is there in the file. if (dim0 < m_loadSize[0] + m_loadStart[0]) { m_loader.alg->getLogger().warning() << "Entry " << entry_name << "'s event_id field is too small (" << dim0 << ") to load the desired data size (" << m_loadSize[0] + m_loadStart[0] << ").\n"; m_loadError = true; } if (m_loader.alg->getCancel()) m_loadError = true; // To allow cancelling the algorithm if (!m_loadError) { // Must be uint32 if (id_info.type == ::NeXus::UINT32) file.getSlab(event_id.get(), m_loadStart, m_loadSize); else { m_loader.alg->getLogger().warning() << "Entry " << entry_name << "'s event_id field is not UINT32! It will be skipped.\n"; m_loadError = true; } file.closeData(); // determine the range of pixel ids for (int64_t i = 0; i < m_loadSize[0]; ++i) { const auto id = event_id[i]; if (id < m_min_id) m_min_id = id; if (id > m_max_id) m_max_id = id; } if (m_min_id > static_cast<uint32_t>(m_loader.eventid_max)) { // All the detector IDs in the bank are higher than the highest 'known' // (from the IDF) // ID. Setting this will abort the loading of the bank. m_loadError = true; } // fixup the minimum pixel id in the case that it's lower than the lowest // 'known' id. We test this by checking that when we add the offset we // would not get a negative index into the vector. Note that m_min_id is // a uint so we have to be cautious about adding it to an int which may be // negative. if (static_cast<int32_t>(m_min_id) + m_loader.pixelID_to_wi_offset < 0) { m_min_id = static_cast<uint32_t>(abs(m_loader.pixelID_to_wi_offset)); } // fixup the maximum pixel id in the case that it's higher than the // highest 'known' id if (m_max_id > static_cast<uint32_t>(m_loader.eventid_max)) m_max_id = static_cast<uint32_t>(m_loader.eventid_max); } return event_id; }
/** Open the event_id field and validate the contents * * @param file :: File handle for the NeXus file * @param start_event :: set to the index of the first event * @param stop_event :: set to the index of the last event + 1 * @param event_index :: (a list of size of # of pulses giving the index in *the event list for that pulse) */ void LoadBankFromDiskTask::prepareEventId( ::NeXus::File &file, int64_t &start_event, int64_t &stop_event, const std::vector<uint64_t> &event_index) { // Get the list of pixel ID's if (m_oldNexusFileNames) file.openData("event_pixel_id"); else file.openData("event_id"); // By default, use all available indices start_event = 0; ::NeXus::Info id_info = file.getInfo(); // dims[0] can be negative in ISIS meaning 2^32 + dims[0]. Take that into // account int64_t dim0 = recalculateDataSize(id_info.dims[0]); stop_event = dim0; // Handle the time filtering by changing the start/end offsets. for (size_t i = 0; i < thisBankPulseTimes->numPulses; i++) { if (thisBankPulseTimes->pulseTimes[i] >= m_loader.alg->filter_time_start) { start_event = static_cast<int64_t>(event_index[i]); break; // stop looking } } if (start_event > dim0) { // If the frame indexes are bad then we can't construct the times of the // events properly and filtering by time // will not work on this data m_loader.alg->getLogger().warning() << this->entry_name << "'s field 'event_index' seems to be invalid (start_index > than " "the number of events in the bank)." << "All events will appear in the same frame and filtering by time " "will not be possible on this data.\n"; start_event = 0; stop_event = dim0; } else { for (size_t i = 0; i < thisBankPulseTimes->numPulses; i++) { if (thisBankPulseTimes->pulseTimes[i] > m_loader.alg->filter_time_stop) { stop_event = event_index[i]; break; } } } // We are loading part - work out the event number range if (m_loader.chunk != EMPTY_INT()) { start_event = static_cast<int64_t>(m_loader.chunk - m_loader.firstChunkForBank) * static_cast<int64_t>(m_loader.eventsPerChunk); // Don't change stop_event for the final chunk if (start_event + static_cast<int64_t>(m_loader.eventsPerChunk) < stop_event) stop_event = start_event + static_cast<int64_t>(m_loader.eventsPerChunk); } // Make sure it is within range if (stop_event > dim0) stop_event = dim0; m_loader.alg->getLogger().debug() << entry_name << ": start_event " << start_event << " stop_event " << stop_event << "\n"; }
/** * Read histogram data * @param histogramEntries map of the file entries that have histogram * @param outputGroup pointer to the workspace group * @param nxFile Reads data from inside first first top entry */ void LoadMcStas::readHistogramData( const std::map<std::string, std::string> &histogramEntries, WorkspaceGroup_sptr &outputGroup, ::NeXus::File &nxFile) { std::string nameAttrValueYLABEL; for (const auto &histogramEntry : histogramEntries) { const std::string &dataName = histogramEntry.first; const std::string &dataType = histogramEntry.second; // open second level entry nxFile.openGroup(dataName, dataType); // grap title to use to e.g. create workspace name std::string nameAttrValueTITLE; nxFile.getAttr("filename", nameAttrValueTITLE); if (nxFile.hasAttr("ylabel")) { nxFile.getAttr("ylabel", nameAttrValueYLABEL); } // Find the axis names auto nxdataEntries = nxFile.getEntries(); std::string axis1Name, axis2Name; for (auto &nxdataEntry : nxdataEntries) { if (nxdataEntry.second == "NXparameters") continue; if (nxdataEntry.first == "ncount") continue; nxFile.openData(nxdataEntry.first); if (nxFile.hasAttr("axis")) { int axisNo(0); nxFile.getAttr("axis", axisNo); if (axisNo == 1) axis1Name = nxdataEntry.first; else if (axisNo == 2) axis2Name = nxdataEntry.first; else throw std::invalid_argument("Unknown axis number"); } nxFile.closeData(); } std::vector<double> axis1Values, axis2Values; nxFile.readData<double>(axis1Name, axis1Values); if (axis2Name.length() == 0) { axis2Name = nameAttrValueYLABEL; axis2Values.push_back(0.0); } else { nxFile.readData<double>(axis2Name, axis2Values); } const size_t axis1Length = axis1Values.size(); const size_t axis2Length = axis2Values.size(); g_log.debug() << "Axis lengths=" << axis1Length << " " << axis2Length << '\n'; // Require "data" field std::vector<double> data; nxFile.readData<double>("data", data); // Optional errors field std::vector<double> errors; try { nxFile.readData<double>("errors", errors); } catch (::NeXus::Exception &) { g_log.information() << "Field " << dataName << " contains no error information.\n"; } // close second level entry nxFile.closeGroup(); MatrixWorkspace_sptr ws = WorkspaceFactory::Instance().create( "Workspace2D", axis2Length, axis1Length, axis1Length); Axis *axis1 = ws->getAxis(0); axis1->title() = axis1Name; // Set caption auto lblUnit = boost::make_shared<Units::Label>(); lblUnit->setLabel(axis1Name, ""); axis1->unit() = lblUnit; Axis *axis2 = new NumericAxis(axis2Length); axis2->title() = axis2Name; // Set caption lblUnit = boost::make_shared<Units::Label>(); lblUnit->setLabel(axis2Name, ""); axis2->unit() = lblUnit; ws->setYUnit(axis2Name); ws->replaceAxis(1, axis2); for (size_t wsIndex = 0; wsIndex < axis2Length; ++wsIndex) { auto &dataY = ws->dataY(wsIndex); auto &dataE = ws->dataE(wsIndex); auto &dataX = ws->dataX(wsIndex); for (size_t j = 0; j < axis1Length; ++j) { // Data is stored in column-major order so we are translating to // row major for Mantid const size_t fileDataIndex = j * axis2Length + wsIndex; dataY[j] = data[fileDataIndex]; dataX[j] = axis1Values[j]; if (!errors.empty()) dataE[j] = errors[fileDataIndex]; } axis2->setValue(wsIndex, axis2Values[wsIndex]); } // set the workspace title ws->setTitle(nameAttrValueTITLE); // use the workspace title to create the workspace name std::replace(nameAttrValueTITLE.begin(), nameAttrValueTITLE.end(), ' ', '_'); // ensure that specified name is given to workspace (eventWS) when added to // outputGroup std::string nameOfGroupWS = 
getProperty("OutputWorkspace"); std::string nameUserSee = nameAttrValueTITLE + "_" + nameOfGroupWS; std::string extraProperty = "Outputworkspace_dummy_" + std::to_string(m_countNumWorkspaceAdded); declareProperty(Kernel::make_unique<WorkspaceProperty<Workspace>>( extraProperty, nameUserSee, Direction::Output)); setProperty(extraProperty, boost::static_pointer_cast<Workspace>(ws)); m_countNumWorkspaceAdded++; // need to increment to ensure extraProperty are // unique // Make Mantid store the workspace in the group outputGroup->addWorkspace(ws); } nxFile.closeGroup(); } // finish
/** * Writes a single workspace into the file * @param workspace the workspace to get data from * @param nxFile the nexus file to save data into */ void SaveNXTomo::writeSingleWorkspace(const Workspace2D_sptr workspace, ::NeXus::File &nxFile) { try { nxFile.openPath("/entry1/tomo_entry/data"); } catch (...) { throw std::runtime_error("Unable to create a valid NXTomo file"); } int numFiles = 0; nxFile.getAttr<int>("NumFiles", numFiles); // Change slab start to after last data position m_slabStart[0] = numFiles; m_slabSize[0] = 1; // Set the rotation value for this WS std::vector<double> rotValue; rotValue.push_back(0); if (workspace->run().hasProperty("Rotation")) { std::string tmpVal = workspace->run().getLogData("Rotation")->value(); try { rotValue[0] = boost::lexical_cast<double>(tmpVal); } catch (...) { } // Invalid Cast is handled below } nxFile.openData("rotation_angle"); nxFile.putSlab(rotValue, numFiles, 1); nxFile.closeData(); // Copy data out, remake data with dimension of old size plus new elements. // Insert previous data. nxFile.openData("data"); double *dataArr = new double[m_spectraCount]; for (int64_t i = 0; i < m_dimensions[1]; ++i) { for (int64_t j = 0; j < m_dimensions[2]; ++j) { dataArr[i * m_dimensions[1] + j] = workspace->dataY(i * m_dimensions[1] + j)[0]; } } nxFile.putSlab(dataArr, m_slabStart, m_slabSize); nxFile.closeData(); nxFile.putAttr("NumFiles", numFiles + 1); nxFile.closeGroup(); // Write additional log information, intensity and image key writeLogValues(workspace, nxFile, numFiles); writeIntensityValue(workspace, nxFile, numFiles); writeImageKeyValue(workspace, nxFile, numFiles); ++numFiles; delete[] dataArr; }
bool MuonNexusReader::readMuonLogData(NeXus::File &handle) { const string NAME("name"); const string VALUES("values"); const string TIME("time"); // read name of Log data string dataName; handle.readData(NAME, dataName); // read data values try { handle.openData(VALUES); } catch (NeXus::Exception &) { g_log.warning() << "No " << VALUES << " set in " << handle.getPath() << "\n"; return false; } std::vector<float> values; std::vector<std::string> stringValues; bool isNumeric(false); NeXus::Info info = handle.getInfo(); if (info.type == NX_FLOAT32 && info.dims.size() == 1) { isNumeric = true; boost::scoped_array<float> dataVals(new float[info.dims[0]]); handle.getData(dataVals.get()); values.assign(dataVals.get(), dataVals.get() + info.dims[0]); stringValues.resize(info.dims[0]); // Leave empty } else if (info.type == NX_CHAR && info.dims.size() == 2) { boost::scoped_array<char> dataVals( new char[info.dims[0] * info.dims[1] + 1]); handle.getData(dataVals.get()); dataVals[info.dims[0] * info.dims[1]] = 0; for (int i = 0; i < info.dims[0]; ++i) { std::string str(&dataVals[i * info.dims[1]], &dataVals[(i + 1) * info.dims[1]]); stringValues.push_back(str); } values.resize(info.dims[0]); // Leave empty } else { // Leave both empty values.resize(info.dims[0]); stringValues.resize(info.dims[0]); } handle.closeData(); // read time values try { handle.openData(TIME); } catch (NeXus::Exception &) { g_log.warning() << "No " << TIME << " set in " << handle.getPath() << "\n"; return false; } info = handle.getInfo(); boost::scoped_array<float> timeVals(new float[info.dims[0]]); if (info.type == NX_FLOAT32 && info.dims.size() == 1) { handle.getData(timeVals.get()); } else { throw std::runtime_error( "Error in MuonNexusReader: expected float array for log times"); } handle.closeData(); // Add loaded values to vectors logNames.push_back(dataName); std::vector<float> tmp(timeVals.get(), timeVals.get() + info.dims[0]); logTimes.push_back(tmp); logType.push_back(isNumeric); logValues.push_back(values); logStringValues.push_back(stringValues); return true; }
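// ---------------------------------------------------------------------------
// Illustrative sketch (not Mantid code) of the NX_CHAR handling in
// readMuonLogData above: a 2D character block of shape
// [nEntries][entryLength] comes back as one flat, fixed-width buffer, and each
// row is sliced out into its own std::string. The strings are invented; the
// snippet compiles as its own tiny program.
// ---------------------------------------------------------------------------
#include <iostream>
#include <string>
#include <vector>

int main() {
  const int nEntries = 3;
  const int entryLength = 8;
  // Flat buffer as it would come back from the file (fixed width, space padded)
  const char raw[nEntries * entryLength + 1] = "RUNNING STOPPED PAUSED  ";

  std::vector<std::string> values;
  for (int i = 0; i < nEntries; ++i) {
    std::string entry(&raw[i * entryLength], &raw[(i + 1) * entryLength]);
    values.push_back(entry);
  }

  for (const auto &v : values)
    std::cout << '[' << v << "]\n"; // [RUNNING ] [STOPPED ] [PAUSED  ]
}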
/**
 * Update the detector information from a NeXus file
 * @param nxFile :: Handle to a NeXus file where the root group has been opened
 */
void UpdateInstrumentFromFile::updateFromNeXus(::NeXus::File &nxFile) {
  try {
    nxFile.openGroup("isis_vms_compat", "IXvms");
  } catch (::NeXus::Exception &) {
    throw std::runtime_error("Unknown NeXus flavour. Cannot update instrument "
                             "positions using this type of file");
  }
  // Det ID
  std::vector<int32_t> detID;
  nxFile.openData("UDET");
  nxFile.getData(detID);
  nxFile.closeData();
  // Position information
  std::vector<float> l2, theta, phi;
  nxFile.openData("LEN2");
  nxFile.getData(l2);
  nxFile.closeData();
  nxFile.openData("TTHE");
  nxFile.getData(theta);
  nxFile.closeData();
  nxFile.openData("UT01");
  nxFile.getData(phi);
  nxFile.closeData();

  g_log.information() << "Setting detector positions from NeXus file.\n";
  setDetectorPositions(detID, l2, theta, phi);
}
/**
 * Read event data
 * @param eventEntries map of the file entries that have events
 * @param outputGroup pointer to the workspace group
 * @param nxFile Reads data from inside the first top entry
 */
void LoadMcStas::readEventData(
    const std::map<std::string, std::string> &eventEntries,
    WorkspaceGroup_sptr &outputGroup, ::NeXus::File &nxFile) {

  std::string filename = getPropertyValue("Filename");
  auto entries = nxFile.getEntries();

  // will assume that each top level entry contain one mcstas
  // generated IDF and any event data entries within this top level
  // entry are data collected for that instrument
  // This code for loading the instrument is for now adjusted code from
  // ExperimentalInfo.

  // Close data folder and go back to top level. Then read and close the
  // Instrument folder.
  nxFile.closeGroup();

  Geometry::Instrument_sptr instrument;

  // Initialize progress reporting
  int reports = 2;
  const double progressFractionInitial = 0.1;
  Progress progInitial(this, 0.0, progressFractionInitial, reports);

  try {
    nxFile.openGroup("instrument", "NXinstrument");
    std::string instrumentXML;
    nxFile.openGroup("instrument_xml", "NXnote");
    nxFile.readData("data", instrumentXML);
    nxFile.closeGroup();
    nxFile.closeGroup();

    progInitial.report("Loading instrument");

    Geometry::InstrumentDefinitionParser parser;
    std::string instrumentName = "McStas";
    parser.initialize(filename, instrumentName, instrumentXML);
    std::string instrumentNameMangled = parser.getMangledName();

    // Check whether the instrument is already in the InstrumentDataService
    if (InstrumentDataService::Instance().doesExist(instrumentNameMangled)) {
      // If it does, just use the one stored there
      instrument =
          InstrumentDataService::Instance().retrieve(instrumentNameMangled);
    } else {
      // Really create the instrument
      instrument = parser.parseXML(NULL);
      // Add to data service for later retrieval
      InstrumentDataService::Instance().add(instrumentNameMangled, instrument);
    }
  } catch (...) {
    // Loader should not stop if there is no IDF.xml
    g_log.warning()
        << "\nCould not find the instrument description in the Nexus file:"
        << filename << " Ignore event data from data file" << std::endl;
    return;
  }
  // Finished reading the instrument.
  // Then open the data folder again.
  nxFile.openGroup("data", "NXdetector");

  // create and prepare an event workspace ready to receive the mcstas events
  progInitial.report("Set up EventWorkspace");
  EventWorkspace_sptr eventWS(new EventWorkspace());
  // initialize, where create up front number of eventlists = number of
  // detectors
  eventWS->initialize(instrument->getNumberDetectors(), 1, 1);
  // Set the units
  eventWS->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
  eventWS->setYUnit("Counts");
  // set the instrument
  eventWS->setInstrument(instrument);
  // assign detector ID to eventlists
  std::vector<detid_t> detIDs = instrument->getDetectorIDs();
  for (size_t i = 0; i < instrument->getNumberDetectors(); i++) {
    eventWS->getEventList(i).addDetectorID(detIDs[i]);
    // spectrum numbers are treated as equal to detector IDs for McStas data
    eventWS->getEventList(i).setSpectrumNo(detIDs[i]);
  }
  // the one is here for the moment for backward compatibility
  eventWS->rebuildSpectraMapping(true);

  bool isAnyNeutrons = false;
  // to store shortest and longest recorded TOF
  double shortestTOF(0.0);
  double longestTOF(0.0);

  const size_t numEventEntries = eventEntries.size();
  Progress progEntries(this, progressFractionInitial, 1.0,
                       numEventEntries * 2);
  for (auto eit = eventEntries.begin(); eit != eventEntries.end(); ++eit) {
    std::string dataName = eit->first;
    std::string dataType = eit->second;

    // open second level entry
    nxFile.openGroup(dataName, dataType);
    std::vector<double> data;
    nxFile.openData("events");
    progEntries.report("read event data from nexus");

    // Need to take into account that the nexus readData method reads a
    // multi-column data entry into a vector.
    // The number of data columns for each neutron is here hardcoded to
    // (p, x, y, n, id, t). Thus we have
    // column 0 : p   neutron weight
    // column 1 : x   x coordinate
    // column 2 : y   y coordinate
    // column 3 : n   accumulated number of neutrons
    // column 4 : id  pixel id
    // column 5 : t   time

    // get info about event data
    ::NeXus::Info id_info = nxFile.getInfo();
    if (id_info.dims.size() != 2) {
      g_log.error() << "Event data in McStas nexus file not loaded. Expected "
                       "event data block to be two dimensional"
                    << std::endl;
      return;
    }
    int64_t nNeutrons = id_info.dims[0];
    int64_t numberOfDataColumn = id_info.dims[1];
    if (nNeutrons && numberOfDataColumn != 6) {
      g_log.error() << "Event data in McStas nexus file expecting 6 columns"
                    << std::endl;
      return;
    }
    if (isAnyNeutrons == false && nNeutrons > 0)
      isAnyNeutrons = true;

    std::vector<int64_t> start(2);
    std::vector<int64_t> step(2);
    // read the event data in blocks; 1 million events is 1000000*6*8 doubles,
    // about 50 MB
    int64_t nNeutronsInBlock = 1000000;
    int64_t nOfFullBlocks = nNeutrons / nNeutronsInBlock;
    int64_t nRemainingNeutrons = nNeutrons - nOfFullBlocks * nNeutronsInBlock;
    // sum over number of blocks + 1 to cover the remainder
    for (int64_t iBlock = 0; iBlock < nOfFullBlocks + 1; iBlock++) {
      if (iBlock == nOfFullBlocks) {
        // read remaining neutrons
        start[0] = nOfFullBlocks * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nRemainingNeutrons;
        step[1] = numberOfDataColumn;
      } else {
        // read neutrons in a full block
        start[0] = iBlock * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nNeutronsInBlock;
        step[1] = numberOfDataColumn;
      }
      const int64_t nNeutronsForthisBlock =
          step[0]; // number of neutrons read for this block
      data.resize(nNeutronsForthisBlock * numberOfDataColumn);

      // Check that the type is what it is supposed to be
      if (id_info.type == ::NeXus::FLOAT64) {
        nxFile.getSlab(&data[0], start, step);
      } else {
        g_log.warning()
            << "Entry event field is not FLOAT64! It will be skipped.\n";
        continue;
      }

      // populate workspace with McStas events
      const detid2index_map detIDtoWSindex_map =
          eventWS->getDetectorIDToWorkspaceIndexMap(true);

      progEntries.report("read event data into workspace");
      for (int64_t in = 0; in < nNeutronsForthisBlock; in++) {
        const int detectorID =
            static_cast<int>(data[4 + numberOfDataColumn * in]);
        const double detector_time = data[5 + numberOfDataColumn * in] *
                                     1.0e6; // convert to microseconds
        if (in == 0 && iBlock == 0) {
          shortestTOF = detector_time;
          longestTOF = detector_time;
        } else {
          if (detector_time < shortestTOF)
            shortestTOF = detector_time;
          if (detector_time > longestTOF)
            longestTOF = detector_time;
        }

        const size_t workspaceIndex =
            detIDtoWSindex_map.find(detectorID)->second;

        int64_t pulse_time = 0;
        // eventWS->getEventList(workspaceIndex) +=
        //     TofEvent(detector_time, pulse_time);
        // eventWS->getEventList(workspaceIndex) += TofEvent(detector_time);
        eventWS->getEventList(workspaceIndex) += WeightedEvent(
            detector_time, pulse_time, data[numberOfDataColumn * in], 1.0);
      }
    } // end reading over number of blocks of an event dataset

    // nxFile.getData(data);
    nxFile.closeData();
    nxFile.closeGroup();
  } // end reading over number of event datasets

  // Create a default TOF-vector for histogramming, for now just 2 bins.
  // 2 bins is the standard. However for McStas simulation data it may make
  // sense to increase this number for a better initial visual effect
  Kernel::cow_ptr<MantidVec> axis;
  MantidVec &xRef = axis.access();
  xRef.resize(2, 0.0);
  // if (nNeutrons > 0)
  if (isAnyNeutrons) {
    xRef[0] = shortestTOF - 1; // Just to make sure the bins hold it all
    xRef[1] = longestTOF + 1;
  }
  // Set the binning axis
  eventWS->setAllX(axis);

  // ensure that the specified name is given to the workspace (eventWS) when
  // added to outputGroup
  std::string nameOfGroupWS = getProperty("OutputWorkspace");
  std::string nameUserSee = std::string("EventData_") + nameOfGroupWS;
  std::string extraProperty =
      "Outputworkspace_dummy_" +
      boost::lexical_cast<std::string>(m_countNumWorkspaceAdded);
  declareProperty(new WorkspaceProperty<Workspace>(extraProperty, nameUserSee,
                                                   Direction::Output));
  setProperty(extraProperty, boost::static_pointer_cast<Workspace>(eventWS));
  m_countNumWorkspaceAdded++; // need to increment to ensure extraProperty
                              // names are unique

  // Make Mantid store the workspace in the group
  outputGroup->addWorkspace(eventWS);
}
/** Loads an entry from a previously-opened NXS file as a log entry
 * in the workspace's run.
 *
 * @param file: NXS file handle. MUST BE PASSED BY REFERENCE otherwise a
 * segfault occurs.
 * @param entry_name :: name of the NXlog to open
 * @param entry_class :: class of the NXlog to open
 */
void LoadLogsFromSNSNexus::loadSampleLog(::NeXus::File &file,
                                         std::string entry_name,
                                         std::string entry_class) {
  // whether or not to overwrite logs on workspace
  bool overwritelogs = this->getProperty("OverwriteLogs");

  file.openGroup(entry_name, entry_class);

  // Validate the NX log class.
  map<string, string> entries = file.getEntries();
  if ((entries.find("value") == entries.end()) ||
      (entries.find("time") == entries.end())) {
    g_log.warning() << "Invalid NXlog entry " << entry_name
                    << " found. Did not contain 'value' and 'time'.\n";
    file.closeGroup();
    return;
  }

  ::NeXus::Info info;
  // Two possible types of properties:
  vector<double> values;
  vector<int> values_int;

  bool isTimeSeries = false;
  bool isInt = false;

  file.openData("value");

  // Get the units of the property
  std::string units("");
  try {
    file.getAttr("units", units);
  } catch (::NeXus::Exception &) {
    // Ignore missing units field.
    units = "";
  }

  // If there is more than one entry, it is a time series
  info = file.getInfo();
  // isTimeSeries = (info.dims[0] > 1);
  isTimeSeries = true;

  Timer timer1;
  try {
    // Get the data (convert types if necessary)
    if (file.isDataInt()) {
      isInt = true;
      file.getDataCoerce(values_int);
      // if (values_int.size() == 1)
      // {
      //   WS->mutableRun().addProperty(entry_name, values_int[0], units);
      // }
    } else {
      // Try to get as doubles.
      file.getDataCoerce(values);
      // if (values.size() == 1)
      // {
      //   WS->mutableRun().addProperty(entry_name, values[0], units);
      // }
    }
  } catch (::NeXus::Exception &e) {
    g_log.warning() << "NXlog entry " << entry_name
                    << " gave an error when loading 'value' data: '"
                    << e.what() << "'.\n";
    file.closeData();
    file.closeGroup();
    return;
  }
  if (VERBOSE)
    std::cout << "getDataCoerce took " << timer1.elapsed() << " sec.\n";

  file.closeData();

  if (isTimeSeries) {
    // --- Time series property ---

    // Get the times
    vector<double> time_double;
    vector<DateAndTime> times;

    try {
      file.openData("time");
    } catch (::NeXus::Exception &e) {
      g_log.warning() << "NXlog entry " << entry_name
                      << " gave an error when opening the time field: '"
                      << e.what() << "'.\n";
      file.closeGroup();
      return;
    }

    // ----- Start time is an ISO8601 string date and time. ------
    std::string start;
    try {
      file.getAttr("start", start);
    } catch (::NeXus::Exception &) {
      // Some logs have "offset" instead of start
      try {
        file.getAttr("offset", start);
      } catch (::NeXus::Exception &) {
        g_log.warning() << "NXlog entry " << entry_name
                        << " has no start time indicated.\n";
        file.closeData();
        file.closeGroup();
        return;
      }
    }

    // Convert to date and time
    Kernel::DateAndTime start_time = Kernel::DateAndTime(start);

    std::string time_units;
    file.getAttr("units", time_units);
    if (time_units != "second") {
      g_log.warning() << "NXlog entry " << entry_name << " has time units of '"
                      << time_units
                      << "', which are unsupported. 'second' is the only "
                         "supported time unit.\n";
      file.closeData();
      file.closeGroup();
      return;
    }

    Timer timer2;
    // --- Load the seconds into a double array ---
    try {
      file.getDataCoerce(time_double);
    } catch (::NeXus::Exception &e) {
      g_log.warning() << "NXlog entry " << entry_name
                      << "'s time field could not be loaded: '" << e.what()
                      << "'.\n";
      file.closeData();
      file.closeGroup();
      return;
    }
    file.closeData();
    if (VERBOSE)
      std::cout << "getDataCoerce for the seconds field took "
                << timer2.elapsed() << " sec.\n";

    if (isInt) {
      // Make an int TSP
      TimeSeriesProperty<int> *tsp = new TimeSeriesProperty<int>(entry_name);
      tsp->create(start_time, time_double, values_int);
      tsp->setUnits(units);
      WS->mutableRun().addProperty(tsp, overwritelogs);
    } else {
      // Make a double TSP
      TimeSeriesProperty<double> *tsp =
          new TimeSeriesProperty<double>(entry_name);
      Timer timer3;
      tsp->create(start_time, time_double, values);
      if (VERBOSE)
        std::cout << "creating a TSP took " << timer3.elapsed() << " sec.\n";
      tsp->setUnits(units);
      WS->mutableRun().addProperty(tsp, overwritelogs);
      // Trick to free memory?
      std::vector<double>().swap(time_double);
      std::vector<double>().swap(values);
    }
  }
  file.closeGroup();
}
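// ---------------------------------------------------------------------------
// Illustrative sketch (not Mantid code): the "time" field above holds offsets
// in seconds relative to the ISO8601 "start" attribute, so each log sample's
// absolute time is start + offset. A plain std::chrono time_point stands in
// for Mantid's DateAndTime / TimeSeriesProperty, and the start timestamp and
// offsets are invented. Compiles as its own tiny program.
// ---------------------------------------------------------------------------
#include <chrono>
#include <ctime>
#include <iostream>
#include <vector>

int main() {
  using clock = std::chrono::system_clock;
  const clock::time_point start =
      clock::from_time_t(1262304000); // 2010-01-01T00:00:00Z
  const std::vector<double> offsets_seconds = {0.0, 0.5, 1.25, 60.0};

  for (double offset : offsets_seconds) {
    const auto t = start + std::chrono::duration_cast<clock::duration>(
                               std::chrono::duration<double>(offset));
    std::cout << "sample at " << clock::to_time_t(t) << " (unix seconds)\n";
  }
}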
/** * Load an SE log entry * @param file :: A reference to the NeXus file handle opened at the parent * group * @param entry_name :: The name of the log entry * @param workspace :: A pointer to the workspace to store the logs */ void LoadNexusLogs::loadSELog( ::NeXus::File &file, const std::string &entry_name, boost::shared_ptr<API::MatrixWorkspace> workspace) const { // Open the entry file.openGroup(entry_name, "IXseblock"); std::string propName = entry_name; if (workspace->run().hasProperty(propName)) { propName = "selog_" + propName; } // There are two possible entries: // value_log - A time series entry. This can contain a corrupt value entry // so if it does use the value one // value - A single value float entry Kernel::Property *logValue(nullptr); std::map<std::string, std::string> entries = file.getEntries(); if (entries.find("value_log") != entries.end()) { try { try { file.openGroup("value_log", "NXlog"); } catch (::NeXus::Exception &) { file.closeGroup(); throw; } logValue = createTimeSeries(file, propName); file.closeGroup(); } catch (::NeXus::Exception &e) { g_log.warning() << "IXseblock entry '" << entry_name << "' gave an error when loading " << "a time series:'" << e.what() << "'. Skipping entry\n"; file.closeGroup(); // value_log file.closeGroup(); // entry_name return; } } else if (entries.find("value") != entries.end()) { try { // This may have a larger dimension than 1 bit it has no time field so // take the first entry file.openData("value"); ::NeXus::Info info = file.getInfo(); if (info.type == ::NeXus::FLOAT32) { boost::scoped_array<float> value(new float[info.dims[0]]); file.getData(value.get()); file.closeData(); logValue = new Kernel::PropertyWithValue<double>( propName, static_cast<double>(value[0]), true); } else { file.closeGroup(); return; } } catch (::NeXus::Exception &e) { g_log.warning() << "IXseblock entry " << entry_name << " gave an error when loading " << "a single value:'" << e.what() << "'.\n"; file.closeData(); file.closeGroup(); return; } } else { g_log.warning() << "IXseblock entry " << entry_name << " cannot be read, skipping entry.\n"; file.closeGroup(); return; } workspace->mutableRun().addProperty(logValue); file.closeGroup(); }
/** * Read Event Data * @param eventEntries map of the file entries that have events * @param nxFile Reads data from inside first top entry * @return Names of workspaces to include in the output group */ std::vector<std::string> LoadMcStas::readEventData( const std::map<std::string, std::string> &eventEntries, ::NeXus::File &nxFile) { // vector to store output workspaces std::vector<std::string> scatteringWSNames; std::string filename = getPropertyValue("Filename"); auto entries = nxFile.getEntries(); const bool errorBarsSetTo1 = getProperty("ErrorBarsSetTo1"); // will assume that each top level entry contain one mcstas // generated IDF and any event data entries within this top level // entry are data collected for that instrument // This code for loading the instrument is for now adjusted code from // ExperimentalInfo. // Close data folder and go back to top level. Then read and close the // Instrument folder. nxFile.closeGroup(); Geometry::Instrument_sptr instrument; // Initialize progress reporting int reports = 2; const double progressFractionInitial = 0.1; Progress progInitial(this, 0.0, progressFractionInitial, reports); std::string instrumentXML; progInitial.report("Loading instrument"); try { nxFile.openGroup("instrument", "NXinstrument"); nxFile.openGroup("instrument_xml", "NXnote"); nxFile.readData("data", instrumentXML); nxFile.closeGroup(); nxFile.closeGroup(); } catch (...) { g_log.warning() << "\nCould not find the instrument description in the Nexus file:" << filename << " Ignore eventdata from the Nexus file\n"; return scatteringWSNames; ; } try { std::string instrumentName = "McStas"; Geometry::InstrumentDefinitionParser parser(filename, instrumentName, instrumentXML); std::string instrumentNameMangled = parser.getMangledName(); // Check whether the instrument is already in the InstrumentDataService if (InstrumentDataService::Instance().doesExist(instrumentNameMangled)) { // If it does, just use the one from the one stored there instrument = InstrumentDataService::Instance().retrieve(instrumentNameMangled); } else { // Really create the instrument instrument = parser.parseXML(nullptr); // Add to data service for later retrieval InstrumentDataService::Instance().add(instrumentNameMangled, instrument); } } catch (Exception::InstrumentDefinitionError &e) { g_log.warning() << "When trying to read the instrument description in the Nexus file: " << filename << " the following error is reported: " << e.what() << " Ignore eventdata from the Nexus file\n"; return scatteringWSNames; ; } catch (...) { g_log.warning() << "Could not parse instrument description in the Nexus file: " << filename << " Ignore eventdata from the Nexus file\n"; return scatteringWSNames; ; } // Finished reading Instrument. 
  // Then open the data folder again.
  nxFile.openGroup("data", "NXdetector");

  // create and prepare an event workspace ready to receive the mcstas events
  progInitial.report("Set up EventWorkspace");
  EventWorkspace_sptr eventWS(new EventWorkspace());
  // initialize, where create up front number of eventlists = number of
  // detectors
  eventWS->initialize(instrument->getNumberDetectors(), 1, 1);
  // Set the units
  eventWS->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
  eventWS->setYUnit("Counts");
  // set the instrument
  eventWS->setInstrument(instrument);
  // assign detector ID to eventlists
  std::vector<detid_t> detIDs = instrument->getDetectorIDs();
  for (size_t i = 0; i < instrument->getNumberDetectors(); i++) {
    eventWS->getSpectrum(i).addDetectorID(detIDs[i]);
    // spectrum numbers are treated as equal to detector IDs for McStas data
    eventWS->getSpectrum(i).setSpectrumNo(detIDs[i]);
  }
  // the one is here for the moment for backward compatibility
  eventWS->rebuildSpectraMapping(true);

  bool isAnyNeutrons = false;
  // to store shortest and longest recorded TOF
  double shortestTOF(0.0);
  double longestTOF(0.0);

  // create a vector container for all the event output workspaces needed
  const size_t numEventEntries = eventEntries.size();
  std::string nameOfGroupWS = getProperty("OutputWorkspace");
  const auto eventDataTotalName = "EventData_" + nameOfGroupWS;
  std::vector<std::pair<EventWorkspace_sptr, std::string>> allEventWS = {
      {eventWS, eventDataTotalName}};
  // if numEventEntries > 1 also create separate event workspaces
  const bool onlySummedEventWorkspace =
      getProperty("OutputOnlySummedEventWorkspace");
  if (!onlySummedEventWorkspace && numEventEntries > 1) {
    for (const auto &eventEntry : eventEntries) {
      const std::string &dataName = eventEntry.first;
      // create container to hold partial event data
      // plus the name users will see for it
      const auto ws_name = dataName + "_" + nameOfGroupWS;
      allEventWS.emplace_back(eventWS->clone(), ws_name);
    }
  }

  Progress progEntries(this, progressFractionInitial, 1.0,
                       numEventEntries * 2);

  // Refer to entry in allEventWS. The first non-summed workspace index is 1
  auto eventWSIndex = 1u;
  // Loop over McStas event data components
  for (const auto &eventEntry : eventEntries) {
    const std::string &dataName = eventEntry.first;
    const std::string &dataType = eventEntry.second;

    // open second level entry
    nxFile.openGroup(dataName, dataType);
    std::vector<double> data;
    nxFile.openData("events");
    progEntries.report("read event data from nexus");

    // Need to take into account that the nexus readData method reads a
    // multi-column data entry into a vector.
    // The number of data columns for each neutron is here hardcoded to
    // (p, x, y, n, id, t). Thus we have
    // column 0 : p   neutron weight
    // column 1 : x   x coordinate
    // column 2 : y   y coordinate
    // column 3 : n   accumulated number of neutrons
    // column 4 : id  pixel id
    // column 5 : t   time

    // get info about event data
    ::NeXus::Info id_info = nxFile.getInfo();
    if (id_info.dims.size() != 2) {
      g_log.error() << "Event data in McStas nexus file not loaded. Expected "
                       "event data block to be two dimensional\n";
      return scatteringWSNames;
    }
    int64_t nNeutrons = id_info.dims[0];
    int64_t numberOfDataColumn = id_info.dims[1];
    if (nNeutrons && numberOfDataColumn != 6) {
      g_log.error() << "Event data in McStas nexus file expecting 6 columns\n";
      return scatteringWSNames;
    }
    if (!isAnyNeutrons && nNeutrons > 0)
      isAnyNeutrons = true;

    std::vector<int64_t> start(2);
    std::vector<int64_t> step(2);
    // read the event data in blocks; 1 million events is 1000000*6*8 doubles,
    // about 50 MB
    int64_t nNeutronsInBlock = 1000000;
    int64_t nOfFullBlocks = nNeutrons / nNeutronsInBlock;
    int64_t nRemainingNeutrons = nNeutrons - nOfFullBlocks * nNeutronsInBlock;
    // sum over number of blocks + 1 to cover the remainder
    for (int64_t iBlock = 0; iBlock < nOfFullBlocks + 1; iBlock++) {
      if (iBlock == nOfFullBlocks) {
        // read remaining neutrons
        start[0] = nOfFullBlocks * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nRemainingNeutrons;
        step[1] = numberOfDataColumn;
      } else {
        // read neutrons in a full block
        start[0] = iBlock * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nNeutronsInBlock;
        step[1] = numberOfDataColumn;
      }
      const int64_t nNeutronsForthisBlock =
          step[0]; // number of neutrons read for this block
      data.resize(nNeutronsForthisBlock * numberOfDataColumn);

      // Check that the type is what it is supposed to be
      if (id_info.type == ::NeXus::FLOAT64) {
        nxFile.getSlab(&data[0], start, step);
      } else {
        g_log.warning()
            << "Entry event field is not FLOAT64! It will be skipped.\n";
        continue;
      }

      // populate workspace with McStas events
      const detid2index_map detIDtoWSindex_map =
          allEventWS[0].first->getDetectorIDToWorkspaceIndexMap(true);

      progEntries.report("read event data into workspace");
      for (int64_t in = 0; in < nNeutronsForthisBlock; in++) {
        const int detectorID =
            static_cast<int>(data[4 + numberOfDataColumn * in]);
        const double detector_time = data[5 + numberOfDataColumn * in] *
                                     1.0e6; // convert to microseconds
        if (in == 0 && iBlock == 0) {
          shortestTOF = detector_time;
          longestTOF = detector_time;
        } else {
          if (detector_time < shortestTOF)
            shortestTOF = detector_time;
          if (detector_time > longestTOF)
            longestTOF = detector_time;
        }

        const size_t workspaceIndex =
            detIDtoWSindex_map.find(detectorID)->second;

        int64_t pulse_time = 0;
        auto weightedEvent = WeightedEvent();
        if (errorBarsSetTo1) {
          weightedEvent = WeightedEvent(detector_time, pulse_time,
                                        data[numberOfDataColumn * in], 1.0);
        } else {
          weightedEvent = WeightedEvent(
              detector_time, pulse_time, data[numberOfDataColumn * in],
              data[numberOfDataColumn * in] * data[numberOfDataColumn * in]);
        }
        allEventWS[0].first->getSpectrum(workspaceIndex) += weightedEvent;

        if (!onlySummedEventWorkspace && numEventEntries > 1) {
          allEventWS[eventWSIndex].first->getSpectrum(workspaceIndex) +=
              weightedEvent;
        }
      }
      eventWSIndex++;
    } // end reading over number of blocks of an event dataset

    nxFile.closeData();
    nxFile.closeGroup();
  } // end reading over number of event datasets

  // Create a default TOF-vector for histogramming, for now just 2 bins.
  // 2 bins is the standard. However for McStas simulation data it may make
  // sense to increase this number for a better initial visual effect
  auto axis = HistogramData::BinEdges{shortestTOF - 1, longestTOF + 1};

  // ensure that the specified name is given to the workspace (eventWS) when
  // added to outputGroup
  for (auto eventWS : allEventWS) {
    const auto ws = eventWS.first;
    ws->setAllX(axis);
    AnalysisDataService::Instance().addOrReplace(eventWS.second, ws);
    scatteringWSNames.emplace_back(eventWS.second);
  }
  return scatteringWSNames;
}
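// ---------------------------------------------------------------------------
// Illustrative sketch (not Mantid code) of the block-reading arithmetic used
// in readEventData above: with 6 columns of 8-byte doubles, a block of one
// million events is 1000000 * 6 * 8 bytes, roughly 48 MB, so the loop reads
// nOfFullBlocks full blocks plus one partial block for the remainder. The
// event count is invented; the snippet compiles as its own tiny program.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <iostream>

int main() {
  const int64_t nNeutrons = 2300000;
  const int64_t nNeutronsInBlock = 1000000;
  const int64_t nOfFullBlocks = nNeutrons / nNeutronsInBlock; // 2
  const int64_t nRemainingNeutrons =
      nNeutrons - nOfFullBlocks * nNeutronsInBlock; // 300000

  for (int64_t iBlock = 0; iBlock < nOfFullBlocks + 1; iBlock++) {
    const int64_t blockSize =
        (iBlock == nOfFullBlocks) ? nRemainingNeutrons : nNeutronsInBlock;
    std::cout << "block " << iBlock << ": " << blockSize << " events, "
              << blockSize * 6 * 8 / (1024.0 * 1024.0) << " MiB\n";
  }
}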