/** Load the pulse times, if needed. This sets
 * thisBankPulseTimes to the right pointer.
 */
void LoadBankFromDiskTask::loadPulseTimes(::NeXus::File &file) {
  try {
    // First, get info about the event_time_zero field in this bank
    file.openData("event_time_zero");
  } catch (::NeXus::Exception &) {
    // Field not found error is most likely.
    // Use the "proton_charge" das logs.
    thisBankPulseTimes = m_loader.alg->m_allBanksPulseTimes;
    return;
  }
  std::string thisStartTime;
  size_t thisNumPulses = 0;
  file.getAttr("offset", thisStartTime);
  if (!file.getInfo().dims.empty())
    thisNumPulses = file.getInfo().dims[0];
  file.closeData();

  // Now, we look through existing ones to see if it is already loaded
  // thisBankPulseTimes = NULL;
  for (auto &bankPulseTime : m_loader.m_bankPulseTimes) {
    if (bankPulseTime->equals(thisNumPulses, thisStartTime)) {
      thisBankPulseTimes = bankPulseTime;
      return;
    }
  }

  // Not found? Need to load and add it
  thisBankPulseTimes = boost::make_shared<BankPulseTimes>(boost::ref(file),
                                                          m_framePeriodNumbers);
  m_loader.m_bankPulseTimes.push_back(thisBankPulseTimes);
}
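// A minimal standalone sketch of the cache-or-create pattern used above:
// reuse pulse times already loaded for another bank with the same pulse count
// and start time, otherwise load and cache a new entry. The simplified
// PulseTimes struct and getOrLoad helper are illustrative only, not Mantid's
// BankPulseTimes API.
#include <cstddef>
#include <memory>
#include <string>
#include <vector>

struct PulseTimes { // illustrative stand-in for BankPulseTimes
  std::size_t numPulses;
  std::string startTime;
  bool equals(std::size_t n, const std::string &start) const {
    return numPulses == n && startTime == start;
  }
};

std::shared_ptr<PulseTimes>
getOrLoad(std::vector<std::shared_ptr<PulseTimes>> &cache,
          std::size_t numPulses, const std::string &startTime) {
  for (const auto &entry : cache)
    if (entry->equals(numPulses, startTime))
      return entry; // already loaded by another bank
  auto fresh = std::make_shared<PulseTimes>(PulseTimes{numPulses, startTime});
  cache.push_back(fresh);
  return fresh;
}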
/** Load the event_index field
 * (a list with one entry per pulse, giving the index in the event list of the
 * first event of that pulse)
 *
 * @param file :: File handle for the NeXus file
 * @param event_index :: ref to the vector
 */
void LoadBankFromDiskTask::loadEventIndex(::NeXus::File &file,
                                          std::vector<uint64_t> &event_index) {
  // Get the event_index (one entry per pulse, giving the index in the event
  // list of the first event of that pulse)
  file.openData("event_index");
  // Must be uint64
  if (file.getInfo().type == ::NeXus::UINT64)
    file.getData(event_index);
  else {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_index field is not UINT64! It will be skipped.\n";
    m_loadError = true;
  }
  file.closeData();

  // Look for the sign that the bank is empty
  if (event_index.size() == 1) {
    if (event_index[0] == 0) {
      // One entry, only zero. This means NO events in this bank.
      m_loadError = true;
      m_loader.alg->getLogger().debug() << "Bank " << entry_name
                                        << " is empty.\n";
    }
  }
}
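// The event_index array read above has one entry per pulse: the position of
// that pulse's first event in the bank's event list. A minimal standalone
// sketch of that mapping; the helper name and the example values are
// illustrative, not part of the loader.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Return the [begin, end) range of event-list positions belonging to pulse i;
// totalEvents closes the range for the final pulse.
std::pair<uint64_t, uint64_t>
eventRangeForPulse(const std::vector<uint64_t> &event_index, std::size_t i,
                   uint64_t totalEvents) {
  const uint64_t begin = event_index[i];
  const uint64_t end =
      (i + 1 < event_index.size()) ? event_index[i + 1] : totalEvents;
  return {begin, end};
}
// Example: event_index = {0, 3, 3, 7} with 10 events in total means pulse 0
// owns events [0,3), pulse 1 owns none, pulse 2 owns [3,7), pulse 3 owns [7,10).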
/** Open and load the times-of-flight data
*/
void LoadBankFromDiskTask::loadTof(::NeXus::File &file) {
  // Allocate the array
  auto temp = new float[m_loadSize[0]];
  delete[] m_event_time_of_flight;
  m_event_time_of_flight = temp;

  // Get the list of event_time_of_flight's
  if (!m_oldNexusFileNames)
    file.openData("event_time_offset");
  else
    file.openData("event_time_of_flight");

  // Check that the required space is there in the file.
  ::NeXus::Info tof_info = file.getInfo();
  int64_t tof_dim0 = recalculateDataSize(tof_info.dims[0]);
  if (tof_dim0 < m_loadSize[0] + m_loadStart[0]) {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name << "'s event_time_offset field is too small "
                                     "to load the desired data.\n";
    m_loadError = true;
  }

  // Check that the type is what it is supposed to be
  if (tof_info.type == ::NeXus::FLOAT32)
    file.getSlab(m_event_time_of_flight, m_loadStart, m_loadSize);
  else {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_time_offset field is not FLOAT32! It will be skipped.\n";
    m_loadError = true;
  }

  if (!m_loadError) {
    std::string units;
    file.getAttr("units", units);
    if (units != "microsecond") {
      m_loader.alg->getLogger().warning()
          << "Entry " << entry_name << "'s event_time_offset field's units are "
                                       "not microsecond. It will be skipped.\n";
      m_loadError = true;
    }
    file.closeData();
  } // no error
}
/** Load the weights of weighted events, if they exist
 * @param file A NeXus::File object opened at the correct group
 * @returns A new array containing the weights, or a nullptr if the weights
 * are not present
 */
std::unique_ptr<float[]>
LoadBankFromDiskTask::loadEventWeights(::NeXus::File &file) {
  try {
    // First, get info about the event_weight field in this bank
    file.openData("event_weight");
  } catch (::NeXus::Exception &) {
    // Field not found error is most likely.
    m_have_weight = false;
    return std::unique_ptr<float[]>();
  }
  // OK, we've got them
  m_have_weight = true;

  // Allocate the array
  auto event_weight = Mantid::Kernel::make_unique<float[]>(m_loadSize[0]);

  ::NeXus::Info weight_info = file.getInfo();
  int64_t weight_dim0 = recalculateDataSize(weight_info.dims[0]);
  if (weight_dim0 < m_loadSize[0] + m_loadStart[0]) {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_weight field is too small to load the desired data.\n";
    m_loadError = true;
  }

  // Check that the type is what it is supposed to be
  if (weight_info.type == ::NeXus::FLOAT32)
    file.getSlab(event_weight.get(), m_loadStart, m_loadSize);
  else {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_weight field is not FLOAT32! It will be skipped.\n";
    m_loadError = true;
  }

  if (!m_loadError) {
    file.closeData();
  }
  return event_weight;
}
/** Open and load the times-of-flight data
 * @param file A NeXus::File object opened at the correct group
 * @returns A new array containing the times of flight for this bank
 */
std::unique_ptr<float[]> LoadBankFromDiskTask::loadTof(::NeXus::File &file) {
  // Allocate the array
  auto event_time_of_flight =
      Mantid::Kernel::make_unique<float[]>(m_loadSize[0]);

  // Get the list of event_time_of_flight's
  std::string key, tof_unit;
  if (!m_oldNexusFileNames)
    key = "event_time_offset";
  else
    key = "event_time_of_flight";
  file.openData(key);

  // Check that the required space is there in the file.
  ::NeXus::Info tof_info = file.getInfo();
  int64_t tof_dim0 = recalculateDataSize(tof_info.dims[0]);
  if (tof_dim0 < m_loadSize[0] + m_loadStart[0]) {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_time_offset field is too small "
           "to load the desired data.\n";
    m_loadError = true;
  }

  // The Nexus standard does not specify if event_time_offset should be float or
  // integer, so we use the NeXusIOHelper to perform the conversion to float on
  // the fly. If the data field already contains floats, the conversion is
  // skipped.
  auto vec = NeXus::NeXusIOHelper::readNexusSlab<float>(file, key, m_loadStart,
                                                        m_loadSize);
  file.getAttr("units", tof_unit);
  file.closeData();
  // Convert Tof to microseconds
  Kernel::Units::timeConversionVector(vec, tof_unit, "microseconds");
  std::copy(vec.begin(), vec.end(), event_time_of_flight.get());

  return event_time_of_flight;
}
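// timeConversionVector above rescales the raw times-of-flight into
// microseconds based on the units attribute read from the file. A minimal
// standalone sketch of that kind of rescaling (the unit table here is a tiny
// illustrative subset, not Mantid's Kernel::Units implementation):
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

void toMicroseconds(std::vector<float> &tof, const std::string &unit) {
  // scale factors from the given unit to microseconds
  static const std::map<std::string, float> scale = {
      {"second", 1.0e6f},
      {"millisecond", 1.0e3f},
      {"microsecond", 1.0f},
      {"nanosecond", 1.0e-3f}};
  const auto it = scale.find(unit);
  if (it == scale.end())
    throw std::runtime_error("Unsupported time unit: " + unit);
  if (it->second == 1.0f)
    return; // already in microseconds, nothing to do
  for (auto &t : tof)
    t *= it->second;
}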
/** Load the event_id field, which has been opened
 * @param file A NeXus::File object opened at the correct group
 * @returns A new array containing the event Ids for this bank
 */
std::unique_ptr<uint32_t[]>
LoadBankFromDiskTask::loadEventId(::NeXus::File &file) {
  // This is the data size
  ::NeXus::Info id_info = file.getInfo();
  int64_t dim0 = recalculateDataSize(id_info.dims[0]);

  // Now we allocate the required arrays
  auto event_id = Mantid::Kernel::make_unique<uint32_t[]>(m_loadSize[0]);

  // Check that the required space is there in the file.
  if (dim0 < m_loadSize[0] + m_loadStart[0]) {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name << "'s event_id field is too small (" << dim0
        << ") to load the desired data size (" << m_loadSize[0] + m_loadStart[0]
        << ").\n";
    m_loadError = true;
  }

  if (m_loader.alg->getCancel())
    m_loadError = true; // To allow cancelling the algorithm

  if (!m_loadError) {
    // Must be uint32
    if (id_info.type == ::NeXus::UINT32)
      file.getSlab(event_id.get(), m_loadStart, m_loadSize);
    else {
      m_loader.alg->getLogger().warning()
          << "Entry " << entry_name
          << "'s event_id field is not UINT32! It will be skipped.\n";
      m_loadError = true;
    }
    file.closeData();

    // determine the range of pixel ids
    for (int64_t i = 0; i < m_loadSize[0]; ++i) {
      const auto id = event_id[i];
      if (id < m_min_id)
        m_min_id = id;
      if (id > m_max_id)
        m_max_id = id;
    }

    if (m_min_id > static_cast<uint32_t>(m_loader.eventid_max)) {
      // All the detector IDs in the bank are higher than the highest 'known'
      // (from the IDF) ID. Setting this will abort the loading of the bank.
      m_loadError = true;
    }
    // fixup the minimum pixel id in the case that it's lower than the lowest
    // 'known' id. We test this by checking that when we add the offset we
    // would not get a negative index into the vector. Note that m_min_id is
    // a uint so we have to be cautious about adding it to an int which may be
    // negative.
    if (static_cast<int32_t>(m_min_id) + m_loader.pixelID_to_wi_offset < 0) {
      m_min_id = static_cast<uint32_t>(abs(m_loader.pixelID_to_wi_offset));
    }
    // fixup the maximum pixel id in the case that it's higher than the
    // highest 'known' id
    if (m_max_id > static_cast<uint32_t>(m_loader.eventid_max))
      m_max_id = static_cast<uint32_t>(m_loader.eventid_max);
  }
  return event_id;
}
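// Why loadEventId casts m_min_id to a signed type before adding the (possibly
// negative) pixelID_to_wi_offset: adding a negative int to an unsigned value
// wraps around instead of producing a negative number. A small standalone
// illustration (the variable names are placeholders, not the loader's members):
#include <cstdint>
#include <cstdlib>
#include <iostream>

int main() {
  uint32_t min_id = 2; // smallest pixel id seen in the bank
  int32_t offset = -5; // pixel-id -> workspace-index offset, may be negative

  // Unsigned arithmetic wraps: the "negative" result becomes a huge number.
  uint32_t wrapped = min_id + offset; // 4294967293, not -3

  // Cast to signed first, then test for a negative index, as the loader does.
  if (static_cast<int32_t>(min_id) + offset < 0)
    min_id = static_cast<uint32_t>(std::abs(offset)); // clamp to a valid index

  std::cout << wrapped << " " << min_id << "\n"; // prints: 4294967293 5
}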
/** Open the event_id field and validate the contents
 *
 * @param file :: File handle for the NeXus file
 * @param start_event :: set to the index of the first event
 * @param stop_event :: set to the index of the last event + 1
 * @param event_index :: a list with one entry per pulse, giving the index in
 * the event list of the first event of that pulse
 */
void LoadBankFromDiskTask::prepareEventId(
    ::NeXus::File &file, int64_t &start_event, int64_t &stop_event,
    const std::vector<uint64_t> &event_index) {
  // Get the list of pixel ID's
  if (m_oldNexusFileNames)
    file.openData("event_pixel_id");
  else
    file.openData("event_id");

  // By default, use all available indices
  start_event = 0;
  ::NeXus::Info id_info = file.getInfo();
  // dims[0] can be negative in ISIS meaning 2^32 + dims[0]. Take that into
  // account
  int64_t dim0 = recalculateDataSize(id_info.dims[0]);
  stop_event = dim0;

  // Handle the time filtering by changing the start/end offsets.
  for (size_t i = 0; i < thisBankPulseTimes->numPulses; i++) {
    if (thisBankPulseTimes->pulseTimes[i] >= m_loader.alg->filter_time_start) {
      start_event = static_cast<int64_t>(event_index[i]);
      break; // stop looking
    }
  }

  if (start_event > dim0) {
    // If the frame indexes are bad then we can't construct the times of the
    // events properly and filtering by time
    // will not work on this data
    m_loader.alg->getLogger().warning()
        << this->entry_name
        << "'s field 'event_index' seems to be invalid (start_index > than "
           "the number of events in the bank)."
        << "All events will appear in the same frame and filtering by time "
           "will not be possible on this data.\n";
    start_event = 0;
    stop_event = dim0;
  } else {
    for (size_t i = 0; i < thisBankPulseTimes->numPulses; i++) {
      if (thisBankPulseTimes->pulseTimes[i] > m_loader.alg->filter_time_stop) {
        stop_event = event_index[i];
        break;
      }
    }
  }
  // We are loading part - work out the event number range
  if (m_loader.chunk != EMPTY_INT()) {
    start_event =
        static_cast<int64_t>(m_loader.chunk - m_loader.firstChunkForBank) *
        static_cast<int64_t>(m_loader.eventsPerChunk);
    // Don't change stop_event for the final chunk
    if (start_event + static_cast<int64_t>(m_loader.eventsPerChunk) <
        stop_event)
      stop_event = start_event + static_cast<int64_t>(m_loader.eventsPerChunk);
  }

  // Make sure it is within range
  if (stop_event > dim0)
    stop_event = dim0;

  m_loader.alg->getLogger().debug()
      << entry_name << ": start_event " << start_event << " stop_event "
      << stop_event << "\n";
}
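// recalculateDataSize, used throughout these loaders, compensates for ISIS
// files where dims[0] is written as a signed 32-bit value, so a negative
// number actually means 2^32 + dims[0]. A minimal standalone sketch of that
// rule as described in the comments above; this is an illustration, not the
// Mantid implementation.
#include <cstdint>

int64_t recalculateDataSizeSketch(int64_t fileDim0) {
  if (fileDim0 < 0)
    return fileDim0 + (int64_t(1) << 32); // interpret as 2^32 + dims[0]
  return fileDim0;
}
// Example: a stored dims[0] of -100 corresponds to 4294967196 entries.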
Example #8
bool MuonNexusReader::readMuonLogData(NeXus::File &handle) {
  const string NAME("name");
  const string VALUES("values");
  const string TIME("time");

  // read name of Log data
  string dataName;
  handle.readData(NAME, dataName);

  // read data values
  try {
    handle.openData(VALUES);
  } catch (NeXus::Exception &) {
    g_log.warning() << "No " << VALUES << " set in " << handle.getPath()
                    << "\n";
    return false;
  }

  std::vector<float> values;
  std::vector<std::string> stringValues;
  bool isNumeric(false);

  NeXus::Info info = handle.getInfo();
  if (info.type == NX_FLOAT32 && info.dims.size() == 1) {
    isNumeric = true;
    boost::scoped_array<float> dataVals(new float[info.dims[0]]);
    handle.getData(dataVals.get());
    values.assign(dataVals.get(), dataVals.get() + info.dims[0]);
    stringValues.resize(info.dims[0]); // Leave empty
  } else if (info.type == NX_CHAR && info.dims.size() == 2) {
    boost::scoped_array<char> dataVals(
        new char[info.dims[0] * info.dims[1] + 1]);
    handle.getData(dataVals.get());
    dataVals[info.dims[0] * info.dims[1]] = 0;
    for (int i = 0; i < info.dims[0]; ++i) {
      std::string str(&dataVals[i * info.dims[1]],
                      &dataVals[(i + 1) * info.dims[1]]);
      stringValues.push_back(str);
    }
    values.resize(info.dims[0]); // Leave empty
  } else {
    // Leave both empty
    values.resize(info.dims[0]);
    stringValues.resize(info.dims[0]);
  }
  handle.closeData();

  // read time values
  try {
    handle.openData(TIME);
  } catch (NeXus::Exception &) {
    g_log.warning() << "No " << TIME << " set in " << handle.getPath() << "\n";
    return false;
  }

  info = handle.getInfo();
  boost::scoped_array<float> timeVals(new float[info.dims[0]]);
  if (info.type == NX_FLOAT32 && info.dims.size() == 1) {
    handle.getData(timeVals.get());
  } else {
    throw std::runtime_error(
        "Error in MuonNexusReader: expected float array for log times");
  }
  handle.closeData();

  // Add loaded values to vectors

  logNames.push_back(dataName);

  std::vector<float> tmp(timeVals.get(), timeVals.get() + info.dims[0]);
  logTimes.push_back(tmp);

  logType.push_back(isNumeric);
  logValues.push_back(values);
  logStringValues.push_back(stringValues);

  return true;
}
Example #9
/** Loads an entry from a previously-open NXS file as a log entry
 * in the workspace's run.
 *
 * @param file: NXS file handle. MUST BE PASSED BY REFERENCE otherwise a
 *    segfault occurs.
 * @param entry_name, entry_class: name and class of NXlog to open.
 */
void LoadLogsFromSNSNexus::loadSampleLog(::NeXus::File& file, std::string entry_name, std::string entry_class)
{
  // whether or not to overwrite logs on workspace
  bool overwritelogs = this->getProperty("OverwriteLogs");

  file.openGroup(entry_name, entry_class);

  // Validate the NX log class.
  map<string, string> entries = file.getEntries();
  if ((entries.find("value") == entries.end()) ||
      (entries.find("time") == entries.end()) )
  {
    g_log.warning() << "Invalid NXlog entry " << entry_name << " found. Did not contain 'value' and 'time'.\n";
    file.closeGroup();
    return;
  }


  ::NeXus::Info info;
  //Two possible types of properties:
  vector<double> values;
  vector<int> values_int;

  bool isTimeSeries = false;
  bool isInt = false;

  file.openData("value");

  //Get the units of the property
  std::string units("");
  try
  {
    file.getAttr("units", units);
  }
  catch (::NeXus::Exception &)
  {
    //Ignore missing units field.
    units = "";
  }

  //Treat every entry as a time series (the single-value check is disabled)
  info = file.getInfo();
  //isTimeSeries = (info.dims[0] > 1);
  isTimeSeries = true;

  Timer timer1;
  try
  {
    //Get the data (convert types if necessary)
    if (file.isDataInt())
    {
      isInt = true;
      file.getDataCoerce(values_int);
//      if (values_int.size() == 1)
//      {
//        WS->mutableRun().addProperty(entry_name, values_int[0], units);
//      }

    }
    else
    {
      //Try to get as doubles.
      file.getDataCoerce(values);
//      if (values.size() == 1)
//      {
//        WS->mutableRun().addProperty(entry_name, values[0], units);
//      }
    }
  }
  catch (::NeXus::Exception &e)
  {
    g_log.warning() << "NXlog entry " << entry_name << " gave an error when loading 'value' data:'" << e.what() << "'.\n";
    file.closeData();
    file.closeGroup();
    return;
  }
  if (VERBOSE) std::cout << "getDataCoerce took " << timer1.elapsed() << " sec.\n";


  file.closeData();

  if (isTimeSeries)
  {
    // --- Time series property ---

    //Get the times
    vector<double> time_double;
    vector<DateAndTime> times;

    try {
      file.openData("time");
    }
    catch (::NeXus::Exception &e)
    {
      g_log.warning() << "NXlog entry " << entry_name << " gave an error when opening the time field '" << e.what() << "'.\n";
      file.closeGroup();
      return;
    }

    //----- Start time is an ISO8601 string date and time. ------
    std::string start;
    try {
      file.getAttr("start", start);
    }
    catch (::NeXus::Exception &)
    {
      //Some logs have "offset" instead of start
      try {
        file.getAttr("offset", start);
      }
      catch (::NeXus::Exception &)
      {
        g_log.warning() << "NXlog entry " << entry_name << " has no start time indicated.\n";
        file.closeData();
        file.closeGroup();
        return;
      }
    }

    //Convert to date and time
    Kernel::DateAndTime start_time = Kernel::DateAndTime(start);

    std::string time_units;
    file.getAttr("units", time_units);
    if (time_units != "second")
    {
      g_log.warning() << "NXlog entry " << entry_name << " has time units of '" << time_units << "', which are unsupported. 'second' is the only supported time unit.\n";
      file.closeData();
      file.closeGroup();
      return;
    }

    Timer timer2;
    //--- Load the seconds into a double array ---
    try {
      file.getDataCoerce(time_double);
    }
    catch (::NeXus::Exception &e)
    {
      g_log.warning() << "NXlog entry " << entry_name << "'s time field could not be loaded: '" << e.what() << "'.\n";
      file.closeData();
      file.closeGroup();
      return;
    }
    file.closeData();

    if (VERBOSE) std::cout << "getDataCoerce for the seconds field took " << timer2.elapsed() << " sec.\n";

    if (isInt)
    {
      //Make an int TSP
      TimeSeriesProperty<int> * tsp = new TimeSeriesProperty<int>(entry_name);
      tsp->create(start_time, time_double, values_int);
      tsp->setUnits( units );
      WS->mutableRun().addProperty( tsp, overwritelogs );
    }
    else
    {
      //Make a double TSP
      TimeSeriesProperty<double> * tsp = new TimeSeriesProperty<double>(entry_name);

      Timer timer3;
      tsp->create(start_time, time_double, values);
      if (VERBOSE) std::cout << "creating a TSP took  " << timer3.elapsed() << " sec.\n";

      tsp->setUnits( units );
      WS->mutableRun().addProperty( tsp, overwritelogs );
      // Swap with empty vectors to release the memory immediately
      std::vector<double>().swap(time_double);
      std::vector<double>().swap(values);

    }

  }

  file.closeGroup();
}
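// The time series above is stored as an ISO8601 start time ("start" or
// "offset" attribute) plus per-sample offsets in seconds; DateAndTime and
// TimeSeriesProperty handle the conversion inside Mantid. A minimal standalone
// sketch of the same idea with std::chrono, assuming the start time has
// already been parsed (the helper name is illustrative):
#include <chrono>
#include <vector>

using Clock = std::chrono::system_clock;

// Turn per-sample offsets in seconds into absolute timestamps.
std::vector<Clock::time_point>
absoluteLogTimes(Clock::time_point start,
                 const std::vector<double> &offsetsSeconds) {
  std::vector<Clock::time_point> times;
  times.reserve(offsetsSeconds.size());
  for (double s : offsetsSeconds)
    times.push_back(start + std::chrono::duration_cast<Clock::duration>(
                                std::chrono::duration<double>(s)));
  return times;
}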
Example #10
/**
 * Read Event Data
 * @param eventEntries map of the file entries that have events
 * @param nxFile Reads data from inside the first top entry
 * @return Names of workspaces to include in the output group
 */
std::vector<std::string> LoadMcStas::readEventData(
    const std::map<std::string, std::string> &eventEntries,
    ::NeXus::File &nxFile) {

  // vector to store output workspaces
  std::vector<std::string> scatteringWSNames;

  std::string filename = getPropertyValue("Filename");
  auto entries = nxFile.getEntries();
  const bool errorBarsSetTo1 = getProperty("ErrorBarsSetTo1");

  // We will assume that each top level entry contains one McStas-generated
  // IDF and that any event data entries within this top level entry are data
  // collected for that instrument.
  // This code for loading the instrument is for now adjusted code from
  // ExperimentalInfo.

  // Close data folder and go back to top level. Then read and close the
  // Instrument folder.
  nxFile.closeGroup();

  Geometry::Instrument_sptr instrument;

  // Initialize progress reporting
  int reports = 2;
  const double progressFractionInitial = 0.1;
  Progress progInitial(this, 0.0, progressFractionInitial, reports);

  std::string instrumentXML;
  progInitial.report("Loading instrument");
  try {
    nxFile.openGroup("instrument", "NXinstrument");
    nxFile.openGroup("instrument_xml", "NXnote");
    nxFile.readData("data", instrumentXML);
    nxFile.closeGroup();
    nxFile.closeGroup();
  } catch (...) {
    g_log.warning()
        << "\nCould not find the instrument description in the Nexus file: "
        << filename << ". Ignoring event data from the Nexus file.\n";
    return scatteringWSNames;
  }

  try {
    std::string instrumentName = "McStas";
    Geometry::InstrumentDefinitionParser parser(filename, instrumentName,
                                                instrumentXML);
    std::string instrumentNameMangled = parser.getMangledName();

    // Check whether the instrument is already in the InstrumentDataService
    if (InstrumentDataService::Instance().doesExist(instrumentNameMangled)) {
      // If it does, just use the one from the one stored there
      instrument =
          InstrumentDataService::Instance().retrieve(instrumentNameMangled);
    } else {
      // Really create the instrument
      instrument = parser.parseXML(nullptr);
      // Add to data service for later retrieval
      InstrumentDataService::Instance().add(instrumentNameMangled, instrument);
    }
  } catch (Exception::InstrumentDefinitionError &e) {
    g_log.warning()
        << "When trying to read the instrument description in the Nexus file: "
        << filename << " the following error is reported: " << e.what()
        << " Ignore eventdata from the Nexus file\n";
    return scatteringWSNames;
    ;
  } catch (...) {
    g_log.warning()
        << "Could not parse instrument description in the Nexus file: "
        << filename << " Ignore eventdata from the Nexus file\n";
    return scatteringWSNames;
    ;
  }
  // Finished reading Instrument. Then open new data folder again
  nxFile.openGroup("data", "NXdetector");

  // create and prepare an event workspace ready to receive the mcstas events
  progInitial.report("Set up EventWorkspace");
  EventWorkspace_sptr eventWS(new EventWorkspace());
  // initialize, creating up front a number of event lists equal to the number
  // of detectors
  eventWS->initialize(instrument->getNumberDetectors(), 1, 1);
  // Set the units
  eventWS->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
  eventWS->setYUnit("Counts");
  // set the instrument
  eventWS->setInstrument(instrument);
  // assign detector ID to eventlists

  std::vector<detid_t> detIDs = instrument->getDetectorIDs();

  for (size_t i = 0; i < instrument->getNumberDetectors(); i++) {
    eventWS->getSpectrum(i).addDetectorID(detIDs[i]);
    // spectrum numbers are treated as equal to detector IDs for McStas data
    eventWS->getSpectrum(i).setSpectrumNo(detIDs[i]);
  }
  // the one is here for the moment for backward compatibility
  eventWS->rebuildSpectraMapping(true);

  bool isAnyNeutrons = false;
  // to store shortest and longest recorded TOF
  double shortestTOF(0.0);
  double longestTOF(0.0);

  // create vector container all the event output workspaces needed
  const size_t numEventEntries = eventEntries.size();
  std::string nameOfGroupWS = getProperty("OutputWorkspace");
  const auto eventDataTotalName = "EventData_" + nameOfGroupWS;
  std::vector<std::pair<EventWorkspace_sptr, std::string>> allEventWS = {
      {eventWS, eventDataTotalName}};
  // if numEventEntries > 1 also create separate event workspaces
  const bool onlySummedEventWorkspace =
      getProperty("OutputOnlySummedEventWorkspace");
  if (!onlySummedEventWorkspace && numEventEntries > 1) {
    for (const auto &eventEntry : eventEntries) {
      const std::string &dataName = eventEntry.first;
      // create container to hold partial event data
      // plus the name users will see for it
      const auto ws_name = dataName + "_" + nameOfGroupWS;
      allEventWS.emplace_back(eventWS->clone(), ws_name);
    }
  }

  Progress progEntries(this, progressFractionInitial, 1.0, numEventEntries * 2);

  // Refer to entry in allEventWS. The first non-summed workspace index is 1
  auto eventWSIndex = 1u;
  // Loop over McStas event data components
  for (const auto &eventEntry : eventEntries) {
    const std::string &dataName = eventEntry.first;
    const std::string &dataType = eventEntry.second;

    // open second level entry
    nxFile.openGroup(dataName, dataType);
    std::vector<double> data;
    nxFile.openData("events");
    progEntries.report("read event data from nexus");

    // Note that the nexus readData method reads a multi-column data entry
    // into a flat vector.
    // The number of data columns for each neutron is here hardcoded to 6:
    // (p, x, y, n, id, t). Thus we have
    // column  0 : p 	neutron weight
    // column  1 : x 	x coordinate
    // column  2 : y 	y coordinate
    // column  3 : n 	accumulated number of neutrons
    // column  4 : id 	pixel id
    // column  5 : t 	time

    // get info about event data
    ::NeXus::Info id_info = nxFile.getInfo();
    if (id_info.dims.size() != 2) {
      g_log.error() << "Event data in McStas nexus file not loaded. Expected "
                       "event data block to be two dimensional\n";
      return scatteringWSNames;
    }
    int64_t nNeutrons = id_info.dims[0];
    int64_t numberOfDataColumn = id_info.dims[1];
    if (nNeutrons && numberOfDataColumn != 6) {
      g_log.error() << "Event data in McStas nexus file expecting 6 columns\n";
      return scatteringWSNames;
    }
    if (!isAnyNeutrons && nNeutrons > 0)
      isAnyNeutrons = true;

    std::vector<int64_t> start(2);
    std::vector<int64_t> step(2);

    // read the event data in blocks. 1 million events is 1000000*6 doubles
    // (1000000*6*8 bytes), about 50 MB
    int64_t nNeutronsInBlock = 1000000;
    int64_t nOfFullBlocks = nNeutrons / nNeutronsInBlock;
    int64_t nRemainingNeutrons = nNeutrons - nOfFullBlocks * nNeutronsInBlock;
    // sum over number of blocks + 1 to cover the remainder
    for (int64_t iBlock = 0; iBlock < nOfFullBlocks + 1; iBlock++) {
      if (iBlock == nOfFullBlocks) {
        // read remaining neutrons
        start[0] = nOfFullBlocks * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nRemainingNeutrons;
        step[1] = numberOfDataColumn;
      } else {
        // read neutrons in a full block
        start[0] = iBlock * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nNeutronsInBlock;
        step[1] = numberOfDataColumn;
      }
      const int64_t nNeutronsForthisBlock =
          step[0]; // number of neutrons read for this block
      data.resize(nNeutronsForthisBlock * numberOfDataColumn);

      // Check that the type is what it is supposed to be
      if (id_info.type == ::NeXus::FLOAT64) {
        nxFile.getSlab(&data[0], start, step);
      } else {
        g_log.warning()
            << "Entry event field is not FLOAT64! It will be skipped.\n";
        continue;
      }

      // populate workspace with McStas events
      const detid2index_map detIDtoWSindex_map =
          allEventWS[0].first->getDetectorIDToWorkspaceIndexMap(true);

      progEntries.report("read event data into workspace");
      for (int64_t in = 0; in < nNeutronsForthisBlock; in++) {
        const int detectorID =
            static_cast<int>(data[4 + numberOfDataColumn * in]);
        const double detector_time = data[5 + numberOfDataColumn * in] *
                                     1.0e6; // convert to microseconds
        if (in == 0 && iBlock == 0) {
          shortestTOF = detector_time;
          longestTOF = detector_time;
        } else {
          if (detector_time < shortestTOF)
            shortestTOF = detector_time;
          if (detector_time > longestTOF)
            longestTOF = detector_time;
        }

        const size_t workspaceIndex =
            detIDtoWSindex_map.find(detectorID)->second;

        int64_t pulse_time = 0;
        auto weightedEvent = WeightedEvent();
        if (errorBarsSetTo1) {
          weightedEvent = WeightedEvent(detector_time, pulse_time,
                                        data[numberOfDataColumn * in], 1.0);
        } else {
          weightedEvent = WeightedEvent(
              detector_time, pulse_time, data[numberOfDataColumn * in],
              data[numberOfDataColumn * in] * data[numberOfDataColumn * in]);
        }
        allEventWS[0].first->getSpectrum(workspaceIndex) += weightedEvent;
        if (!onlySummedEventWorkspace && numEventEntries > 1) {
          allEventWS[eventWSIndex].first->getSpectrum(workspaceIndex) +=
              weightedEvent;
        }
      }
    } // end reading over number of blocks of an event dataset

    // Move on to the next per-component event workspace for the next entry
    eventWSIndex++;

    nxFile.closeData();
    nxFile.closeGroup();

  } // end reading over number of event datasets

  // Create a default TOF-vector for histogramming, for now just 2 bins.
  // 2 bins is the standard, but for McStas simulation data it may make sense
  // to increase this number for a better initial visual effect.

  auto axis = HistogramData::BinEdges{shortestTOF - 1, longestTOF + 1};

  // ensure that specified name is given to workspace (eventWS) when added to
  // outputGroup
  for (auto eventWS : allEventWS) {
    const auto ws = eventWS.first;
    ws->setAllX(axis);
    AnalysisDataService::Instance().addOrReplace(eventWS.second, ws);
    scatteringWSNames.emplace_back(eventWS.second);
  }
  return scatteringWSNames;
}
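// Each McStas event occupies 6 consecutive doubles in the flattened block read
// above (p, x, y, n, id, t), so field k of event i sits at data[k + 6 * i].
// A minimal standalone decoder for one event; the struct and function names
// are illustrative, not part of LoadMcStas.
#include <cstddef>
#include <vector>

struct McStasEvent {
  double weight;   // column 0: p, neutron weight
  int detectorID;  // column 4: id, pixel id
  double tofMicro; // column 5: t, converted from seconds to microseconds
};

McStasEvent decodeEvent(const std::vector<double> &data, std::size_t i,
                        std::size_t nColumns = 6) {
  McStasEvent e;
  e.weight = data[0 + nColumns * i];
  e.detectorID = static_cast<int>(data[4 + nColumns * i]);
  e.tofMicro = data[5 + nColumns * i] * 1.0e6;
  return e;
}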
Example #11
/**
 * Load an SE log entry
 * @param file :: A reference to the NeXus file handle opened at the parent
 * group
 * @param entry_name :: The name of the log entry
 * @param workspace :: A pointer to the workspace to store the logs
 */
void LoadNexusLogs::loadSELog(
    ::NeXus::File &file, const std::string &entry_name,
    boost::shared_ptr<API::MatrixWorkspace> workspace) const {
  // Open the entry
  file.openGroup(entry_name, "IXseblock");
  std::string propName = entry_name;
  if (workspace->run().hasProperty(propName)) {
    propName = "selog_" + propName;
  }
  // There are two possible entries:
  //   value_log - A time series entry. This can contain a corrupt value
  //   entry, so if it does, use the plain 'value' entry instead
  //   value - A single-value float entry
  Kernel::Property *logValue(nullptr);
  std::map<std::string, std::string> entries = file.getEntries();
  if (entries.find("value_log") != entries.end()) {
    try {
      try {
        file.openGroup("value_log", "NXlog");
      } catch (::NeXus::Exception &) {
        file.closeGroup();
        throw;
      }
      logValue = createTimeSeries(file, propName);
      file.closeGroup();
    } catch (::NeXus::Exception &e) {
      g_log.warning() << "IXseblock entry '" << entry_name
                      << "' gave an error when loading "
                      << "a time series:'" << e.what() << "'. Skipping entry\n";
      file.closeGroup(); // value_log
      file.closeGroup(); // entry_name
      return;
    }
  } else if (entries.find("value") != entries.end()) {
    try {
      // This may have a larger dimension than 1, but it has no time field so
      // take the first entry
      file.openData("value");
      ::NeXus::Info info = file.getInfo();
      if (info.type == ::NeXus::FLOAT32) {
        boost::scoped_array<float> value(new float[info.dims[0]]);
        file.getData(value.get());
        file.closeData();
        logValue = new Kernel::PropertyWithValue<double>(
            propName, static_cast<double>(value[0]), true);
      } else {
        file.closeGroup();
        return;
      }
    } catch (::NeXus::Exception &e) {
      g_log.warning() << "IXseblock entry " << entry_name
                      << " gave an error when loading "
                      << "a single value:'" << e.what() << "'.\n";
      file.closeData();
      file.closeGroup();
      return;
    }
  } else {
    g_log.warning() << "IXseblock entry " << entry_name
                    << " cannot be read, skipping entry.\n";
    file.closeGroup();
    return;
  }
  workspace->mutableRun().addProperty(logValue);
  file.closeGroup();
}
Example #12
/**
 * Read event data
 * @param eventEntries map of the file entries that have events
 * @param outputGroup pointer to the workspace group
 * @param nxFile Reads data from inside the first top entry
 */
void LoadMcStas::readEventData(
    const std::map<std::string, std::string> &eventEntries,
    WorkspaceGroup_sptr &outputGroup, ::NeXus::File &nxFile) {
  std::string filename = getPropertyValue("Filename");
  auto entries = nxFile.getEntries();

  // We will assume that each top level entry contains one McStas-generated
  // IDF and that any event data entries within this top level entry are data
  // collected for that instrument.
  // This code for loading the instrument is for now adjusted code from
  // ExperimentalInfo.

  // Close data folder and go back to top level. Then read and close the
  // Instrument folder.
  nxFile.closeGroup();

  Geometry::Instrument_sptr instrument;

  // Initialize progress reporting
  int reports = 2;
  const double progressFractionInitial = 0.1;
  Progress progInitial(this, 0.0, progressFractionInitial, reports);

  try {
    nxFile.openGroup("instrument", "NXinstrument");
    std::string instrumentXML;
    nxFile.openGroup("instrument_xml", "NXnote");
    nxFile.readData("data", instrumentXML);
    nxFile.closeGroup();
    nxFile.closeGroup();

    progInitial.report("Loading instrument");

    Geometry::InstrumentDefinitionParser parser;
    std::string instrumentName = "McStas";
    parser.initialize(filename, instrumentName, instrumentXML);
    std::string instrumentNameMangled = parser.getMangledName();

    // Check whether the instrument is already in the InstrumentDataService
    if (InstrumentDataService::Instance().doesExist(instrumentNameMangled)) {
      // If it does, just use the one from the one stored there
      instrument =
          InstrumentDataService::Instance().retrieve(instrumentNameMangled);
    } else {
      // Really create the instrument
      instrument = parser.parseXML(NULL);
      // Add to data service for later retrieval
      InstrumentDataService::Instance().add(instrumentNameMangled, instrument);
    }
  } catch (...) {
    // Loader should not stop if there is no IDF.xml
    g_log.warning()
        << "\nCould not find the instrument description in the Nexus file: "
        << filename << ". Ignoring event data from the data file." << std::endl;
    return;
  }
  // Finished reading Instrument. Then open new data folder again
  nxFile.openGroup("data", "NXdetector");

  // create and prepare an event workspace ready to receive the mcstas events
  progInitial.report("Set up EventWorkspace");
  EventWorkspace_sptr eventWS(new EventWorkspace());
  // initialize, creating up front a number of event lists equal to the number
  // of detectors
  eventWS->initialize(instrument->getNumberDetectors(), 1, 1);
  // Set the units
  eventWS->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
  eventWS->setYUnit("Counts");
  // set the instrument
  eventWS->setInstrument(instrument);
  // assign detector ID to eventlists

  std::vector<detid_t> detIDs = instrument->getDetectorIDs();

  for (size_t i = 0; i < instrument->getNumberDetectors(); i++) {
    eventWS->getEventList(i).addDetectorID(detIDs[i]);
    // spectrum numbers are treated as equal to detector IDs for McStas data
    eventWS->getEventList(i).setSpectrumNo(detIDs[i]);
  }
  // the one is here for the moment for backward compatibility
  eventWS->rebuildSpectraMapping(true);

  bool isAnyNeutrons = false;
  // to store shortest and longest recorded TOF
  double shortestTOF(0.0);
  double longestTOF(0.0);

  const size_t numEventEntries = eventEntries.size();
  Progress progEntries(this, progressFractionInitial, 1.0, numEventEntries * 2);
  for (auto eit = eventEntries.begin(); eit != eventEntries.end(); ++eit) {
    std::string dataName = eit->first;
    std::string dataType = eit->second;

    // open second level entry
    nxFile.openGroup(dataName, dataType);
    std::vector<double> data;
    nxFile.openData("events");
    progEntries.report("read event data from nexus");

    // Note that the nexus readData method reads a multi-column data entry
    // into a flat vector.
    // The number of data columns for each neutron is here hardcoded to 6:
    // (p, x, y, n, id, t). Thus we have
    // column  0 : p 	neutron weight
    // column  1 : x 	x coordinate
    // column  2 : y 	y coordinate
    // column  3 : n 	accumulated number of neutrons
    // column  4 : id 	pixel id
    // column  5 : t 	time

    // get info about event data
    ::NeXus::Info id_info = nxFile.getInfo();
    if (id_info.dims.size() != 2) {
      g_log.error() << "Event data in McStas nexus file not loaded. Expected "
                       "event data block to be two dimensional" << std::endl;
      return;
    }
    int64_t nNeutrons = id_info.dims[0];
    int64_t numberOfDataColumn = id_info.dims[1];
    if (nNeutrons && numberOfDataColumn != 6) {
      g_log.error() << "Event data in McStas nexus file expecting 6 columns"
                    << std::endl;
      return;
    }
    if (isAnyNeutrons == false && nNeutrons > 0)
      isAnyNeutrons = true;

    std::vector<int64_t> start(2);
    std::vector<int64_t> step(2);

    // read the event data in blocks. 1 million events is 1000000*6 doubles
    // (1000000*6*8 bytes), about 50 MB
    int64_t nNeutronsInBlock = 1000000;
    int64_t nOfFullBlocks = nNeutrons / nNeutronsInBlock;
    int64_t nRemainingNeutrons = nNeutrons - nOfFullBlocks * nNeutronsInBlock;
    // sum over number of blocks + 1 to cover the remainder
    for (int64_t iBlock = 0; iBlock < nOfFullBlocks + 1; iBlock++) {
      if (iBlock == nOfFullBlocks) {
        // read remaining neutrons
        start[0] = nOfFullBlocks * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nRemainingNeutrons;
        step[1] = numberOfDataColumn;
      } else {
        // read neutrons in a full block
        start[0] = iBlock * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nNeutronsInBlock;
        step[1] = numberOfDataColumn;
      }
      const int64_t nNeutronsForthisBlock =
          step[0]; // number of neutrons read for this block
      data.resize(nNeutronsForthisBlock * numberOfDataColumn);

      // Check that the type is what it is supposed to be
      if (id_info.type == ::NeXus::FLOAT64) {
        nxFile.getSlab(&data[0], start, step);
      } else {
        g_log.warning()
            << "Entry event field is not FLOAT64! It will be skipped.\n";
        continue;
      }

      // populate workspace with McStas events
      const detid2index_map detIDtoWSindex_map =
          eventWS->getDetectorIDToWorkspaceIndexMap(true);

      progEntries.report("read event data into workspace");
      for (int64_t in = 0; in < nNeutronsForthisBlock; in++) {
        const int detectorID =
            static_cast<int>(data[4 + numberOfDataColumn * in]);
        const double detector_time = data[5 + numberOfDataColumn * in] *
                                     1.0e6; // convert to microseconds
        if (in == 0 && iBlock == 0) {
          shortestTOF = detector_time;
          longestTOF = detector_time;
        } else {
          if (detector_time < shortestTOF)
            shortestTOF = detector_time;
          if (detector_time > longestTOF)
            longestTOF = detector_time;
        }

        const size_t workspaceIndex =
            detIDtoWSindex_map.find(detectorID)->second;

        int64_t pulse_time = 0;
        // eventWS->getEventList(workspaceIndex) +=
        // TofEvent(detector_time,pulse_time);
        // eventWS->getEventList(workspaceIndex) += TofEvent(detector_time);
        eventWS->getEventList(workspaceIndex) += WeightedEvent(
            detector_time, pulse_time, data[numberOfDataColumn * in], 1.0);
      }
    } // end reading over number of blocks of an event dataset

    // nxFile.getData(data);
    nxFile.closeData();
    nxFile.closeGroup();

  } // end reading over number of event datasets

  // Create a default TOF-vector for histogramming, for now just 2 bins.
  // 2 bins is the standard, but for McStas simulation data it may make sense
  // to increase this number for a better initial visual effect.
  Kernel::cow_ptr<MantidVec> axis;
  MantidVec &xRef = axis.access();
  xRef.resize(2, 0.0);
  // if ( nNeutrons > 0)
  if (isAnyNeutrons) {
    xRef[0] = shortestTOF - 1; // Just to make sure the bins hold it all
    xRef[1] = longestTOF + 1;
  }
  // Set the binning axis
  eventWS->setAllX(axis);

  // ensure that specified name is given to workspace (eventWS) when added to
  // outputGroup
  std::string nameOfGroupWS = getProperty("OutputWorkspace");
  std::string nameUserSee = std::string("EventData_") + nameOfGroupWS;
  std::string extraProperty =
      "Outputworkspace_dummy_" +
      boost::lexical_cast<std::string>(m_countNumWorkspaceAdded);
  declareProperty(new WorkspaceProperty<Workspace>(extraProperty, nameUserSee,
                                                   Direction::Output));
  setProperty(extraProperty, boost::static_pointer_cast<Workspace>(eventWS));
  m_countNumWorkspaceAdded++; // need to increment to ensure extraProperty are
                              // unique

  outputGroup->addWorkspace(eventWS);
}