/** Open and load the time-of-flight data
 */
void LoadBankFromDiskTask::loadTof(::NeXus::File &file) {
  // Allocate the array
  auto temp = new float[m_loadSize[0]];
  delete[] m_event_time_of_flight;
  m_event_time_of_flight = temp;

  // Get the list of event_time_of_flight's
  if (!m_oldNexusFileNames)
    file.openData("event_time_offset");
  else
    file.openData("event_time_of_flight");

  // Check that the required space is there in the file.
  ::NeXus::Info tof_info = file.getInfo();
  int64_t tof_dim0 = recalculateDataSize(tof_info.dims[0]);
  if (tof_dim0 < m_loadSize[0] + m_loadStart[0]) {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name << "'s event_time_offset field is too small "
                                     "to load the desired data.\n";
    m_loadError = true;
  }

  // Check that the type is what it is supposed to be
  if (tof_info.type == ::NeXus::FLOAT32)
    file.getSlab(m_event_time_of_flight, m_loadStart, m_loadSize);
  else {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_time_offset field is not FLOAT32! It will be skipped.\n";
    m_loadError = true;
  }

  if (!m_loadError) {
    std::string units;
    file.getAttr("units", units);
    if (units != "microsecond") {
      m_loader.alg->getLogger().warning()
          << "Entry " << entry_name << "'s event_time_offset field's units are "
                                       "not microsecond. It will be skipped.\n";
      m_loadError = true;
    }
    file.closeData();
  } // no error
}
/** Load the weights of weighted events, if they exist
 * @param file A NeXus::File object opened at the correct group
 * @returns A new array containing the weights or a nullptr if the weights
 * are not present
 */
std::unique_ptr<float[]>
LoadBankFromDiskTask::loadEventWeights(::NeXus::File &file) {
  try {
    // First, get info about the event_weight field in this bank
    file.openData("event_weight");
  } catch (::NeXus::Exception &) {
    // Field not found error is most likely.
    m_have_weight = false;
    return std::unique_ptr<float[]>();
  }
  // OK, we've got them
  m_have_weight = true;

  // Allocate the array
  auto event_weight = Mantid::Kernel::make_unique<float[]>(m_loadSize[0]);

  ::NeXus::Info weight_info = file.getInfo();
  int64_t weight_dim0 = recalculateDataSize(weight_info.dims[0]);
  if (weight_dim0 < m_loadSize[0] + m_loadStart[0]) {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_weight field is too small to load the desired data.\n";
    m_loadError = true;
  }

  // Check that the type is what it is supposed to be
  if (weight_info.type == ::NeXus::FLOAT32)
    file.getSlab(event_weight.get(), m_loadStart, m_loadSize);
  else {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name
        << "'s event_weight field is not FLOAT32! It will be skipped.\n";
    m_loadError = true;
  }

  if (!m_loadError) {
    file.closeData();
  }
  return event_weight;
}
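// A minimal usage sketch (assuming, as loadTof() above does, that the caller
// has already opened the bank group and set m_loadStart/m_loadSize):
//
//   auto event_weight = loadEventWeights(file);
//   if (m_have_weight && !m_loadError) {
//     // event_weight[i] is the weight of the i-th event in this load block
//   }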
/** Load the event_id field, which has been opened
 * @param file A NeXus::File object opened at the correct group
 * @returns A new array containing the event Ids for this bank
 */
std::unique_ptr<uint32_t[]>
LoadBankFromDiskTask::loadEventId(::NeXus::File &file) {
  // This is the data size
  ::NeXus::Info id_info = file.getInfo();
  int64_t dim0 = recalculateDataSize(id_info.dims[0]);

  // Now we allocate the required arrays
  auto event_id = Mantid::Kernel::make_unique<uint32_t[]>(m_loadSize[0]);

  // Check that the required space is there in the file.
  if (dim0 < m_loadSize[0] + m_loadStart[0]) {
    m_loader.alg->getLogger().warning()
        << "Entry " << entry_name << "'s event_id field is too small (" << dim0
        << ") to load the desired data size (" << m_loadSize[0] + m_loadStart[0]
        << ").\n";
    m_loadError = true;
  }

  if (m_loader.alg->getCancel())
    m_loadError = true; // To allow cancelling the algorithm

  if (!m_loadError) {
    // Must be uint32
    if (id_info.type == ::NeXus::UINT32)
      file.getSlab(event_id.get(), m_loadStart, m_loadSize);
    else {
      m_loader.alg->getLogger().warning()
          << "Entry " << entry_name
          << "'s event_id field is not UINT32! It will be skipped.\n";
      m_loadError = true;
    }
    file.closeData();

    // determine the range of pixel ids
    for (int64_t i = 0; i < m_loadSize[0]; ++i) {
      const auto id = event_id[i];
      if (id < m_min_id)
        m_min_id = id;
      if (id > m_max_id)
        m_max_id = id;
    }

    if (m_min_id > static_cast<uint32_t>(m_loader.eventid_max)) {
      // All the detector IDs in the bank are higher than the highest 'known'
      // (from the IDF)
      // ID. Setting this will abort the loading of the bank.
      m_loadError = true;
    }
    // fixup the minimum pixel id in the case that it's lower than the lowest
    // 'known' id. We test this by checking that when we add the offset we
    // would not get a negative index into the vector. Note that m_min_id is
    // a uint so we have to be cautious about adding it to an int which may be
    // negative.
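    // Illustrative sketch (an assumption about the lookup used downstream):
    //   wi = static_cast<size_t>(static_cast<int64_t>(id) +
    //                            m_loader.pixelID_to_wi_offset);
    // hence the clamp below keeps m_min_id + pixelID_to_wi_offset >= 0.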
    if (static_cast<int32_t>(m_min_id) + m_loader.pixelID_to_wi_offset < 0) {
      m_min_id = static_cast<uint32_t>(abs(m_loader.pixelID_to_wi_offset));
    }
    // fixup the maximum pixel id in the case that it's higher than the
    // highest 'known' id
    if (m_max_id > static_cast<uint32_t>(m_loader.eventid_max))
      m_max_id = static_cast<uint32_t>(m_loader.eventid_max);
  }
  return event_id;
}
/**
 * Read Event Data
 * @param eventEntries map of the file entries that have events
 * @param nxFile NeXus file handle, opened inside the first top-level entry
 * @return Names of workspaces to include in the output group
 */
std::vector<std::string> LoadMcStas::readEventData(
    const std::map<std::string, std::string> &eventEntries,
    ::NeXus::File &nxFile) {

  // vector to store output workspaces
  std::vector<std::string> scatteringWSNames;

  std::string filename = getPropertyValue("Filename");
  auto entries = nxFile.getEntries();
  const bool errorBarsSetTo1 = getProperty("ErrorBarsSetTo1");

  // We assume that each top-level entry contains one McStas-generated IDF and
  // that any event data entries within this top-level entry are data collected
  // for that instrument.
  // The code for loading the instrument is, for now, adapted from
  // ExperimentalInfo.

  // Close data folder and go back to top level. Then read and close the
  // Instrument folder.
  nxFile.closeGroup();

  Geometry::Instrument_sptr instrument;

  // Initialize progress reporting
  int reports = 2;
  const double progressFractionInitial = 0.1;
  Progress progInitial(this, 0.0, progressFractionInitial, reports);

  std::string instrumentXML;
  progInitial.report("Loading instrument");
  try {
    nxFile.openGroup("instrument", "NXinstrument");
    nxFile.openGroup("instrument_xml", "NXnote");
    nxFile.readData("data", instrumentXML);
    nxFile.closeGroup();
    nxFile.closeGroup();
  } catch (...) {
    g_log.warning()
        << "\nCould not find the instrument description in the Nexus file: "
        << filename << ". Ignoring event data from the Nexus file.\n";
    return scatteringWSNames;
  }

  try {
    std::string instrumentName = "McStas";
    Geometry::InstrumentDefinitionParser parser(filename, instrumentName,
                                                instrumentXML);
    std::string instrumentNameMangled = parser.getMangledName();

    // Check whether the instrument is already in the InstrumentDataService
    if (InstrumentDataService::Instance().doesExist(instrumentNameMangled)) {
      // If it does, just use the one from the one stored there
      instrument =
          InstrumentDataService::Instance().retrieve(instrumentNameMangled);
    } else {
      // Really create the instrument
      instrument = parser.parseXML(nullptr);
      // Add to data service for later retrieval
      InstrumentDataService::Instance().add(instrumentNameMangled, instrument);
    }
  } catch (Exception::InstrumentDefinitionError &e) {
    g_log.warning()
        << "When trying to read the instrument description in the Nexus file: "
        << filename << " the following error is reported: " << e.what()
        << ". Ignoring event data from the Nexus file.\n";
    return scatteringWSNames;
  } catch (...) {
    g_log.warning()
        << "Could not parse instrument description in the Nexus file: "
        << filename << ". Ignoring event data from the Nexus file.\n";
    return scatteringWSNames;
  }
  // Finished reading Instrument. Then open new data folder again
  nxFile.openGroup("data", "NXdetector");

  // create and prepare an event workspace ready to receive the mcstas events
  progInitial.report("Set up EventWorkspace");
  EventWorkspace_sptr eventWS(new EventWorkspace());
  // initialize, where create up front number of eventlists = number of
  // detectors
  eventWS->initialize(instrument->getNumberDetectors(), 1, 1);
  // Set the units
  eventWS->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
  eventWS->setYUnit("Counts");
  // set the instrument
  eventWS->setInstrument(instrument);
  // assign detector ID to eventlists

  std::vector<detid_t> detIDs = instrument->getDetectorIDs();

  for (size_t i = 0; i < instrument->getNumberDetectors(); i++) {
    eventWS->getSpectrum(i).addDetectorID(detIDs[i]);
    // spectrum number are treated as equal to detector IDs for McStas data
    eventWS->getSpectrum(i).setSpectrumNo(detIDs[i]);
  }
  // the 'true' argument is here for the moment for backward compatibility
  eventWS->rebuildSpectraMapping(true);

  bool isAnyNeutrons = false;
  // to store shortest and longest recorded TOF
  double shortestTOF(0.0);
  double longestTOF(0.0);

  // create vector container all the event output workspaces needed
  const size_t numEventEntries = eventEntries.size();
  std::string nameOfGroupWS = getProperty("OutputWorkspace");
  const auto eventDataTotalName = "EventData_" + nameOfGroupWS;
  std::vector<std::pair<EventWorkspace_sptr, std::string>> allEventWS = {
      {eventWS, eventDataTotalName}};
  // if numEventEntries > 1 also create separate event workspaces
  const bool onlySummedEventWorkspace =
      getProperty("OutputOnlySummedEventWorkspace");
  if (!onlySummedEventWorkspace && numEventEntries > 1) {
    for (const auto &eventEntry : eventEntries) {
      const std::string &dataName = eventEntry.first;
      // create container to hold partial event data
      // plus the name users will see for it
      const auto ws_name = dataName + "_" + nameOfGroupWS;
      allEventWS.emplace_back(eventWS->clone(), ws_name);
    }
  }
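  // At this point allEventWS[0] holds the summed workspace and, when separate
  // outputs were requested, allEventWS[1..numEventEntries] hold one (initially
  // empty) clone per event entry.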

  Progress progEntries(this, progressFractionInitial, 1.0, numEventEntries * 2);

  // Refer to entry in allEventWS. The first non-summed workspace index is 1
  auto eventWSIndex = 1u;
  // Loop over McStas event data components
  for (const auto &eventEntry : eventEntries) {
    const std::string &dataName = eventEntry.first;
    const std::string &dataType = eventEntry.second;

    // open second level entry
    nxFile.openGroup(dataName, dataType);
    std::vector<double> data;
    nxFile.openData("events");
    progEntries.report("read event data from nexus");

    // Note that the nexus readData method reads a multi-column data entry
    // into a flat vector. The number of data columns per neutron is here
    // hardcoded to 6: (p, x, y, n, id, t)
    // column  0 : p    neutron weight
    // column  1 : x    x coordinate
    // column  2 : y    y coordinate
    // column  3 : n    accumulated number of neutrons
    // column  4 : id   pixel id
    // column  5 : t    time
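    // For example, with this layout the pixel id and time of neutron `in` are
    // read from the flat buffer as
    //   id = data[4 + numberOfDataColumn * in];
    //   t  = data[5 + numberOfDataColumn * in];
    // which is exactly how the loop below indexes `data`.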

    // get info about event data
    ::NeXus::Info id_info = nxFile.getInfo();
    if (id_info.dims.size() != 2) {
      g_log.error() << "Event data in McStas nexus file not loaded. Expected "
                       "event data block to be two dimensional\n";
      return scatteringWSNames;
    }
    int64_t nNeutrons = id_info.dims[0];
    int64_t numberOfDataColumn = id_info.dims[1];
    if (nNeutrons && numberOfDataColumn != 6) {
      g_log.error() << "Event data in McStas nexus file expecting 6 columns\n";
      return scatteringWSNames;
    }
    if (!isAnyNeutrons && nNeutrons > 0)
      isAnyNeutrons = true;

    std::vector<int64_t> start(2);
    std::vector<int64_t> step(2);

    // read the event data in blocks: 1 million events is 1,000,000*6 doubles
    // (8 bytes each), i.e. about 48 MB per block
    int64_t nNeutronsInBlock = 1000000;
    int64_t nOfFullBlocks = nNeutrons / nNeutronsInBlock;
    int64_t nRemainingNeutrons = nNeutrons - nOfFullBlocks * nNeutronsInBlock;
    // iterate over the full blocks plus one extra pass for the remainder
    for (int64_t iBlock = 0; iBlock < nOfFullBlocks + 1; iBlock++) {
      if (iBlock == nOfFullBlocks) {
        // read remaining neutrons
        start[0] = nOfFullBlocks * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nRemainingNeutrons;
        step[1] = numberOfDataColumn;
      } else {
        // read neutrons in a full block
        start[0] = iBlock * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nNeutronsInBlock;
        step[1] = numberOfDataColumn;
      }
      const int64_t nNeutronsForthisBlock =
          step[0]; // number of neutrons read for this block
      data.resize(nNeutronsForthisBlock * numberOfDataColumn);

      // Check that the type is what it is supposed to be
      if (id_info.type == ::NeXus::FLOAT64) {
        nxFile.getSlab(&data[0], start, step);
      } else {
        g_log.warning()
            << "Entry event field is not FLOAT64! It will be skipped.\n";
        continue;
      }

      // populate workspace with McStas events
      const detid2index_map detIDtoWSindex_map =
          allEventWS[0].first->getDetectorIDToWorkspaceIndexMap(true);

      progEntries.report("read event data into workspace");
      for (int64_t in = 0; in < nNeutronsForthisBlock; in++) {
        const int detectorID =
            static_cast<int>(data[4 + numberOfDataColumn * in]);
        const double detector_time = data[5 + numberOfDataColumn * in] *
                                     1.0e6; // convert to microseconds
        if (in == 0 && iBlock == 0) {
          shortestTOF = detector_time;
          longestTOF = detector_time;
        } else {
          if (detector_time < shortestTOF)
            shortestTOF = detector_time;
          if (detector_time > longestTOF)
            longestTOF = detector_time;
        }

        const size_t workspaceIndex =
            detIDtoWSindex_map.find(detectorID)->second;

        int64_t pulse_time = 0;
        auto weightedEvent = WeightedEvent();
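        // WeightedEvent is assumed here to take (tof, pulse_time, weight,
        // errorSquared): with ErrorBarsSetTo1 the squared error is fixed at
        // 1.0, otherwise errorSquared = p*p so the error equals the McStas
        // weight p.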
        if (errorBarsSetTo1) {
          weightedEvent = WeightedEvent(detector_time, pulse_time,
                                        data[numberOfDataColumn * in], 1.0);
        } else {
          weightedEvent = WeightedEvent(
              detector_time, pulse_time, data[numberOfDataColumn * in],
              data[numberOfDataColumn * in] * data[numberOfDataColumn * in]);
        }
        allEventWS[0].first->getSpectrum(workspaceIndex) += weightedEvent;
        if (!onlySummedEventWorkspace && numEventEntries > 1) {
          allEventWS[eventWSIndex].first->getSpectrum(workspaceIndex) +=
              weightedEvent;
        }
      }
    } // end reading over number of blocks of an event dataset
    // advance to the next per-entry workspace only after all blocks of this
    // entry have been read
    eventWSIndex++;

    nxFile.closeData();
    nxFile.closeGroup();

  } // end reading over number of event datasets

  // Create a default TOF-vector for histogramming; for now just 2 bins.
  // 2 bins is the standard, but for McStas simulation data it may make sense
  // to increase this number for a better initial visual effect.

  auto axis = HistogramData::BinEdges{shortestTOF - 1, longestTOF + 1};

  // ensure that specified name is given to workspace (eventWS) when added to
  // outputGroup
  for (const auto &eventWS : allEventWS) {
    const auto ws = eventWS.first;
    ws->setAllX(axis);
    AnalysisDataService::Instance().addOrReplace(eventWS.second, ws);
    scatteringWSNames.emplace_back(eventWS.second);
  }
  return scatteringWSNames;
}
/**
 * Read Event Data
 * @param eventEntries map of the file entries that have events
 * @param outputGroup pointer to the workspace group
 * @param nxFile NeXus file handle, opened inside the first top-level entry
 */
void LoadMcStas::readEventData(
    const std::map<std::string, std::string> &eventEntries,
    WorkspaceGroup_sptr &outputGroup, ::NeXus::File &nxFile) {
  std::string filename = getPropertyValue("Filename");
  auto entries = nxFile.getEntries();

  // We assume that each top-level entry contains one McStas-generated IDF and
  // that any event data entries within this top-level entry are data collected
  // for that instrument.
  // The code for loading the instrument is, for now, adapted from
  // ExperimentalInfo.

  // Close data folder and go back to top level. Then read and close the
  // Instrument folder.
  nxFile.closeGroup();

  Geometry::Instrument_sptr instrument;

  // Initialize progress reporting
  int reports = 2;
  const double progressFractionInitial = 0.1;
  Progress progInitial(this, 0.0, progressFractionInitial, reports);

  try {
    nxFile.openGroup("instrument", "NXinstrument");
    std::string instrumentXML;
    nxFile.openGroup("instrument_xml", "NXnote");
    nxFile.readData("data", instrumentXML);
    nxFile.closeGroup();
    nxFile.closeGroup();

    progInitial.report("Loading instrument");

    Geometry::InstrumentDefinitionParser parser;
    std::string instrumentName = "McStas";
    parser.initialize(filename, instrumentName, instrumentXML);
    std::string instrumentNameMangled = parser.getMangledName();

    // Check whether the instrument is already in the InstrumentDataService
    if (InstrumentDataService::Instance().doesExist(instrumentNameMangled)) {
      // If it does, just use the one from the one stored there
      instrument =
          InstrumentDataService::Instance().retrieve(instrumentNameMangled);
    } else {
      // Really create the instrument
      instrument = parser.parseXML(NULL);
      // Add to data service for later retrieval
      InstrumentDataService::Instance().add(instrumentNameMangled, instrument);
    }
  } catch (...) {
    // Loader should not stop if there is no IDF.xml
    g_log.warning()
        << "\nCould not find the instrument description in the Nexus file: "
        << filename << ". Ignoring event data from the data file" << std::endl;
    return;
  }
  // Finished reading Instrument. Then open new data folder again
  nxFile.openGroup("data", "NXdetector");

  // create and prepare an event workspace ready to receive the mcstas events
  progInitial.report("Set up EventWorkspace");
  EventWorkspace_sptr eventWS(new EventWorkspace());
  // initialize, where create up front number of eventlists = number of
  // detectors
  eventWS->initialize(instrument->getNumberDetectors(), 1, 1);
  // Set the units
  eventWS->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
  eventWS->setYUnit("Counts");
  // set the instrument
  eventWS->setInstrument(instrument);
  // assign detector ID to eventlists

  std::vector<detid_t> detIDs = instrument->getDetectorIDs();

  for (size_t i = 0; i < instrument->getNumberDetectors(); i++) {
    eventWS->getEventList(i).addDetectorID(detIDs[i]);
    // spectrum number are treated as equal to detector IDs for McStas data
    eventWS->getEventList(i).setSpectrumNo(detIDs[i]);
  }
  // the 'true' argument is here for the moment for backward compatibility
  eventWS->rebuildSpectraMapping(true);

  bool isAnyNeutrons = false;
  // to store shortest and longest recorded TOF
  double shortestTOF(0.0);
  double longestTOF(0.0);

  const size_t numEventEntries = eventEntries.size();
  Progress progEntries(this, progressFractionInitial, 1.0, numEventEntries * 2);
  for (auto eit = eventEntries.begin(); eit != eventEntries.end(); ++eit) {
    std::string dataName = eit->first;
    std::string dataType = eit->second;

    // open second level entry
    nxFile.openGroup(dataName, dataType);
    std::vector<double> data;
    nxFile.openData("events");
    progEntries.report("read event data from nexus");

    // Note that the nexus readData method reads a multi-column data entry
    // into a flat vector. The number of data columns per neutron is here
    // hardcoded to 6: (p, x, y, n, id, t)
    // column  0 : p    neutron weight
    // column  1 : x    x coordinate
    // column  2 : y    y coordinate
    // column  3 : n    accumulated number of neutrons
    // column  4 : id   pixel id
    // column  5 : t    time
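    // As above, the pixel id and time of neutron `in` are read as
    //   id = data[4 + numberOfDataColumn * in];
    //   t  = data[5 + numberOfDataColumn * in];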

    // get info about event data
    ::NeXus::Info id_info = nxFile.getInfo();
    if (id_info.dims.size() != 2) {
      g_log.error() << "Event data in McStas nexus file not loaded. Expected "
                       "event data block to be two dimensional" << std::endl;
      return;
    }
    int64_t nNeutrons = id_info.dims[0];
    int64_t numberOfDataColumn = id_info.dims[1];
    if (nNeutrons && numberOfDataColumn != 6) {
      g_log.error() << "Event data in McStas nexus file expecting 6 columns"
                    << std::endl;
      return;
    }
    if (!isAnyNeutrons && nNeutrons > 0)
      isAnyNeutrons = true;

    std::vector<int64_t> start(2);
    std::vector<int64_t> step(2);

    // read the event data in blocks: 1 million events is 1,000,000*6 doubles
    // (8 bytes each), i.e. about 48 MB per block
    int64_t nNeutronsInBlock = 1000000;
    int64_t nOfFullBlocks = nNeutrons / nNeutronsInBlock;
    int64_t nRemainingNeutrons = nNeutrons - nOfFullBlocks * nNeutronsInBlock;
    // iterate over the full blocks plus one extra pass for the remainder
    for (int64_t iBlock = 0; iBlock < nOfFullBlocks + 1; iBlock++) {
      if (iBlock == nOfFullBlocks) {
        // read remaining neutrons
        start[0] = nOfFullBlocks * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nRemainingNeutrons;
        step[1] = numberOfDataColumn;
      } else {
        // read neutrons in a full block
        start[0] = iBlock * nNeutronsInBlock;
        start[1] = 0;
        step[0] = nNeutronsInBlock;
        step[1] = numberOfDataColumn;
      }
      const int64_t nNeutronsForthisBlock =
          step[0]; // number of neutrons read for this block
      data.resize(nNeutronsForthisBlock * numberOfDataColumn);

      // Check that the type is what it is supposed to be
      if (id_info.type == ::NeXus::FLOAT64) {
        nxFile.getSlab(&data[0], start, step);
      } else {
        g_log.warning()
            << "Entry event field is not FLOAT64! It will be skipped.\n";
        continue;
      }

      // populate workspace with McStas events
      const detid2index_map detIDtoWSindex_map =
          eventWS->getDetectorIDToWorkspaceIndexMap(true);

      progEntries.report("read event data into workspace");
      for (int64_t in = 0; in < nNeutronsForthisBlock; in++) {
        const int detectorID =
            static_cast<int>(data[4 + numberOfDataColumn * in]);
        const double detector_time = data[5 + numberOfDataColumn * in] *
                                     1.0e6; // convert to microseconds
        if (in == 0 && iBlock == 0) {
          shortestTOF = detector_time;
          longestTOF = detector_time;
        } else {
          if (detector_time < shortestTOF)
            shortestTOF = detector_time;
          if (detector_time > longestTOF)
            longestTOF = detector_time;
        }

        const size_t workspaceIndex =
            detIDtoWSindex_map.find(detectorID)->second;

        int64_t pulse_time = 0;
        // eventWS->getEventList(workspaceIndex) +=
        // TofEvent(detector_time,pulse_time);
        // eventWS->getEventList(workspaceIndex) += TofEvent(detector_time);
        eventWS->getEventList(workspaceIndex) += WeightedEvent(
            detector_time, pulse_time, data[numberOfDataColumn * in], 1.0);
      }
    } // end reading over number of blocks of an event dataset

    // nxFile.getData(data);
    nxFile.closeData();
    nxFile.closeGroup();

  } // end reading over number of event datasets

  // Create a default TOF-vector for histogramming; for now just 2 bins.
  // 2 bins is the standard, but for McStas simulation data it may make sense
  // to increase this number for a better initial visual effect.
  Kernel::cow_ptr<MantidVec> axis;
  MantidVec &xRef = axis.access();
  xRef.resize(2, 0.0);
  // if ( nNeutrons > 0)
  if (isAnyNeutrons) {
    xRef[0] = shortestTOF - 1; // Just to make sure the bins hold it all
    xRef[1] = longestTOF + 1;
  }
  // Set the binning axis
  eventWS->setAllX(axis);

  // ensure that specified name is given to workspace (eventWS) when added to
  // outputGroup
  std::string nameOfGroupWS = getProperty("OutputWorkspace");
  std::string nameUserSee = std::string("EventData_") + nameOfGroupWS;
  std::string extraProperty =
      "Outputworkspace_dummy_" +
      boost::lexical_cast<std::string>(m_countNumWorkspaceAdded);
  declareProperty(new WorkspaceProperty<Workspace>(extraProperty, nameUserSee,
                                                   Direction::Output));
  setProperty(extraProperty, boost::static_pointer_cast<Workspace>(eventWS));
  m_countNumWorkspaceAdded++; // need to increment to ensure extraProperty are
                              // unique

  outputGroup->addWorkspace(eventWS);
}