/**
 * Convert the input workspace to the transformed output dimensions.
 * @param inputWs : Input matrix workspace
 * @return a 2D matrix workspace containing the transformed (d0 vs d1) signal
 */
Mantid::API::MatrixWorkspace_sptr ReflectometryTransform::execute(
    Mantid::API::MatrixWorkspace_const_sptr inputWs) const {
  auto ws = boost::make_shared<Mantid::DataObjects::Workspace2D>();

  ws->initialize(m_d1NumBins, m_d0NumBins,
                 m_d0NumBins); // Create the output workspace as a distribution

  // Mapping so that d0 and d1 values calculated can be added to the matrix
  // workspace at the correct index.
  const double gradD0 =
      double(m_d0NumBins) / (m_d0Max - m_d0Min); // The x-axis
  const double gradD1 =
      double(m_d1NumBins) / (m_d1Max - m_d1Min); // The y-axis
  const double cxToIndex = -gradD0 * m_d0Min;
  const double cyToIndex = -gradD1 * m_d1Min;
  const double cxToD0 = m_d0Min - (1 / gradD0);
  const double cyToD1 = m_d1Min - (1 / gradD1);

  // Create an X-axis.
  MantidVec xAxisVec = createXAxis(ws.get(), gradD0, cxToD0, m_d0NumBins,
                                   m_d0Label, "1/Angstroms");
  // Create a Y (vertical) Axis
  createVerticalAxis(ws.get(), xAxisVec, gradD1, cyToD1, m_d1NumBins, m_d1Label,
                     "1/Angstroms");

  // Loop over all entries in the input workspace and calculate d0 and d1
  // for each.
  auto spectraAxis = inputWs->getAxis(1);
  for (size_t index = 0; index < inputWs->getNumberHistograms(); ++index) {
    const auto &counts = inputWs->readY(index);
    const auto &wavelengths = inputWs->readX(index);
    const auto &errors = inputWs->readE(index);
    const size_t nInputBins = wavelengths.size() - 1;
    const double theta_final = spectraAxis->getValue(index);
    m_calculator->setThetaFinal(theta_final);
    // Loop over all bins in the spectrum
    for (size_t binIndex = 0; binIndex < nInputBins; ++binIndex) {
      const double wavelength =
          0.5 * (wavelengths[binIndex] + wavelengths[binIndex + 1]);
      double _d0 = m_calculator->calculateDim0(wavelength);
      double _d1 = m_calculator->calculateDim1(wavelength);

      if (_d0 >= m_d0Min && _d0 <= m_d0Max && _d1 >= m_d1Min &&
          _d1 <= m_d1Max) // Check that the calculated ki and kf are in range
      {
        const int outIndexX = static_cast<int>((gradD0 * _d0) + cxToIndex);
        const int outIndexZ = static_cast<int>((gradD1 * _d1) + cyToIndex);

        ws->dataY(outIndexZ)[outIndexX] += counts[binIndex];
        ws->dataE(outIndexZ)[outIndexX] += errors[binIndex];
      }
    }
  }
  return ws;
}
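The index arithmetic above (gradD0, cxToIndex and friends) is a linear map from a physical value onto an output bin number. A minimal, self-contained sketch of that mapping, with standalone names invented for illustration:

// Map a value v in [vMin, vMax) onto one of nBins equal-width bins.
inline int binIndex(double v, double vMin, double vMax, int nBins) {
  const double grad = static_cast<double>(nBins) / (vMax - vMin); // bins per unit
  const double offset = -grad * vMin; // plays the role of cxToIndex above
  return static_cast<int>(grad * v + offset); // truncation floors non-negative values
}

// Example: with 100 bins over [0.0, 2.0], a value of 1.0 lands in bin 50.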
/** Apply dead time correction to spectra in inputWs and create temporary workspace with corrected spectra
* @param inputWs :: [input] input workspace containing spectra to correct
* @param deadTimeTable :: [input] table containing dead times
* @param tempWs :: [output] workspace containing corrected spectra
*/
void PhaseQuadMuon::deadTimeCorrection(API::MatrixWorkspace_sptr inputWs, API::ITableWorkspace_sptr deadTimeTable, API::MatrixWorkspace_sptr& tempWs)
{

  // Apply the correction only from t = m_tPulseOver onwards.
  // To do so, we first apply the correction to the whole spectrum
  // (ApplyDeadTimeCorr does not allow selecting a range within the spectrum),
  // then restore the original counts from 0 to m_tPulseOver.

  auto alg = this->createChildAlgorithm("ApplyDeadTimeCorr",-1,-1);
  alg->initialize();
  alg->setProperty("DeadTimeTable", deadTimeTable);
  alg->setPropertyValue("InputWorkspace", inputWs->getName());
  alg->setPropertyValue("OutputWorkspace", inputWs->getName()+"_deadtime");
  bool sucDeadTime = alg->execute();
  if (!sucDeadTime)
  {
    g_log.error() << "PhaseQuad: Unable to apply dead time corrections" << std::endl;
    throw std::runtime_error("PhaseQuad: Unable to apply dead time corrections");
  }
  tempWs = alg->getProperty("OutputWorkspace");

  // Now recover counts from t=0 to m_tPulseOver
  // Errors are set to m_bigNumber
  for (int h=0; h<m_nHist; h++)
  {
    auto specOld = inputWs->getSpectrum(h);
    auto specNew = tempWs->getSpectrum(h);

    for (int t=0; t<m_tPulseOver; t++)
    {
      specNew->dataY()[t] = specOld->dataY()[t];
      specNew->dataE()[t] = m_bigNumber;
    }
  }

}
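The actual correction is delegated to the ApplyDeadTimeCorr child algorithm. As a rough sketch of the idea behind such a correction (a standard non-paralyzable dead-time model, assumed here for illustration rather than Mantid's exact implementation), the measured counts M in each bin are rescaled by the estimated fraction of live time:

#include <vector>

// tau: detector dead time; tBin: bin width; nFrames: number of frames summed.
// All three names are assumptions of this sketch.
std::vector<double> correctDeadTime(const std::vector<double> &measured,
                                    double tau, double tBin, double nFrames) {
  std::vector<double> corrected(measured.size());
  for (size_t i = 0; i < measured.size(); ++i) {
    const double live = 1.0 - measured[i] * tau / (tBin * nFrames);
    corrected[i] = measured[i] / live; // diverges as live -> 0; guard in real code
  }
  return corrected;
}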
Example #3
/// Construct from ISpectrum.
Histogram1D::Histogram1D(const ISpectrum &other) : ISpectrum(other) {
    dataY() = other.readY();
    dataE() = other.readE();
}
Example #4
/// Assignment from ISpectrum.
Histogram1D &Histogram1D::operator=(const ISpectrum &rhs) {
    ISpectrum::operator=(rhs);
    dataY() = rhs.readY();
    dataE() = rhs.readE();
    return *this;
}
Example #5
/**
 * Estimate the corresponding PM parameters from high-level indicators using a
 * Gaussian process.
 *
 * 1) Generate 2000 samples and compute their high-level indicators.
 * 2) Use a Gaussian process to estimate the PM parameters from the
 *    high-level indicators.
 * 3) Compute the error of the estimates.
 */
void MainWindow::onInversePMByGaussianProcess() {
	const int N = 2000;

	if (!QDir("samples").exists()) QDir().mkdir("samples");

	cout << "Generating samples..." << endl;

	cv::Mat_<double> dataX(N, 14);
	cv::Mat_<double> dataY(N, 16);
	int seed_count = 0;
	for (int iter = 0; iter < N; ++iter) {
		cout << iter << endl;

		while (true) {
			glWidget->tree->randomInit(seed_count++);
			if (glWidget->tree->generate()) break;
		}

		vector<float> params = glWidget->tree->getParams();
		for (int col = 0; col < dataX.cols; ++col) {
			dataX(iter, col) = params[col];
		}

		vector<float> statistics = glWidget->tree->getStatistics3();
		for (int col = 0; col < dataY.cols - 1; ++col) {
			dataY(iter, col) = statistics[col];
		}
		dataY(iter, dataY.cols - 1) = 1; // constant term
	}

	glWidget->update();
	controlWidget->update();

	// normalization
	cv::Mat_<double> muX, muY;
	cv::reduce(dataX, muX, 0, CV_REDUCE_AVG);
	cv::reduce(dataY, muY, 0, CV_REDUCE_AVG);
	cv::Mat_<double> dataX2 = dataX - cv::repeat(muX, N, 1);
	cv::Mat_<double> dataY2 = dataY - cv::repeat(muY, N, 1);

	// Scale to [-1, 1]
	cv::Mat_<double> maxX, maxY;
	cv::reduce(cv::abs(dataX2), maxX, 0, CV_REDUCE_MAX);
	cv::reduce(cv::abs(dataY2), maxY, 0, CV_REDUCE_MAX);
	dataX2 /= cv::repeat(maxX, N, 1);
	dataY2 /= cv::repeat(maxY, N, 1);

	// Constant term (restore after normalization)
	for (int r = 0; r < N; ++r) {
		dataY2(r, dataY2.cols - 1) = 1;
	}

	cv::Mat_<double> error = cv::Mat_<double>::zeros(1, dataX2.cols);
	cv::Mat_<double> error2 = cv::Mat_<double>::zeros(1, dataX2.cols);

	GaussianProcess gp(dataY2);
	for (int iter = 0; iter < dataY2.rows; ++iter) {
		cout << iter << endl;

		cv::Mat normalized_x_hat = gp.predict(dataY2.row(iter), dataY2, dataX2);
		cv::Mat x_hat = normalized_x_hat.mul(maxX) + muX;

		error += (dataX2.row(iter) - normalized_x_hat).mul(dataX2.row(iter) - normalized_x_hat);
		error2 += (dataX.row(iter) - x_hat).mul(dataX.row(iter) - x_hat);

		if (iter % 100 == 0) {
			glWidget->tree->setParams(dataX.row(iter));
			glWidget->updateGL();
			QString fileName = "samples/" + QString::number(iter / 100) + ".png";
			glWidget->grabFrameBuffer().save(fileName);

			glWidget->tree->setParams(x_hat);
			glWidget->updateGL();
			fileName = "samples/reversed_" + QString::number(iter / 100) + ".png";
			glWidget->grabFrameBuffer().save(fileName);
		}
	}

	error /= N;
	error2 /= N;
	cv::sqrt(error, error);
	cv::sqrt(error2, error2);

	cout << "Prediction error (normalized):" << endl;
	cout << error << endl;
	cout << "Prediction error:" << endl;
	cout << error2 << endl;
}
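The GaussianProcess class used above is project-specific. As a sketch of what a call like gp.predict(y, dataY2, dataX2) typically computes (an assumption about the technique, not this project's exact code), here is a GP posterior mean with an RBF kernel, x_hat = k_*^T (K + sigma^2 I)^{-1} X, written with OpenCV:

#include <opencv2/core/core.hpp>
#include <cmath>

static double rbf(const cv::Mat &a, const cv::Mat &b, double lengthScale) {
	const double d2 = cv::norm(a, b, cv::NORM_L2SQR);
	return std::exp(-d2 / (2.0 * lengthScale * lengthScale));
}

// Y: n x dy training inputs, X: n x dx training outputs, yStar: 1 x dy query.
cv::Mat gpPredictMean(const cv::Mat &yStar, const cv::Mat &Y, const cv::Mat &X,
                      double lengthScale = 1.0, double noise = 1e-6) {
	const int n = Y.rows;
	cv::Mat K(n, n, CV_64F), kStar(1, n, CV_64F);
	for (int i = 0; i < n; ++i) {
		kStar.at<double>(0, i) = rbf(yStar, Y.row(i), lengthScale);
		for (int j = 0; j < n; ++j)
			K.at<double>(i, j) =
			    rbf(Y.row(i), Y.row(j), lengthScale) + (i == j ? noise : 0.0);
	}
	return kStar * K.inv(cv::DECOMP_CHOLESKY) * X; // 1 x dx predicted mean
}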
Example #6
/**
 * Cluster the data hierarchically and, for each cluster, use linear
 * regression to compute the PM parameters corresponding to the high-level
 * indicators.
 *
 * 1) Generate 2000 samples and compute their high-level indicators.
 * 2) Cluster the data hierarchically with k-means.
 * 3) For each cluster, compute the mapping matrix W by linear regression.
 * 4) Use W to estimate the PM parameters from the high-level indicators.
 * 5) Compute the error of the estimates.
 */
void MainWindow::onInversePMByHierarchicalLR() {
	const int N = 2000;

	if (!QDir("samples").exists()) QDir().mkdir("samples");

	cout << "Generating samples..." << endl;

	cv::Mat_<double> dataX(N, 14);
	cv::Mat_<double> dataY(N, 16);
	int seed_count = 0;
	for (int iter = 0; iter < N; ++iter) {
		cout << iter << endl;

		while (true) {
			glWidget->tree->randomInit(seed_count++);
			if (glWidget->tree->generate()) break;
		}

		vector<float> params = glWidget->tree->getParams();
		for (int col = 0; col < dataX.cols; ++col) {
			dataX(iter, col) = params[col];
		}

		vector<float> statistics = glWidget->tree->getStatistics3();
		for (int col = 0; col < dataY.cols - 1; ++col) {
			dataY(iter, col) = statistics[col];
		}
		dataY(iter, dataY.cols - 1) = 1; // constant term
	}

	glWidget->update();
	controlWidget->update();

	// normalization
	cv::Mat_<double> muX, muY;
	cv::reduce(dataX, muX, 0, CV_REDUCE_AVG);
	cv::reduce(dataY, muY, 0, CV_REDUCE_AVG);
	cv::Mat_<double> dataX2 = dataX - cv::repeat(muX, N, 1);
	cv::Mat_<double> dataY2 = dataY - cv::repeat(muY, N, 1);

	// Scale to [-1, 1]
	cv::Mat_<double> maxX, maxY;
	cv::reduce(cv::abs(dataX2), maxX, 0, CV_REDUCE_MAX);
	cv::reduce(cv::abs(dataY2), maxY, 0, CV_REDUCE_MAX);
	dataX2 /= cv::repeat(maxX, N, 1);
	dataY2 /= cv::repeat(maxY, N, 1);

	// Constant term (restore after normalization)
	for (int r = 0; r < N; ++r) {
		dataY2(r, dataY2.cols - 1) = 1;
	}

	vector<cv::Mat_<float> > clusterX, clusterX2, clusterY2;
	vector<vector<int> > clusterIndices;
	{
		cv::Mat samplesX, samplesX2, samplesY2;
		dataX.convertTo(samplesX, CV_32F);
		dataX2.convertTo(samplesX2, CV_32F);
		dataY2.convertTo(samplesY2, CV_32F);
		vector<int> indices(N);
		for (int i = 0; i < N; ++i) indices[i] = i;
		DataPartition::partition(samplesX2, samplesY2, samplesX, indices, 20, clusterX2, clusterY2, clusterX, clusterIndices);

		for (int i = 0; i < clusterX.size(); ++i) {
			cout << clusterX[i].rows << endl;
		}
	}

	cv::Mat_<double> error = cv::Mat_<double>::zeros(1, dataX2.cols);
	cv::Mat_<double> error2 = cv::Mat_<double>::zeros(1, dataX2.cols);
	for (int clu = 0; clu < clusterX.size(); ++clu) {
		cv::Mat_<double> dataX;
		clusterX[clu].convertTo(dataX, CV_64F);
		cv::Mat_<double> dataX2;
		clusterX2[clu].convertTo(dataX2, CV_64F);
		cv::Mat_<double> dataY2;
		clusterY2[clu].convertTo(dataY2, CV_64F);

		// Find W by linear regression (from yW = x, W = y^+ x)
		cv::Mat_<double> W = dataY2.inv(cv::DECOMP_SVD) * dataX2;

		// Generate trees from the inverse mapping
		for (int iter = 0; iter < dataX2.rows; ++iter) {
			cv::Mat x2_hat = dataY2.row(iter) * W;
			cv::Mat x_hat = x2_hat.mul(maxX) + muX;
			error += (dataX2.row(iter) - x2_hat).mul(dataX2.row(iter) - x2_hat);
			error2 += (dataX.row(iter) - x_hat).mul(dataX.row(iter) - x_hat);

			if (clusterIndices[clu][iter] % 100 == 0) {
				glWidget->tree->setParams(dataX.row(iter));
				glWidget->updateGL();
				QString fileName = "samples/" + QString::number(clusterIndices[clu][iter] / 100) + ".png";
				glWidget->grabFrameBuffer().save(fileName);

				glWidget->tree->setParams(x_hat);
				glWidget->updateGL();
				fileName = "samples/reversed_" + QString::number(clusterIndices[clu][iter] / 100) + ".png";
				glWidget->grabFrameBuffer().save(fileName);
			}
		}
	}

	error /= N;
	error2 /= N;
	cv::sqrt(error, error);
	cv::sqrt(error2, error2);

	cout << "Prediction error (normalized):" << endl;
	cout << error << endl;
	cout << "Prediction error:" << endl;
	cout << error2 << endl;
}
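The per-cluster regression above solves yW = x in the least-squares sense via the Moore-Penrose pseudo-inverse, W = Y^+ X, which OpenCV provides through cv::DECOMP_SVD. A minimal standalone check of that step on toy data (names and values are illustrative only):

#include <opencv2/core/core.hpp>
#include <iostream>

int main() {
	// 4 samples, 2 input columns (Y), 1 output column (X); x = 2*y0 - y1.
	cv::Mat_<double> Y = (cv::Mat_<double>(4, 2) << 1, 0, 0, 1, 1, 1, 2, 1);
	cv::Mat_<double> X = (cv::Mat_<double>(4, 1) << 2, -1, 1, 3);
	cv::Mat_<double> W = Y.inv(cv::DECOMP_SVD) * X; // pseudo-inverse solve
	std::cout << W << std::endl; // approximately [2; -1]
	return 0;
}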
void TOFSANSResolutionByPixel::exec() {
  MatrixWorkspace_sptr inWS = getProperty("InputWorkspace");
  double deltaR = getProperty("DeltaR");
  double R1 = getProperty("SourceApertureRadius");
  double R2 = getProperty("SampleApertureRadius");
  const bool doGravity = getProperty("AccountForGravity");

  // Check the input
  checkInput(inWS);

  // Set up the output workspace
  auto outWS = setupOutputWorkspace(inWS);

  // Convert to meters
  deltaR /= 1000.0;
  R1 /= 1000.0;
  R2 /= 1000.0;

  // The moderator workspace needs to match the data workspace
  // in terms of wavelength binning
  const MatrixWorkspace_sptr sigmaModeratorVSwavelength =
      getModeratorWorkspace(inWS);

  // create interpolation table from sigmaModeratorVSwavelength
  Kernel::Interpolation lookUpTable;

  const MantidVec &xInterpolate = sigmaModeratorVSwavelength->readX(0);
  const MantidVec &yInterpolate = sigmaModeratorVSwavelength->readY(0);

  // Prefer the input to be a point workspace; create the interpolation function
  if (sigmaModeratorVSwavelength->isHistogramData()) {
    g_log.notice() << "mid-points of SigmaModerator histogram bins will be "
                      "used for interpolation.";

    for (size_t i = 0; i < xInterpolate.size() - 1; ++i) {
      const double midpoint = (xInterpolate[i + 1] + xInterpolate[i]) / 2.0;
      lookUpTable.addPoint(midpoint, yInterpolate[i]);
    }
  } else {
    for (size_t i = 0; i < xInterpolate.size(); ++i) {
      lookUpTable.addPoint(xInterpolate[i], yInterpolate[i]);
    }
  }

  // Calculate the L1 distance
  const V3D samplePos = inWS->getInstrument()->getSample()->getPos();
  const V3D sourcePos = inWS->getInstrument()->getSource()->getPos();
  const V3D SSD = samplePos - sourcePos;
  const double L1 = SSD.norm();

  // Get the collimation length
  double LCollim = getProperty("CollimationLength");

  if (LCollim == 0.0) {
    auto collimationLengthEstimator = SANSCollimationLengthEstimator();
    LCollim = collimationLengthEstimator.provideCollimationLength(inWS);
    g_log.information() << "No collimation length was specified. A default "
                           "collimation length was estimated to be " << LCollim
                        << std::endl;
  } else {
    g_log.information() << "The collimation length is  " << LCollim
                        << std::endl;
  }

  const int numberOfSpectra = static_cast<int>(inWS->getNumberHistograms());
  Progress progress(this, 0.0, 1.0, numberOfSpectra);

  for (int i = 0; i < numberOfSpectra; i++) {
    IDetector_const_sptr det;
    try {
      det = inWS->getDetector(i);
    } catch (Exception::NotFoundError &) {
      g_log.information() << "Spectrum index " << i
                          << " has no detector assigned to it - discarding"
                          << std::endl;
    }
    // If no detector found or if it's masked or a monitor, skip onto the next
    // spectrum
    if (!det || det->isMonitor() || det->isMasked())
      continue;

    // Get the flight path from the sample to the detector pixel
    const V3D samplePos = inWS->getInstrument()->getSample()->getPos();
    const V3D scatteredFlightPathV3D = det->getPos() - samplePos;
    const double L2 = scatteredFlightPathV3D.norm();

    TOFSANSResolutionByPixelCalculator calculator;
    const double waveLengthIndependentFactor =
        calculator.getWavelengthIndependentFactor(R1, R2, deltaR, LCollim, L2);

    // Multiplicative factor to go from lambda to Q. Note that
    // detectorTwoTheta returns the full scattering angle 2*theta,
    // hence the division by two below.
    const double theta = inWS->detectorTwoTheta(det);
    double sinTheta = sin(theta / 2.0);
    double factor = 4.0 * M_PI * sinTheta;

    const MantidVec &xIn = inWS->readX(i);
    const size_t xLength = xIn.size();

    // Gravity correction
    boost::shared_ptr<GravitySANSHelper> grav;
    if (doGravity) {
      grav = boost::make_shared<GravitySANSHelper>(inWS, det,
                                                   getProperty("ExtraLength"));
    }

    // Get handles on the outputWorkspace
    MantidVec &yOut = outWS->dataY(i);
    // For each wavelength bin of each pixel, calculate a Q resolution
    for (size_t j = 0; j < xLength - 1; j++) {
      // use the midpoint of each bin
      const double wl = (xIn[j + 1] + xIn[j]) / 2.0;
      // Calculate q. Alternatively q could be calculated using ConvertUnit
      // If we include a gravity correction we need to adjust sinTheta
      // for each wavelength (in Angstrom)
      if (doGravity) {
        double sinThetaGrav = grav->calcSinTheta(wl);
        factor = 4.0 * M_PI * sinThetaGrav;
      }
      const double q = factor / wl;

      // The wavelength spread is taken to be the bin width
      const double sigmaSpreadFromBin = xIn[j + 1] - xIn[j];

      // Get the uncertainty in Q
      auto sigmaQ = calculator.getSigmaQValue(lookUpTable.value(wl),
                                              waveLengthIndependentFactor, q,
                                              wl, sigmaSpreadFromBin, L1, L2);

      // Store the Q resolution in the output workspace
      yOut[j] = sigmaQ;
    }
    progress.report("Computing Q resolution");
  }

  // Set the y axis label
  outWS->setYUnitLabel("QResolution");

  setProperty("OutputWorkspace", outWS);
}
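For reference, the lambda-to-Q conversion inside the loop reduces to q = 4*pi*sin(theta/2)/lambda, where theta holds the full scattering angle 2-theta returned by detectorTwoTheta. A minimal sketch of that conversion (standalone name invented for illustration; with gravity enabled the sine term becomes wavelength-dependent via GravitySANSHelper):

#include <cmath>

// twoTheta in radians, wavelength in Angstrom; returns |Q| in 1/Angstrom.
inline double momentumTransfer(double twoTheta, double wavelength) {
  return 4.0 * M_PI * std::sin(twoTheta / 2.0) / wavelength;
}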