/// Solve the linear system Ax = b, with A being the
    /// combined derivative matrix of the residual and b
    /// being the residual itself.
    /// \param[in] residual   residual object containing A and b.
    /// \return               the solution x
    NewtonIterationBlackoilSimple::SolutionVector
    NewtonIterationBlackoilSimple::computeNewtonIncrement(const LinearisedBlackoilResidual& residual) const
    {
        typedef LinearisedBlackoilResidual::ADB ADB;
        const int np = residual.material_balance_eq.size();
        ADB mass_res = residual.material_balance_eq[0];
        for (int phase = 1; phase < np; ++phase) {
            mass_res = vertcat(mass_res, residual.material_balance_eq[phase]);
        }
        const ADB well_res = vertcat(residual.well_flux_eq, residual.well_eq);
        const ADB total_residual = collapseJacs(vertcat(mass_res, well_res));

        Eigen::SparseMatrix<double, Eigen::RowMajor> matr;
        total_residual.derivative()[0].toSparse(matr);

        SolutionVector dx(SolutionVector::Zero(total_residual.size()));
        Opm::LinearSolverInterface::LinearSolverReport rep
            = linsolver_->solve(matr.rows(), matr.nonZeros(),
                                matr.outerIndexPtr(), matr.innerIndexPtr(), matr.valuePtr(),
                                total_residual.value().data(), dx.data(), parallelInformation_);

        // store iterations
        iterations_ = rep.iterations;

        if (!rep.converged) {
            OPM_THROW(LinearSolverProblem,
                      "FullyImplicitBlackoilSolver::solveJacobianSystem(): "
                      "Linear solver convergence failure.");
        }
        return dx;
    }
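For reference, a minimal standalone sketch (not part of the example above; the 3x3 matrix and the main() wrapper are made up for illustration) of what the three raw arrays handed to linsolver_->solve() contain for a compressed row-major Eigen matrix:

#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main()
{
    // Small row-major matrix, built from triplets and compressed so that the
    // raw CSR arrays queried below are valid.
    Eigen::SparseMatrix<double, Eigen::RowMajor> A(3, 3);
    std::vector<Eigen::Triplet<double>> t{ {0,0,4.0}, {0,2,1.0}, {1,1,3.0}, {2,0,2.0}, {2,2,5.0} };
    A.setFromTriplets(t.begin(), t.end());
    A.makeCompressed();

    // For a RowMajor matrix:
    //   outerIndexPtr() -> row start offsets ("ia"), rows()+1 entries
    //   innerIndexPtr() -> column index of each stored value ("ja")
    //   valuePtr()      -> the stored values themselves ("sa")
    for (int r = 0; r <= A.rows(); ++r)    std::cout << A.outerIndexPtr()[r] << ' ';
    std::cout << '\n';
    for (int k = 0; k < A.nonZeros(); ++k) std::cout << A.innerIndexPtr()[k] << ' ';
    std::cout << '\n';
    for (int k = 0; k < A.nonZeros(); ++k) std::cout << A.valuePtr()[k] << ' ';
    std::cout << '\n';
    return 0;
}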
Example #2
template<class FType>
void MacauOnePrior<FType>::sample_latents(
    Eigen::MatrixXd &U,
    const Eigen::SparseMatrix<double> &Ymat,
    double mean_value,
    const Eigen::MatrixXd &V,
    double alpha,
    const int num_latent)
{
  const int N = U.cols();
  const int D = U.rows();

#pragma omp parallel for schedule(dynamic, 4)
  for (int i = 0; i < N; i++) {

    const int nnz = Ymat.outerIndexPtr()[i + 1] - Ymat.outerIndexPtr()[i];
    VectorXd Yhat(nnz);

    // precalculating Yhat and Qi
    int idx = 0;
    VectorXd Qi = lambda;
    for (SparseMatrix<double>::InnerIterator it(Ymat, i); it; ++it, idx++) {
      Qi.noalias() += alpha * V.col(it.row()).cwiseAbs2();
      Yhat(idx)     = mean_value + U.col(i).dot( V.col(it.row()) );
    }
    VectorXd rnorms(num_latent);
    bmrandn_single(rnorms);

    for (int d = 0; d < D; d++) {
      // computing Lid
      const double uid = U(d, i);
      double Lid = lambda(d) * (mu(d) + Uhat(d, i));

      idx = 0;
      for ( SparseMatrix<double>::InnerIterator it(Ymat, i); it; ++it, idx++) {
        const double vjd = V(d, it.row());
        // L_id += alpha * (Y_ij - k_ijd) * v_jd
        Lid += alpha * (it.value() - (Yhat(idx) - uid*vjd)) * vjd;
      }
      // Now use Lid and Qid to update uid
      double uid_old = U(d, i);
      double uid_var = 1.0 / Qi(d);

      // sampling new u_id ~ Norm(Lid / Qid, 1/Qid)
      U(d, i) = Lid * uid_var + sqrt(uid_var) * rnorms(d);

      // updating Yhat
      double uid_delta = U(d, i) - uid_old;
      idx = 0;
      for (SparseMatrix<double>::InnerIterator it(Ymat, i); it; ++it, idx++) {
        Yhat(idx) += uid_delta * V(d, it.row());
      }
    }
  }
}
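For readability, a comment-style summary of the per-coordinate update the loops above implement (same symbols as the code; Yhat_ij denotes mean_value + u_i.dot(v_j)):

// For each coordinate d of column i:
//   Q_id = lambda_d + alpha * sum_j v_jd^2                            (posterior precision)
//   L_id = lambda_d * (mu_d + Uhat_id)
//          + alpha * sum_j (Y_ij - (Yhat_ij - u_id * v_jd)) * v_jd
//   u_id ~ Norm(L_id / Q_id, 1 / Q_id)
// where the sums run over the observed entries j in column i of Y.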
Example #3
void createSearchKey(unsigned int numberRows, unsigned int NBFS, std::vector<int> &search_key, const Eigen::SparseMatrix<int> &EdgeMatrix)
{

//columndegree contains number of nonzeros per column
//for removing searchkey values that are not connected to main graph
    std::vector<int> columndegree;
    columndegree.reserve(numberRows);

    for (unsigned int i = 0; i < numberRows; i++)
    {
        columndegree.push_back(EdgeMatrix.outerIndexPtr()[i+1]-EdgeMatrix.outerIndexPtr()[i]);
    }

//generate search key values based on time seed
    std::mt19937 generator(std::chrono::system_clock::now().time_since_epoch()/std::chrono::seconds(1));

    std::cout << "creating search key vector" << std::endl;
    for (unsigned int i = 0; i < numberRows; i++)
    {
        search_key.push_back(i);
    }
//shuffle search key values
    std::shuffle(search_key.begin(),search_key.end(),generator);

//keep only the first NBFS search keys (or all of them if there are fewer)
    if (search_key.size() > NBFS)
    {
        for (unsigned int i = 0; i < NBFS+20 && i < search_key.size(); i++)
        {
            //remove search key values that aren't connected to main graph
            if (columndegree.at(search_key.at(i)) == 0)
            {
                search_key.erase(search_key.begin()+i);
                i--;
            }
        }
        if (search_key.size() > NBFS)
        {
            search_key.erase(search_key.begin()+NBFS, search_key.end());
        }
    }

    std::cout << "Removing search keys with no edges" << std::endl;
    for (unsigned int i = 0; i < search_key.size(); i++)
    {
        //remove search key values that aren't connected to main graph
        if (columndegree.at(search_key.at(i)) == 0)
        {
            search_key.erase(search_key.begin()+i);
            i--;
        }
    }

    search_key.shrink_to_fit();
}
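A small helper sketched for illustration only (the name columnDegrees is invented here): it restates the column-degree trick used above, i.e. for a compressed column-major Eigen::SparseMatrix the difference outerIndexPtr()[i+1] - outerIndexPtr()[i] is the number of nonzeros in column i, which is the vertex degree when the matrix is an adjacency matrix.

#include <Eigen/Sparse>
#include <cassert>
#include <vector>

// Per-column nonzero counts of a compressed column-major matrix.
std::vector<int> columnDegrees(const Eigen::SparseMatrix<int> &m)
{
    assert(m.isCompressed());
    std::vector<int> degree(m.cols());
    for (int i = 0; i < m.cols(); ++i) {
        degree[i] = m.outerIndexPtr()[i + 1] - m.outerIndexPtr()[i];
    }
    return degree;
}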
Example #4
ColCompressedMatrix convert_from_Eigen(const Eigen::SparseMatrix<double> &m)
{
	assert(m.rows() == m.cols());
	assert(m.isCompressed());
	return ColCompressedMatrix(m.rows(), m.nonZeros(), 
		m.valuePtr(), m.outerIndexPtr(), m.innerIndexPtr());
}
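A possible call site, sketched under the assumption that ColCompressedMatrix is the type declared elsewhere in this codebase (the helper name identityExample is made up): the asserts require a square matrix in compressed storage, hence the makeCompressed() call.

#include <Eigen/Sparse>
#include <vector>

// Builds an n-by-n identity, compresses it, and converts it.
ColCompressedMatrix identityExample(int n)
{
    std::vector<Eigen::Triplet<double>> t;
    for (int i = 0; i < n; ++i) {
        t.emplace_back(i, i, 1.0);
    }
    Eigen::SparseMatrix<double> m(n, n);
    m.setFromTriplets(t.begin(), t.end());
    m.makeCompressed();
    return convert_from_Eigen(m);
}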
Example #5
void ProbitNoise::evalModel(Eigen::SparseMatrix<double> & Ytest,
                            const int n,
                            Eigen::VectorXd & predictions,
                            Eigen::VectorXd & predictions_var,
                            const Eigen::MatrixXd &cols,
                            const Eigen::MatrixXd &rows,
                            double mean_rating) {
  const unsigned N = Ytest.nonZeros();
  Eigen::VectorXd pred(N);
  Eigen::VectorXd test(N);

// #pragma omp parallel for schedule(dynamic,8) reduction(+:se, se_avg) <- dark magic :)
  for (int k = 0; k < Ytest.outerSize(); ++k) {
    int idx = Ytest.outerIndexPtr()[k];
    for (Eigen::SparseMatrix<double>::InnerIterator it(Ytest,k); it; ++it) {
      pred[idx] = nCDF(cols.col(it.col()).dot(rows.col(it.row())));
      test[idx] = it.value();

      // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
      double pred_avg;
      if (n == 0) {
        pred_avg = pred[idx];
      } else {
        double delta = pred[idx] - predictions[idx];
        pred_avg = (predictions[idx] + delta / (n + 1));
        predictions_var[idx] += delta * (pred[idx] - pred_avg);
      }
      predictions[idx++] = pred_avg;
    }
  }
  auc_test_onesample = auc(pred,test);
  auc_test = auc(predictions, test);
}
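The per-entry mean/variance bookkeeping above follows the online algorithm linked in the comment; a small self-contained sketch of the same recurrence (helper name invented here):

// Online update of a running mean and an (unnormalised) variance accumulator
// after seeing sample number n (0-based), mirroring the bookkeeping above.
void onlineMeanVarUpdate(double x, int n, double &mean, double &m2)
{
    if (n == 0) {
        mean = x;
        m2   = 0.0;
    } else {
        const double delta = x - mean;
        mean += delta / (n + 1);
        m2   += delta * (x - mean);   // sample variance after N draws: m2 / (N - 1)
    }
}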
Example #6
CollOfScalar EquelleRuntimeCPU::solveForUpdate(const CollOfScalar& residual) const
{
    Eigen::SparseMatrix<double, Eigen::RowMajor> matr = residual.derivative()[0];

    CollOfScalar::V du = CollOfScalar::V::Zero(residual.size());

    Opm::time::StopWatch clock;
    clock.start();

    // solve(n, nnz, ia, ja, sa, rhs, solution): for the row-major (CSR)
    // matrix above, outerIndexPtr() gives the row start offsets ("ia"),
    // innerIndexPtr() the column index of each stored entry ("ja"), and
    // valuePtr() the stored values themselves ("sa").
    Opm::LinearSolverInterface::LinearSolverReport rep
        = linsolver_.solve(matr.rows(), matr.nonZeros(),
                           matr.outerIndexPtr(), matr.innerIndexPtr(), matr.valuePtr(),
                           residual.value().data(), du.data());

    if (verbose_ > 2) {
        std::cout << "        solveForUpdate: Linear solver took: " << clock.secsSinceLast() << " seconds." << std::endl;
    }
    if (!rep.converged) {
        OPM_THROW(std::runtime_error, "Linear solver convergence failure.");
    }
    return du;
}
ConvertToMklResult to_mkl(const Eigen::SparseMatrix<double> &Ain,
                          sparse_status_t &status) {
  ConvertToMklResult result;

  const int N = static_cast<int>(Ain.rows());
  // const-cast to work with C-api.
  int *row_starts = const_cast<int *>(Ain.outerIndexPtr());
  int *col_index = const_cast<int *>(Ain.innerIndexPtr());
  double *values = const_cast<double *>(Ain.valuePtr());

  result.descr.type = SPARSE_MATRIX_TYPE_GENERAL; /* Full matrix is stored */

  result.status =
      mkl_sparse_d_create_csr(&result.matrix, SPARSE_INDEX_BASE_ZERO, N, N,
                              row_starts, row_starts + 1, col_index, values);
  // Also report the outcome through the out-parameter.
  status = result.status;
  return result;
}
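A hedged usage sketch (assuming the MKL sparse BLAS header mkl_spblas.h and the ConvertToMklResult fields shown above; the helper name multiplyExample is made up). Because Eigen's default SparseMatrix is column-major, the CSR handle built by to_mkl effectively describes the transpose of Ain unless the matrix is symmetric, and the handle only references the Eigen buffers, so Ain must outlive it.

#include <mkl_spblas.h>
#include <Eigen/Sparse>

// Wrap an already-compressed Eigen matrix and compute y = op(A) * x,
// where op(A) is the transpose of A for a column-major, non-symmetric A.
void multiplyExample(const Eigen::SparseMatrix<double> &A,
                     const double *x, double *y)
{
    sparse_status_t status = SPARSE_STATUS_SUCCESS;
    ConvertToMklResult r = to_mkl(A, status);
    if (r.status != SPARSE_STATUS_SUCCESS) {
        return;
    }

    // y = 1.0 * op(A) * x + 0.0 * y
    mkl_sparse_d_mv(SPARSE_OPERATION_NON_TRANSPOSE, 1.0,
                    r.matrix, r.descr, x, 0.0, y);

    // The handle does not own the Eigen buffers, but it must still be released.
    mkl_sparse_destroy(r.matrix);
}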
    void NewtonIterationBlackoilInterleaved::formInterleavedSystem(const std::vector<ADB>& eqs,
                                                                   const Eigen::SparseMatrix<double, Eigen::RowMajor>& A,
                                                                   Mat& istlA) const
    {
        const int np = eqs.size();

        // Find the sparsity structure as the union of the per-phase block
        // sparsity structures (the Jacobians with respect to pressure),
        // using addition to form the union.
        Eigen::SparseMatrix<double> structure = eqs[0].derivative()[0];
        for (int phase = 0; phase < np; ++phase) {
            structure += eqs[phase].derivative()[0];
        }
        Eigen::SparseMatrix<double, Eigen::RowMajor> s = structure;

        // Create ISTL matrix with interleaved rows and columns (block structured).
        assert(np == 3);
        istlA.setSize(s.rows(), s.cols(), s.nonZeros());
        istlA.setBuildMode(Mat::row_wise);
        const int* ia = s.outerIndexPtr();
        const int* ja = s.innerIndexPtr();
        for (Mat::CreateIterator row = istlA.createbegin(); row != istlA.createend(); ++row) {
            int ri = row.index();
            for (int i = ia[ri]; i < ia[ri + 1]; ++i) {
                row.insert(ja[i]);
            }
        }
        const int size = s.rows();
        Span span[3] = { Span(size, 1, 0),
                         Span(size, 1, size),
                         Span(size, 1, 2*size) };
        for (int row = 0; row < size; ++row) {
            for (int col_ix = ia[row]; col_ix < ia[row + 1]; ++col_ix) {
                const int col = ja[col_ix];
                MatrixBlockType block;
                for (int p1 = 0; p1 < np; ++p1) {
                    for (int p2 = 0; p2 < np; ++p2) {
                        block[p1][p2] = A.coeff(span[p1][row], span[p2][col]);
                    }
                }
                istlA[row][col] = block;
            }
        }
    }