Example #1
 /** Method that is called after execution of the task. */
 void End()
 {
   DBG_ASSERT(!end_called_);
   DBG_ASSERT(start_called_);
   end_called_ = true;
   start_called_ = false;
   total_cputime_ += CpuTime() - start_cputime_;
   total_systime_ += SysTime() - start_systime_;
   total_walltime_ += WallclockTime() - start_walltime_;
 }
Example #2
 /** Method that is called before execution of the task. */
 void Start()
 {
   DBG_ASSERT(end_called_);
   DBG_ASSERT(!start_called_);
   end_called_ = false;
   start_called_ = true;
   start_cputime_ = CpuTime();
   start_systime_ = SysTime();
   start_walltime_ = WallclockTime();
 }
Example #3
 /** Method that is called after execution of the task for which
  *  timing might have been started.  This only updates the timing
  *  if the timing has indeed been conducted. This is useful to
  *  stop timing after catching exceptions. */
 void EndIfStarted()
 {
   if (start_called_) {
     end_called_ = true;
     start_called_ = false;
     total_cputime_ += CpuTime() - start_cputime_;
     total_systime_ += SysTime() - start_systime_;
     total_walltime_ += WallclockTime() - start_walltime_;
   }
   DBG_ASSERT(end_called_);
 }
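
Together, Start() opens a measurement, End() closes it unconditionally, and EndIfStarted() is the exception-safe variant to use after a catch. Below is a minimal, self-contained sketch of that pairing (SketchTimer, its use of std::clock(), and TimeTask are illustrative stand-ins, not the Ipopt classes; the DBG_ASSERT bookkeeping is omitted):

 #include <ctime>
 #include <stdexcept>

 /** Minimal stand-in with the same Start()/End()/EndIfStarted() contract as
  *  the methods above; the class name and the use of std::clock() are
  *  illustrative only. */
 class SketchTimer
 {
 public:
   void Start()
   {
     start_called_ = true;
     start_cputime_ = std::clock();
   }
   void End()
   {
     start_called_ = false;
     total_cputime_ += std::clock() - start_cputime_;
   }
   void EndIfStarted()
   {
     if (start_called_) {
       End();
     }
   }
   double TotalSeconds() const
   {
     return double(total_cputime_) / CLOCKS_PER_SEC;
   }
 private:
   bool start_called_ = false;
   std::clock_t start_cputime_ = 0;
   std::clock_t total_cputime_ = 0;
 };

 /** Typical pairing: End() on the normal path, EndIfStarted() after catching,
  *  so the accumulated time stays consistent even if the task throws. */
 double TimeTask(bool fail)
 {
   SketchTimer timer;
   timer.Start();
   try {
     if (fail) {
       throw std::runtime_error("task failed");
     }
     timer.End();
   }
   catch (const std::exception&) {
     timer.EndIfStarted();
   }
   return timer.TotalSeconds();
 }
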
Example #4
 ESymSolverStatus MumpsSolverInterface::Solve(Index nrhs, double *rhs_vals)
 {
   DBG_START_METH("MumpsSolverInterface::Solve", dbg_verbosity);
   DMUMPS_STRUC_C* mumps_data = (DMUMPS_STRUC_C*)mumps_ptr_;
   ESymSolverStatus retval = SYMSOLVER_SUCCESS;
   if (HaveIpData()) {
     IpData().TimingStats().LinearSystemBackSolve().Start();
   }
   for (Index i = 0; i < nrhs; i++) {
     Index offset = i * mumps_data->n;
     mumps_data->rhs = &(rhs_vals[offset]);
     mumps_data->job = 3;//solve
     Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                    "Calling MUMPS-3 for solve at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
     dmumps_c(mumps_data);
     Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                    "Done with MUMPS-3 for solve at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
     int error = mumps_data->info[0];
     if (error < 0) {
       Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                      "Error=%d returned from MUMPS in Solve.\n",
                      error);
       retval = SYMSOLVER_FATAL_ERROR;
     }
   }
   if (HaveIpData()) {
     IpData().TimingStats().LinearSystemBackSolve().End();
   }
   return retval;
 }
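
One detail to keep in mind for all of the MUMPS examples: the members of DMUMPS_STRUC_C are 0-based C arrays, while the MUMPS documentation and the log messages above count ICNTL/INFO/INFOG from 1, so mumps_data->info[0] is INFO(1) and mumps_data->icntl[13] is ICNTL(14). The accessors below are illustrative only (they are not part of the MUMPS C API or of Ipopt) and merely spell out that index shift:

  #include "dmumps_c.h"

  // Illustrative only: MUMPS documents its control and status arrays with
  // 1-based indices (ICNTL(k), INFO(k)), while the C struct arrays are
  // 0-based, so INFO(1) is info[0] and ICNTL(14) is icntl[13].
  inline MUMPS_INT mumps_info(const DMUMPS_STRUC_C& data, int k)
  {
    return data.info[k - 1];
  }
  inline MUMPS_INT& mumps_icntl(DMUMPS_STRUC_C& data, int k)
  {
    return data.icntl[k - 1];
  }
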
Example #5
  ESymSolverStatus MumpsSolverInterface::Factorization(
    bool check_NegEVals, Index numberOfNegEVals)
  {
    DBG_START_METH("MumpsSolverInterface::Factorization", dbg_verbosity);
    DMUMPS_STRUC_C* mumps_data = (DMUMPS_STRUC_C*)mumps_ptr_;

    mumps_data->job = 2;//numerical factorization

    dump_matrix(mumps_data);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling MUMPS-2 for numerical factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    dmumps_c(mumps_data);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with MUMPS-2 for numerical factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    int error = mumps_data->info[0];

    //Check for errors
    if (error == -8 || error == -9) {//not enough memory
      const Index trycount_max = 20;
      for (int trycount=0; trycount<trycount_max; trycount++) {
        Jnlst().Printf(J_WARNING, J_LINEAR_ALGEBRA,
                       "MUMPS returned INFO(1) = %d and requires more memory, reallocating.  Attempt %d\n",
                       error,trycount+1);
        Jnlst().Printf(J_WARNING, J_LINEAR_ALGEBRA,
                       "  Increasing icntl[13] from %d to ", mumps_data->icntl[13]);
        double mem_percent = mumps_data->icntl[13];
        mumps_data->icntl[13] = (Index)(2.0 * mem_percent);
        Jnlst().Printf(J_WARNING, J_LINEAR_ALGEBRA, "%d.\n", mumps_data->icntl[13]);

        dump_matrix(mumps_data);
        Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                       "Calling MUMPS-2 (repeated) for numerical factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
        dmumps_c(mumps_data);
        Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                       "Done with MUMPS-2 (repeated) for numerical factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
        error = mumps_data->info[0];
        if (error != -8 && error != -9)
          break;
      }
      if (error == -8 || error == -9) {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "MUMPS was not able to obtain enough memory.\n");
        return SYMSOLVER_FATAL_ERROR;
      }
    }

    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Number of doubles for MUMPS to hold factorization (INFO(9)) = %d\n",
                   mumps_data->info[8]);
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Number of integers for MUMPS to hold factorization (INFO(10)) = %d\n",
                   mumps_data->info[9]);

    if (error == -10) {//system is singular
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "MUMPS returned INFO(1) = %d matrix is singular.\n",error);
      return SYMSOLVER_SINGULAR;
    }

    negevals_ = mumps_data->infog[11];

    if (error == -13) {
      Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                     "MUMPS returned INFO(1) =%d - out of memory when trying to allocate %d %s.\nIn some cases it helps to decrease the value of the option \"mumps_mem_percent\".\n",
                     error, mumps_data->info[1] < 0 ? -mumps_data->info[1] : mumps_data->info[1], mumps_data->info[1] < 0 ? "MB" : "bytes");
      return SYMSOLVER_FATAL_ERROR;
    }
    if (error < 0) {//some other error
      Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                     "MUMPS returned INFO(1) =%d MUMPS failure.\n",
                     error);
      return SYMSOLVER_FATAL_ERROR;
    }

    if (check_NegEVals && (numberOfNegEVals!=negevals_)) {
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "In MumpsSolverInterface::Factorization: negevals_ = %d, but numberOfNegEVals = %d\n",
                     negevals_, numberOfNegEVals);
      return SYMSOLVER_WRONG_INERTIA;
    }

    return SYMSOLVER_SUCCESS;
  }
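
The out-of-memory branch above retries the numerical factorization up to 20 times, doubling ICNTL(14) (the working-memory relaxation percentage, icntl[13] in the C struct) before each attempt. Below is a condensed sketch of the same pattern with the MUMPS call hidden behind a callback so the snippet stands on its own; the function and parameter names are illustrative and not part of Ipopt:

  #include <functional>

  // Condensed form of the retry loop above: run the factorization, and while
  // it reports "not enough memory" (INFO(1) = -8 or -9), double the memory
  // relaxation percentage and try again, up to a fixed number of attempts.
  // The callback stands in for setting icntl[13] and calling dmumps_c with
  // job = 2.
  int FactorizeWithMemoryRetry(int& mem_relaxation_percent,
                               const std::function<int(int)>& factorize,
                               int max_attempts = 20)
  {
    int error = factorize(mem_relaxation_percent);
    for (int attempt = 0;
         (error == -8 || error == -9) && attempt < max_attempts;
         attempt++) {
      mem_relaxation_percent *= 2;             // mirrors the icntl[13] doubling
      error = factorize(mem_relaxation_percent);
    }
    return error;                              // caller interprets INFO(1)
  }
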
Example #6
  ESymSolverStatus MumpsSolverInterface::SymbolicFactorization()
  {
    DBG_START_METH("MumpsSolverInterface::SymbolicFactorization",
                   dbg_verbosity);
    DMUMPS_STRUC_C* mumps_data = (DMUMPS_STRUC_C*)mumps_ptr_;

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemSymbolicFactorization().Start();
    }

    mumps_data->job = 1;//symbolic ordering pass

    //mumps_data->icntl[1] = 6;
    //mumps_data->icntl[2] = 6;//QUIETLY!
    //mumps_data->icntl[3] = 4;

    mumps_data->icntl[5] = mumps_permuting_scaling_;
    mumps_data->icntl[6] = mumps_pivot_order_;
    mumps_data->icntl[7] = mumps_scaling_;
    mumps_data->icntl[9] = 0;//no iterative refinement iterations


    mumps_data->icntl[12] = 1;//avoid lapack bug, ensures proper inertia
    mumps_data->icntl[13] = mem_percent_; //% memory to allocate over expected
    mumps_data->cntl[0] = pivtol_;  // Set pivot tolerance

    dump_matrix(mumps_data);

    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling MUMPS-1 for symbolic factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    dmumps_c(mumps_data);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with MUMPS-1 for symbolic factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    int error = mumps_data->info[0];
    const int& mumps_permuting_scaling_used = mumps_data->infog[22];
    const int& mumps_pivot_order_used = mumps_data->infog[6];
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "MUMPS used permuting_scaling %d and pivot_order %d.\n",
                   mumps_permuting_scaling_used, mumps_pivot_order_used);
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "           scaling will be %d.\n",
                   mumps_data->icntl[7]);

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemSymbolicFactorization().End();
    }

    // return the appropriate value
    if (error == -6) {//system is singular
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "MUMPS returned INFO(1) = %d matrix is singular.\n",error);
      return SYMSOLVER_SINGULAR;
    }
    if (error < 0) {
      Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                     "Error=%d returned from MUMPS in Factorization.\n",
                     error);
      return SYMSOLVER_FATAL_ERROR;
    }

    return SYMSOLVER_SUCCESS;
  }
  ESymSolverStatus IterativeWsmpSolverInterface::Solve(
    const Index* ia,
    const Index* ja,
    Index nrhs,
    double *rhs_vals)
  {
    DBG_START_METH("IterativeWsmpSolverInterface::Solve",dbg_verbosity);

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemBackSolve().Start();
    }

    // Call WISMP to solve for some right hand sides.  The solution
    // will be stored in rhs_vals, and we need to make a copy of the
    // original right hand side before the call.
    ipfint N = dim_;
    ipfint LDB = dim_;
    double* RHS = new double[dim_*nrhs];
    IpBlasDcopy(dim_*nrhs, rhs_vals, 1, RHS, 1);
    ipfint LDX = dim_; // Q: Do we have to zero out solution?
    ipfint NRHS = nrhs;
    IPARM_[1] = 4; // Iterative solver solution
    IPARM_[2] = 4;

    double* CVGH = NULL;
    if (Jnlst().ProduceOutput(J_MOREDETAILED, J_LINEAR_ALGEBRA)) {
      IPARM_[26] = 1; // Record convergence history
      CVGH = new double[IPARM_[5]+1];
    }
    else {
      IPARM_[26] = 0;
    }

    double ddmy;
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling WISMP-4-4 for backsolve at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    F77_FUNC(wismp,WISMP)(&N, ia, ja, a_, RHS, &LDB, rhs_vals, &LDX,
                          &NRHS, &ddmy, CVGH, IPARM_, DPARM_);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with WISMP-4-4 for backsolve at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemBackSolve().End();
    }
    delete [] RHS; // the copy of the original right hand side is no longer needed

    Index ierror = IPARM_[63];
    if (ierror!=0) {
      if (ierror==-102) {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error: WISMP is not able to allocate a sufficient amount of memory during the solve phase.\n");
      }
      else {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error in WISMP during the solve phase.\n     Error code is %d.\n", ierror);
      }
      delete [] CVGH; // no-op if the convergence history was not allocated
      return SYMSOLVER_FATAL_ERROR;
    }
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Number of itertive solver steps in WISMP: %d\n",
                   IPARM_[25]);
    if (Jnlst().ProduceOutput(J_MOREDETAILED, J_LINEAR_ALGEBRA)) {
      Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                     "WISMP congergence history:\n");
      for (Index i=0; i<=IPARM_[25]; ++i) {
        Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                       " Resid[%3d] = %13.6e\n", i, CVGH[i]);
      }
      delete [] CVGH;
    }

    return SYMSOLVER_SUCCESS;
  }
  ESymSolverStatus
  IterativeWsmpSolverInterface::Factorization(
    const Index* ia,
    const Index* ja,
    bool check_NegEVals,
    Index numberOfNegEVals)
  {
    DBG_START_METH("IterativeWsmpSolverInterface::Factorization",dbg_verbosity);

    // If desired, write out the matrix
    Index iter_count = -1;
    if (HaveIpData()) {
      iter_count = IpData().iter_count();
    }
    if (iter_count == wsmp_write_matrix_iteration_) {
      matrix_file_number_++;
      char buf[256];
      Snprintf(buf, 255, "wsmp_matrix_%d_%d.dat", iter_count,
               matrix_file_number_);
      Jnlst().Printf(J_SUMMARY, J_LINEAR_ALGEBRA,
                     "Writing WSMP matrix into file %s.\n", buf);
      FILE* fp = fopen(buf, "w");
      fprintf(fp, "%d\n", dim_); // N
      for (Index icol=0; icol<dim_; icol++) {
        fprintf(fp, "%d", ia[icol+1]-ia[icol]); // number of elements for this column
        // Now for each column we write row indices and values
        for (Index irow=ia[icol]; irow<ia[icol+1]; irow++) {
          fprintf(fp, " %23.16e %d",a_[irow-1],ja[irow-1]);
        }
        fprintf(fp, "\n");
      }
      fclose(fp);
    }

    // Check if we have to do the symbolic factorization and ordering
    // phase yet
    if (!have_symbolic_factorization_) {
      ESymSolverStatus retval = InternalSymFact(ia, ja);
      if (retval != SYMSOLVER_SUCCESS) {
        return retval;
      }
      have_symbolic_factorization_ = true;
    }

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemFactorization().Start();
    }

    // Call WSSMP for numerical factorization
    ipfint N = dim_;
    IPARM_[1] = 2; // value analysis
    IPARM_[2] = 3; // preconditioner generation
    DPARM_[10] = wsmp_pivtol_; // set current pivot tolerance
    ipfint idmy;
    double ddmy;

    // set drop tolerances for now....
    if (wsmp_inexact_droptol_ != 0.) {
      DPARM_[13] = wsmp_inexact_droptol_;
    }
    if (wsmp_inexact_fillin_limit_ != 0.) {
      DPARM_[14] = wsmp_inexact_fillin_limit_;
    }

    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling WISMP-2-3 with DPARM(14) = %8.2e and DPARM(15) = %8.2e.\n", DPARM_[13], DPARM_[14]);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling WISMP-2-3 for value analysis and preconditioner computation at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    F77_FUNC(wismp,WISMP)(&N, ia, ja, a_, &ddmy, &idmy, &ddmy, &idmy, &idmy,
                          &ddmy, &ddmy, IPARM_, DPARM_);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with WISMP-2-3 for value analysis and preconditioner computation at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with WISMP-2-3 with DPARM(14) = %8.2e and DPARM(15) = %8.2e.\n", DPARM_[13], DPARM_[14]);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "                         DPARM(4) = %8.2e and DPARM(5) = %8.2e and ratio = %8.2e.\n", DPARM_[3], DPARM_[4], DPARM_[3]/DPARM_[4]);

    const Index ierror = IPARM_[63];
    if (ierror > 0) {
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "WISMP detected that the matrix is singular and encountered %d zero pivots.\n", dim_+1-ierror);
      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemFactorization().End();
      }
      return SYMSOLVER_SINGULAR;
    }
    else if (ierror != 0) {
      if (ierror == -102) {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error: WISMP is not able to allocate sufficient amount of memory during factorization.\n");
      }
      else {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error in WSMP during factorization phase.\n     Error code is %d.\n", ierror);
      }
      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemFactorization().End();
      }
      return SYMSOLVER_FATAL_ERROR;
    }
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Memory usage for WISMP after factorization IPARM(23) = %d\n",
                   IPARM_[22]);

#if 0
    // Check whether the number of negative eigenvalues matches the requested
    // count
    if (check_NegEVals && (numberOfNegEVals!=negevals_)) {
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "Wrong inertia: required are %d, but we got %d.\n",
                     numberOfNegEVals, negevals_);
      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemFactorization().End();
      }
      return SYMSOLVER_WRONG_INERTIA;
    }
#endif

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemFactorization().End();
    }
    return SYMSOLVER_SUCCESS;
  }
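
The matrix dump at the top of this method writes one line with the dimension, followed by one line per column containing that column's entry count and its (value, row index) pairs. As a sanity check for such dumps, here is an illustrative reader for that layout; it is not part of Ipopt and only assumes the format produced by the writing loop above:

  #include <cstdio>
  #include <vector>

  // Illustrative reader for the wsmp_matrix_*.dat files written above: the
  // first line holds the dimension, each following line holds one column's
  // entry count followed by (value, row index) pairs.
  bool ReadWsmpMatrixDump(const char* filename,
                          std::vector<double>& values,
                          std::vector<int>& row_indices,
                          std::vector<int>& column_counts)
  {
    std::FILE* fp = std::fopen(filename, "r");
    if (fp == NULL) {
      return false;
    }
    int dim = 0;
    bool ok = (std::fscanf(fp, "%d", &dim) == 1 && dim >= 0);
    column_counts.assign(ok ? dim : 0, 0);
    for (int icol = 0; ok && icol < dim; icol++) {
      ok = (std::fscanf(fp, "%d", &column_counts[icol]) == 1);
      for (int k = 0; ok && k < column_counts[icol]; k++) {
        double val;
        int row;
        ok = (std::fscanf(fp, "%lf %d", &val, &row) == 2);
        if (ok) {
          values.push_back(val);
          row_indices.push_back(row);
        }
      }
    }
    std::fclose(fp);
    return ok;
  }
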
  ESymSolverStatus
  IterativeWsmpSolverInterface::InternalSymFact(
    const Index* ia,
    const Index* ja)
  {
    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemSymbolicFactorization().Start();
    }

    // Call WISMP for ordering and symbolic factorization
    ipfint N = dim_;
    IPARM_[1] = 1; // ordering
    IPARM_[2] = 1; // symbolic factorization
    ipfint idmy;
    double ddmy;
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling WISMP-1-1 for symbolic analysis at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    F77_FUNC(wismp,WISMP)(&N, ia, ja, a_, &ddmy, &idmy, &ddmy, &idmy, &idmy,
                          &ddmy, &ddmy, IPARM_, DPARM_);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with WISMP-1-1 for symbolic analysis at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());

    Index ierror = IPARM_[63];
    if (ierror!=0) {
      if (ierror==-102) {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error: WISMP is not able to allocate sufficient amount of memory during ordering/symbolic factorization.\n");
      }
      else if (ierror>0) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Matrix appears to be singular (with ierror = %d).\n",
                       ierror);
        if (HaveIpData()) {
          IpData().TimingStats().LinearSystemSymbolicFactorization().End();
        }
        return SYMSOLVER_SINGULAR;
      }
      else {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error in WISMP during ordering/symbolic factorization phase.\n     Error code is %d.\n", ierror);
      }
      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemSymbolicFactorization().End();
      }
      return SYMSOLVER_FATAL_ERROR;
    }
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Predicted memory usage for WISMP after symbolic factorization IPARM(23)= %d.\n",
                   IPARM_[22]);

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemSymbolicFactorization().End();
    }

    return SYMSOLVER_SUCCESS;
  }
Example #10
  ESymSolverStatus WsmpSolverInterface::Solve(
    const Index* ia,
    const Index* ja,
    Index nrhs,
    double *rhs_vals)
  {
    DBG_START_METH("WsmpSolverInterface::Solve",dbg_verbosity);

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemBackSolve().Start();
    }

    // Call WSMP to solve for some right hand sides (including
    // iterative refinement)
    // ToDo: Make iterative refinement an option?
    ipfint N = dim_;
    ipfint LDB = dim_;
    ipfint NRHS = nrhs;
    ipfint NAUX = 0;
    IPARM_[1] = 4; // Forward and Backward Elimination
    IPARM_[2] = 5; // Iterative refinement
    IPARM_[5] = 1;
    DPARM_[5] = 1e-12;

    double ddmy;
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling WSSMP-4-5 for backsolve at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());

#ifdef PARDISO_MATCHING_PREPROCESS
    double* X = new double[nrhs*N];

    // Apply the permutation and scaling from the matching preprocessing to the right hand side
    for (int i = 0; i < nrhs*N; i++) {
      X[perm2[i]] = scale2[i] * rhs_vals[i];
    }
    F77_FUNC(wssmp,WSSMP)(&N, ia, ja, a_, &ddmy, PERM_, INVP_,
                          X, &LDB, &NRHS, &ddmy, &NAUX,
                          MRP_, IPARM_, DPARM_);
    for (int i = 0; i < N; i++) {
      rhs_vals[i] = scale2[i]*X[perm2[i]];
    }
#else
    F77_FUNC(wssmp,WSSMP)(&N, ia, ja, a_, &ddmy, PERM_, INVP_,
                          rhs_vals, &LDB, &NRHS, &ddmy, &NAUX,
                          MRP_, IPARM_, DPARM_);
#endif

    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with WSSMP-4-5 for backsolve at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemBackSolve().End();
    }

    Index ierror = IPARM_[63];
    if (ierror!=0) {
      if (ierror==-102) {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error: WSMP is not able to allocate a sufficient amount of memory during the solve phase.\n");
      }
      else {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error in WSMP during the solve phase.\n     Error code is %d.\n", ierror);
      }
      return SYMSOLVER_FATAL_ERROR;
    }
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Number of iterative refinement steps in WSSMP: %d\n",
                   IPARM_[5]);

#ifdef PARDISO_MATCHING_PREPROCESS
    delete [] X;
#endif

    return SYMSOLVER_SUCCESS;
  }
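
All WSSMP calls in the WsmpSolverInterface examples follow the same convention: IPARM_[1] and IPARM_[2] (IPARM(2) and IPARM(3) in WSMP's 1-based documentation) hold the first and last task to perform in that call, with the task numbers used above being 1 = ordering, 2 = symbolic factorization, 3 = numerical factorization, 4 = forward and backward elimination, and 5 = iterative refinement. (The iterative WISMP examples use the same first-task/last-task convention, but with WISMP's own task numbering.) An illustrative helper, not part of the interface, that captures this:

  // Illustrative only: the task numbers are taken from the comments in the
  // examples above; each WSSMP call performs tasks IPARM(2) through IPARM(3).
  enum WsmpTaskSketch
  {
    WSMP_ORDERING = 1,
    WSMP_SYMBOLIC_FACTORIZATION = 2,
    WSMP_NUMERICAL_FACTORIZATION = 3,
    WSMP_FORWARD_BACKWARD_ELIMINATION = 4,
    WSMP_ITERATIVE_REFINEMENT = 5
  };

  inline void SetWsmpTaskRange(int* iparm, WsmpTaskSketch first_task, WsmpTaskSketch last_task)
  {
    iparm[1] = first_task; // IPARM(2): first task of this call
    iparm[2] = last_task;  // IPARM(3): last task of this call
  }
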
Example #11
  ESymSolverStatus
  WsmpSolverInterface::Factorization(
    const Index* ia,
    const Index* ja,
    bool check_NegEVals,
    Index numberOfNegEVals)
  {
    DBG_START_METH("WsmpSolverInterface::Factorization",dbg_verbosity);

    // If desired, write out the matrix
    Index iter_count = -1;
    if (HaveIpData()) {
      iter_count = IpData().iter_count();
    }
    if (iter_count == wsmp_write_matrix_iteration_) {
      matrix_file_number_++;
      char buf[256];
      Snprintf(buf, 255, "wsmp_matrix_%d_%d.dat", iter_count,
               matrix_file_number_);
      Jnlst().Printf(J_SUMMARY, J_LINEAR_ALGEBRA,
                     "Writing WSMP matrix into file %s.\n", buf);
      FILE* fp = fopen(buf, "w");
      fprintf(fp, "%d\n", dim_); // N
      for (Index icol=0; icol<dim_; icol++) {
        fprintf(fp, "%d", ia[icol+1]-ia[icol]); // number of elements for this column
        // Now for each column we write row indices and values
        for (Index irow=ia[icol]; irow<ia[icol+1]; irow++) {
          fprintf(fp, " %23.16e %d",a_[irow-1],ja[irow-1]);
        }
        fprintf(fp, "\n");
      }
      fclose(fp);
    }

    // Check if we have to do the symbolic factorization and ordering
    // phase yet
    if (!have_symbolic_factorization_) {
      ESymSolverStatus retval = InternalSymFact(ia, ja, numberOfNegEVals);
      if (retval != SYMSOLVER_SUCCESS) {
        return retval;
      }
      have_symbolic_factorization_ = true;
    }

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemFactorization().Start();
    }

    // Call WSSMP for numerical factorization
    ipfint N = dim_;
    ipfint NAUX = 0;
    IPARM_[1] = 3; // numerical factorization
    IPARM_[2] = 3; // numerical factorization
    DPARM_[10] = wsmp_pivtol_; // set current pivot tolerance
    ipfint idmy;
    double ddmy;

#ifdef PARDISO_MATCHING_PREPROCESS
    {
      ipfint* tmp2_  = new ipfint[N];
      smat_reordering_pardiso_wsmp_ (&N, ia, ja, a_, ia2, ja2, a2_, perm2, scale2, tmp2_, 1);
      delete[] tmp2_;
    }
#endif


    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling WSSMP-3-3 for numerical factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
#ifdef PARDISO_MATCHING_PREPROCESS
    F77_FUNC(wssmp,WSSMP)(&N, ia2, ja2, a2_, &ddmy, PERM_, INVP_, &ddmy, &idmy,
#else
    F77_FUNC(wssmp,WSSMP)(&N,  ia,  ja,  a_, &ddmy, PERM_, INVP_, &ddmy, &idmy,
#endif
                          &idmy, &ddmy, &NAUX, MRP_, IPARM_, DPARM_);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with WSSMP-3-3 for numerical factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());

    const Index ierror = IPARM_[63];
    if (ierror > 0) {
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "WSMP detected that the matrix is singular and encountered %d zero pivots.\n", dim_+1-ierror);
      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemFactorization().End();
      }
      return SYMSOLVER_SINGULAR;
    }
    else if (ierror != 0) {
      if (ierror == -102) {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error: WSMP is not able to allocate sufficient amount of memory during factorization.\n");
      }
      else {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error in WSMP during factorization phase.\n     Error code is %d.\n", ierror);
      }
      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemFactorization().End();
      }
      return SYMSOLVER_FATAL_ERROR;
    }
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Memory usage for WSSMP after factorization IPARM(23) = %d\n",
                   IPARM_[22]);
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Number of nonzeros in WSSMP after factorization IPARM(24) = %d\n",
                   IPARM_[23]);

    if (factorizations_since_recomputed_ordering_ != -1) {
      factorizations_since_recomputed_ordering_++;
    }

    negevals_ = IPARM_[21]; // Number of negative eigenvalues determined during factorization

    // Check whether the number of negative eigenvalues matches the requested
    // count
    if (check_NegEVals && (numberOfNegEVals!=negevals_)) {
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "Wrong inertia: required are %d, but we got %d.\n",
                     numberOfNegEVals, negevals_);
      if (skip_inertia_check_) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "  But wsmp_skip_inertia_check is set.  Ignore inertia.\n");
        IpData().Append_info_string("IC ");
        negevals_ = numberOfNegEVals;
      }
      else {
        if (HaveIpData()) {
          IpData().TimingStats().LinearSystemFactorization().End();
        }
        return SYMSOLVER_WRONG_INERTIA;
      }
    }

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemFactorization().End();
    }
    return SYMSOLVER_SUCCESS;
  }
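
The factorization and symbolic-factorization examples all decode IPARM_[63] (IPARM(64)) the same way: a positive value means zero pivots were encountered and the matrix is treated as singular, -102 means the solver could not allocate enough memory, and any other nonzero value is treated as fatal. A condensed, illustrative version of that decoding (the enum and function below are not part of Ipopt):

  // Illustrative decoding of the IPARM(64) status as handled in the examples
  // above: > 0 means zero pivots (singular matrix), -102 means an allocation
  // failure, any other nonzero value is treated as a fatal error.
  enum EWsmpStatusSketch
  {
    WSMP_SKETCH_OK,
    WSMP_SKETCH_SINGULAR,
    WSMP_SKETCH_OUT_OF_MEMORY,
    WSMP_SKETCH_FATAL
  };

  inline EWsmpStatusSketch DecodeWsmpStatus(int ierror)
  {
    if (ierror == 0) {
      return WSMP_SKETCH_OK;
    }
    if (ierror > 0) {
      return WSMP_SKETCH_SINGULAR;
    }
    if (ierror == -102) {
      return WSMP_SKETCH_OUT_OF_MEMORY;
    }
    return WSMP_SKETCH_FATAL;
  }
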
Example #12
  ESymSolverStatus
  WsmpSolverInterface::InternalSymFact(
    const Index* ia,
    const Index* ja,
    Index numberOfNegEVals)
  {
    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemSymbolicFactorization().Start();
    }

    // Create space for the permutations
    delete [] PERM_;
    PERM_ = NULL;
    delete [] INVP_;
    INVP_ = NULL;
    delete [] MRP_;
    MRP_ = NULL;
    PERM_ = new ipfint[dim_];
    INVP_ = new ipfint[dim_];
    MRP_ = new ipfint[dim_];

    ipfint N = dim_;

#ifdef PARDISO_MATCHING_PREPROCESS

    delete[] ia2;
    ia2 = NULL;

    delete[] ja2;
    ja2 = NULL;

    delete[] a2_;
    a2_ = NULL;

    delete[] perm2;
    perm2 = NULL;

    delete[] scale2;
    scale2 = NULL;

    ia2    = new ipfint[N+1];
    ja2    = new ipfint[nonzeros_];
    a2_    = new double[nonzeros_];
    perm2  = new ipfint[N];
    scale2 = new double[N];
    ipfint* tmp2_  = new ipfint[N];

    smat_reordering_pardiso_wsmp_(&N, ia, ja, a_, ia2, ja2, a2_, perm2,
                                  scale2, tmp2_, 0);

    delete[] tmp2_;

#endif


    // Call WSSMP for ordering and symbolic factorization
    ipfint NAUX = 0;
    IPARM_[1] = 1; // ordering
    IPARM_[2] = 2; // symbolic factorization
#ifdef PARDISO_MATCHING_PREPROCESS
    IPARM_[9]  =  2; // switch off WSMP's ordering and scaling
    IPARM_[15] = -1; // switch off WSMP's ordering and scaling
    IPARM_[30] =  6; // next step supernode pivoting, since not implemented
    // =2 regular Bunch/Kaufman
    // =1 no pivots
    // =6 limited pivots
    DPARM_[21] = 2e-8; // set pivot perturbation
#endif
    ipfint idmy;
    double ddmy;

    if (wsmp_no_pivoting_) {
      IPARM_[14] = dim_ - numberOfNegEVals; // CHECK
      Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                     "Restricting WSMP static pivot sequence with IPARM(15) = %d\n", IPARM_[14]);
    }

    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Calling WSSMP-1-2 for ordering and symbolic factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());
#ifdef PARDISO_MATCHING_PREPROCESS
    F77_FUNC(wssmp,WSSMP)(&N,  ia2,  ja2,  a2_, &ddmy, PERM_, INVP_,
#else
    F77_FUNC(wssmp,WSSMP)(&N, ia, ja, a_, &ddmy, PERM_, INVP_,
#endif
                          &ddmy, &idmy, &idmy, &ddmy, &NAUX, MRP_,
                          IPARM_, DPARM_);
    Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                   "Done with WSSMP-1-2 for ordering and symbolic factorization at cpu time %10.3f (wall %10.3f).\n", CpuTime(), WallclockTime());

    Index ierror = IPARM_[63];
    if (ierror!=0) {
      if (ierror==-102) {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error: WSMP is not able to allocate sufficient amount of memory during ordering/symbolic factorization.\n");
      }
      else if (ierror>0) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Matrix appears to be singular (with ierror = %d).\n",
                       ierror);
        if (HaveIpData()) {
          IpData().TimingStats().LinearSystemSymbolicFactorization().End();
        }
        return SYMSOLVER_SINGULAR;
      }
      else {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error in WSMP during ordering/symbolic factorization phase.\n     Error code is %d.\n", ierror);
      }
      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemSymbolicFactorization().End();
      }
      return SYMSOLVER_FATAL_ERROR;
    }
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Predicted memory usage for WSSMP after symbolic factorization IPARM(23)= %d.\n",
                   IPARM_[22]);
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Predicted number of nonzeros in factor for WSSMP after symbolic factorization IPARM(23)= %d.\n",
                   IPARM_[23]);

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemSymbolicFactorization().End();
    }

    return SYMSOLVER_SUCCESS;
  }
Example #13
  void RestoIterationOutput::WriteOutput()
  {
    // Get pointers to the Original NLP objects
    const RestoIpoptNLP* resto_ipopt_nlp =
      static_cast<const RestoIpoptNLP*>(&IpNLP());
    DBG_ASSERT(resto_ipopt_nlp);

    SmartPtr<IpoptData> orig_ip_data = &resto_ipopt_nlp->OrigIpData();
    SmartPtr<IpoptNLP> orig_ip_nlp = &resto_ipopt_nlp->OrigIpNLP();
    SmartPtr<IpoptCalculatedQuantities> orig_ip_cq =
      &resto_ipopt_nlp->OrigIpCq();

    // Set the iteration counter for the original NLP to the current value
    Index iter = IpData().iter_count();
    orig_ip_data->Set_iter_count(iter);

    // If a resto_orig_iteration_output object was given, first do the
    // WriteOutput method with that one
    if (IsValid(resto_orig_iteration_output_)) {
      resto_orig_iteration_output_->WriteOutput();
    }

    //////////////////////////////////////////////////////////////////////
    //         First print the summary line for the iteration           //
    //////////////////////////////////////////////////////////////////////

    std::string header =
      "iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls\n";
    Jnlst().Printf(J_DETAILED, J_MAIN,
                   "\n\n**************************************************\n");
    Jnlst().Printf(J_DETAILED, J_MAIN,
                   "*** Summary of Iteration %d for original NLP:", IpData().iter_count());
    Jnlst().Printf(J_DETAILED, J_MAIN,
                   "\n**************************************************\n\n");
    if (IpData().info_iters_since_header() >= 10 && !IsValid(resto_orig_iteration_output_)) {
      // output the header
      Jnlst().Printf(J_ITERSUMMARY, J_MAIN, header.c_str());
      IpData().Set_info_iters_since_header(0);
    }
    else {
      Jnlst().Printf(J_DETAILED, J_MAIN, header.c_str());
    }

    // For now, just print the total NLP error for the restoration
    // phase problem in the dual infeasibility column
    Number inf_du =
      IpCq().curr_dual_infeasibility(NORM_MAX);

    Number mu = IpData().curr_mu();
    Number dnrm = 0.;
    if (IsValid(IpData().delta()) && IsValid(IpData().delta()->x()) && IsValid(IpData().delta()->s())) {
      dnrm = Max(IpData().delta()->x()->Amax(), IpData().delta()->s()->Amax());
    }

    // Set the trial values for the original Data object to the current
    // restoration phase values
    SmartPtr<const Vector> x = IpData().curr()->x();
    const CompoundVector* cx =
      static_cast<const CompoundVector*>(GetRawPtr(x));
    DBG_ASSERT(dynamic_cast<const CompoundVector*>(GetRawPtr(x)));
    SmartPtr<const Vector> s = IpData().curr()->s();
    const CompoundVector* cs =
      static_cast<const CompoundVector*>(GetRawPtr(s));
    DBG_ASSERT(dynamic_cast<const CompoundVector*>(GetRawPtr(s)));

    SmartPtr<IteratesVector> trial = orig_ip_data->trial()->MakeNewContainer();
    trial->Set_x(*cx->GetComp(0));
    trial->Set_s(*cs->GetComp(0));
    orig_ip_data->set_trial(trial);

    // Compute primal infeasibility
    Number inf_pr = 0.0;
    switch (inf_pr_output_) {
    case INTERNAL:
      inf_pr = orig_ip_cq->trial_primal_infeasibility(NORM_MAX);
      break;
    case ORIGINAL:
      inf_pr = orig_ip_cq->unscaled_trial_nlp_constraint_violation(NORM_MAX);
      break;
    }
    // Compute original objective function
    Number f = orig_ip_cq->unscaled_trial_f();

    // Retrieve some information set in the different parts of the algorithm
    char info_iter='r';

    Number alpha_primal = IpData().info_alpha_primal();
    char alpha_primal_char = IpData().info_alpha_primal_char();
    Number alpha_dual = IpData().info_alpha_dual();
    Number regu_x = IpData().info_regu_x();
    char regu_x_buf[8];
    char dashes[]="   - ";
    char *regu_x_ptr;
    if (regu_x==.0) {
      regu_x_ptr = dashes;
    }
    else {
      Snprintf(regu_x_buf, 7, "%5.1f", log10(regu_x));
      regu_x_ptr = regu_x_buf;
    }
    Index ls_count = IpData().info_ls_count();
    const std::string info_string = IpData().info_string();

    Number current_time = 0.0;
    Number last_output = IpData().info_last_output();
    if ((iter % print_frequency_iter_) == 0 &&
        (print_frequency_time_ == 0.0 || last_output < (current_time = WallclockTime()) - print_frequency_time_ || last_output < 0.0)) {
      Jnlst().Printf(J_ITERSUMMARY, J_MAIN,
                     "%4d%c%14.7e %7.2e %7.2e %5.1f %7.2e %5s %7.2e %7.2e%c%3d",
                     iter, info_iter, f, inf_pr, inf_du, log10(mu), dnrm, regu_x_ptr,
                     alpha_dual, alpha_primal, alpha_primal_char,
                     ls_count);
      if (print_info_string_) {
        Jnlst().Printf(J_ITERSUMMARY, J_MAIN, " %s", info_string.c_str());
      }
      else {
        Jnlst().Printf(J_DETAILED, J_MAIN, " %s", info_string.c_str());
      }
      Jnlst().Printf(J_ITERSUMMARY, J_MAIN, "\n");

      IpData().Set_info_last_output(current_time);
      IpData().Inc_info_iters_since_header();
    }

    //////////////////////////////////////////////////////////////////////
    //           Now if desired more detail on the iterates             //
    //////////////////////////////////////////////////////////////////////

    if (Jnlst().ProduceOutput(J_DETAILED, J_MAIN)) {
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "\n**************************************************\n");
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "*** Beginning Iteration %d from the following point:",
                     IpData().iter_count());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "\n**************************************************\n\n");

      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "Primal infeasibility for restoration phase problem = %.16e\n",
                     IpCq().curr_primal_infeasibility(NORM_MAX));
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "Dual infeasibility for restoration phase problem   = %.16e\n",
                     IpCq().curr_dual_infeasibility(NORM_MAX));

      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_x||_inf   = %.16e\n", IpData().curr()->x()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_s||_inf   = %.16e\n", IpData().curr()->s()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_y_c||_inf = %.16e\n", IpData().curr()->y_c()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_y_d||_inf = %.16e\n", IpData().curr()->y_d()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_z_L||_inf = %.16e\n", IpData().curr()->z_L()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_z_U||_inf = %.16e\n", IpData().curr()->z_U()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_v_L||_inf = %.16e\n", IpData().curr()->v_L()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_v_U||_inf = %.16e\n", IpData().curr()->v_U()->Amax());
    }
    if (Jnlst().ProduceOutput(J_MOREDETAILED, J_MAIN)) {
      if (IsValid(IpData().delta())) {
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "\n||delta_x||_inf   = %.16e\n", IpData().delta()->x()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_s||_inf   = %.16e\n", IpData().delta()->s()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_y_c||_inf = %.16e\n", IpData().delta()->y_c()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_y_d||_inf = %.16e\n", IpData().delta()->y_d()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_z_L||_inf = %.16e\n", IpData().delta()->z_L()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_z_U||_inf = %.16e\n", IpData().delta()->z_U()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_v_L||_inf = %.16e\n", IpData().delta()->v_L()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_v_U||_inf = %.16e\n", IpData().delta()->v_U()->Amax());
      }
      else {
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "\nNo search direction has been computed yet.\n");
      }
    }
    if (Jnlst().ProduceOutput(J_VECTOR, J_MAIN)) {
      IpData().curr()->x()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_x");
      IpData().curr()->s()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_s");

      IpData().curr()->y_c()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_y_c");
      IpData().curr()->y_d()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_y_d");

      IpCq().curr_slack_x_L()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_slack_x_L");
      IpCq().curr_slack_x_U()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_slack_x_U");
      IpData().curr()->z_L()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_z_L");
      IpData().curr()->z_U()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_z_U");

      IpCq().curr_slack_s_L()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_slack_s_L");
      IpCq().curr_slack_s_U()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_slack_s_U");
      IpData().curr()->v_L()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_v_L");
      IpData().curr()->v_U()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_v_U");
    }
    if (Jnlst().ProduceOutput(J_MOREVECTOR, J_MAIN)) {
      IpCq().curr_grad_lag_x()->Print(Jnlst(), J_MOREVECTOR, J_MAIN, "curr_grad_lag_x");
      IpCq().curr_grad_lag_s()->Print(Jnlst(), J_MOREVECTOR, J_MAIN, "curr_grad_lag_s");
      if (IsValid(IpData().delta())) {
        IpData().delta()->Print(Jnlst(), J_MOREVECTOR, J_MAIN, "delta");
      }
    }

    if (Jnlst().ProduceOutput(J_DETAILED, J_MAIN)) {
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "\n\n***Current NLP Values for Iteration (Restoration phase problem) %d:\n",
                     IpData().iter_count());
      Jnlst().Printf(J_DETAILED, J_MAIN, "\n                                   (scaled)                 (unscaled)\n");
      Jnlst().Printf(J_DETAILED, J_MAIN, "Objective...............: %24.16e  %24.16e\n", IpCq().curr_f(), IpCq().unscaled_curr_f());
      Jnlst().Printf(J_DETAILED, J_MAIN, "Dual infeasibility......: %24.16e  %24.16e\n", IpCq().curr_dual_infeasibility(NORM_MAX), IpCq().unscaled_curr_dual_infeasibility(NORM_MAX));
      Jnlst().Printf(J_DETAILED, J_MAIN, "Constraint violation....: %24.16e  %24.16e\n", IpCq().curr_nlp_constraint_violation(NORM_MAX), IpCq().unscaled_curr_nlp_constraint_violation(NORM_MAX));
      Jnlst().Printf(J_DETAILED, J_MAIN, "Complementarity.........: %24.16e  %24.16e\n", IpCq().curr_complementarity(0., NORM_MAX), IpCq().unscaled_curr_complementarity(0., NORM_MAX));
      Jnlst().Printf(J_DETAILED, J_MAIN, "Overall NLP error.......: %24.16e  %24.16e\n\n", IpCq().curr_nlp_error(), IpCq().unscaled_curr_nlp_error());
    }
    if (Jnlst().ProduceOutput(J_VECTOR, J_MAIN)) {
      IpCq().curr_grad_f()->Print(Jnlst(), J_VECTOR, J_MAIN, "grad_f");
      IpCq().curr_c()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_c");
      IpCq().curr_d()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_d");
      IpCq().curr_d_minus_s()->Print(Jnlst(), J_VECTOR, J_MAIN,
                                     "curr_d - curr_s");
    }

    if (Jnlst().ProduceOutput(J_MATRIX, J_MAIN)) {
      IpCq().curr_jac_c()->Print(Jnlst(), J_MATRIX, J_MAIN, "jac_c");
      IpCq().curr_jac_d()->Print(Jnlst(), J_MATRIX, J_MAIN, "jac_d");
      IpData().W()->Print(Jnlst(), J_MATRIX, J_MAIN, "W");
    }

    Jnlst().Printf(J_DETAILED, J_MAIN, "\n\n");
  }
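
For reference, the iteration summary line assembled above lines up column by column with the header string defined near the top of the method. Below is a standalone sketch with invented values; only the two format strings are taken from the code:

  #include <cmath>
  #include <cstdio>

  // Standalone illustration of the iteration-summary formatting used above;
  // the values are invented, only the header and row format strings mirror
  // the code in WriteOutput().
  int main()
  {
    std::printf("iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls\n");

    int iter = 12;
    char info_iter = 'r';          // 'r' marks a restoration phase iteration
    double f = 1.2345678e+01;
    double inf_pr = 3.21e-03;
    double inf_du = 4.56e-02;
    double mu = 1e-05;
    double dnrm = 2.1e-01;
    const char* regu_x = "   - ";  // dashes when no regularization is active
    double alpha_dual = 1.0;
    double alpha_primal = 9.8e-01;
    char alpha_primal_char = 'f';
    int ls_count = 1;

    std::printf("%4d%c%14.7e %7.2e %7.2e %5.1f %7.2e %5s %7.2e %7.2e%c%3d\n",
                iter, info_iter, f, inf_pr, inf_du, std::log10(mu), dnrm, regu_x,
                alpha_dual, alpha_primal, alpha_primal_char, ls_count);
    return 0;
  }
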