void TaylorApproximation::build()
{
  // base class implementation checks data set against min required
  Approximation::build();

  // No computations needed.  Just do sanity checking on approxData.

  // Check number of data points
  if (!approxData.anchor() || approxData.size()) {
    Cerr << "Error: wrong number of data points in TaylorApproximation::"
	 << "build()." << std::endl;
    abort_handler(-1);
  }

  // Check gradient
  short  bdo   = sharedDataRep->buildDataOrder;
  size_t num_v = sharedDataRep->numVars;
  if ( (bdo & 2) && approxData.anchor_gradient().length() != num_v) {
    Cerr << "Error: gradient vector required in TaylorApproximation::build()."
	 << std::endl;
    abort_handler(-1);
  }

  // Check Hessian
  if ( (bdo & 4) && approxData.anchor_hessian().numRows() != num_v) {
    Cerr << "Error: Hessian matrix required in TaylorApproximation::build()."
	 << std::endl;
    abort_handler(-1);
  }
}
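The anchor value, gradient, and Hessian checked above are exactly the data a first- or second-order Taylor expansion needs. A minimal standalone sketch (not the Dakota implementation) of the evaluation that stored data supports:

#include <cstddef>
#include <iostream>
#include <vector>

// f(x) ~= f(x0) + g'(x - x0) + 0.5 (x - x0)' H (x - x0)
double taylor_second_order(double f0, const std::vector<double>& grad,
                           const std::vector<std::vector<double>>& hess,
                           const std::vector<double>& x0,
                           const std::vector<double>& x)
{
  const std::size_t n = x0.size();
  double val = f0;
  for (std::size_t i = 0; i < n; ++i) {
    const double dxi = x[i] - x0[i];
    val += grad[i] * dxi;
    for (std::size_t j = 0; j < n; ++j)
      val += 0.5 * hess[i][j] * dxi * (x[j] - x0[j]);
  }
  return val;
}

int main()
{
  // expand f(x) = x1^2 + x2^2 about x0 = (1, 1); exact for a quadratic
  std::vector<double> x0{1.0, 1.0}, x{1.5, 2.0}, g{2.0, 2.0};
  std::vector<std::vector<double>> h{{2.0, 0.0}, {0.0, 2.0}};
  std::cout << taylor_second_order(2.0, g, h, x0, x) << std::endl; // 6.25
  return 0;
}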
Example #2
PythonInterface::PythonInterface(const ProblemDescDB& problem_db)
  : DirectApplicInterface(problem_db),
    userNumpyFlag(problem_db.get_bool("interface.python.numpy"))
{
  Py_Initialize();
  if (Py_IsInitialized()) {
    if (outputLevel >= NORMAL_OUTPUT)
      Cout << "Python interpreter initialized for direct function evaluation."
	   << std::endl;
  }
  else {
    Cerr << "Error: Could not initialize Python for direct function "
	 << "evaluation." << std::endl;
    abort_handler(-1);
  }

  if (userNumpyFlag) {
#ifdef DAKOTA_PYTHON_NUMPY
    import_array();
#else
    Cerr << "\nError: Direct Python interface 'numpy' option requested, but "
	 << "not available." << std::endl;
    abort_handler(-1);
#endif
  }

  // prepend sys.path (env PYTHONPATH) with empty string to find module in pwd
  // This assumes any directory changing in the driver is reversed
  // between function evaluations
  PyRun_SimpleString("import sys\nsys.path.insert(0,\"\")");

}
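A minimal standalone sketch of the same embedding calls used above (Py_Initialize, PyRun_SimpleString), assuming the CPython development headers are available; it is not the Dakota interface itself:

#include <Python.h>   // CPython embedding API; include before other headers
#include <iostream>

int main()
{
  Py_Initialize();
  if (!Py_IsInitialized()) {
    std::cerr << "Error: could not initialize Python." << std::endl;
    return 1;
  }
  // prepend "" to sys.path so modules in the current directory are found,
  // mirroring the constructor above
  PyRun_SimpleString("import sys\nsys.path.insert(0, \"\")");
  PyRun_SimpleString("print('embedded interpreter ready')");
  Py_Finalize();
  return 0;
}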
Example #3
/** Executes the sysCommand by passing it to system().  Appends an
    "&" if asynchFlag is set (background system call) and echoes the
    sysCommand to Cout if suppressOutputFlag is not set. */
CommandShell& CommandShell::flush()
{
  if (asynchFlag)
#if !defined(_MSC_VER)
    sysCommand += " &";
#else
    sysCommand = "start \"SystemInterface-Evaluation\" " + sysCommand;
#endif

  if (!suppressOutputFlag)
    Cout << sysCommand << std::endl;  // output the cmd string for verification

  if ( !workDir.empty() )
    WorkdirHelper::change_cwd(workDir);

#ifdef HAVE_SYSTEM
  std::system(sysCommand.c_str());
#else
  Cout << "ERROR: attempting to use a system call on a system that does"
       << " NOT support system calls" << std::endl;
  abort_handler(-1);
#endif

  if ( !workDir.empty() )
    WorkdirHelper::reset();

  sysCommand.clear();
  return *this;
}
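The same pattern in a minimal standalone form (not Dakota code): append " &" for a POSIX background job or prefix with "start" on Windows, echo the command, and hand it to std::system():

#include <cstdlib>
#include <iostream>
#include <string>

// Run a command, optionally in the background, mirroring flush() above.
int run_command(std::string cmd, bool asynch, bool quiet = false)
{
#if !defined(_MSC_VER)
  if (asynch) cmd += " &";                         // POSIX: background job
#else
  if (asynch) cmd = "start \"Evaluation\" " + cmd; // Windows: detached window
#endif
  if (!quiet)
    std::cout << cmd << std::endl;                 // echo for verification
  return std::system(cmd.c_str());
}

int main() { return run_command("echo hello", false); }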
/// On-the-fly constructor which uses mostly Surfpack model defaults
SharedSurfpackApproxData::
SharedSurfpackApproxData(const String& approx_type,
			 const UShortArray& approx_order, size_t num_vars,
			 short data_order, short output_level):
  SharedApproxData(NoDBBaseConstructor(), approx_type, num_vars, data_order,
		   output_level),
  crossValidateFlag(false), numFolds(0), percentFold(0.0), pressFlag(false)
{
  approxType = approx_type;
  if (approx_order.empty())
    approxOrder = 2;
  else {
    approxOrder = approx_order[0];
    if (approx_order.size() != num_vars) {
      Cerr << "Error: bad size of " << approx_order.size()
	   << " for approx_order in SharedSurfpackApproxData lightweight "
	   << "constructor.  Expected " << num_vars << "." << std::endl;
      abort_handler(-1);
    }
    for (size_t i=1; i<num_vars; ++i)
      if (approx_order[i] != approxOrder) {
	Cerr << "Warning: SharedSurfpackApproxData lightweight constructor "
	     << "requires homogeneous approximation order.  Promoting to max "
	     << "value." << std::endl;
	approxOrder = std::max(approx_order[i], approxOrder);
      }
  }
}
void Approximation::
add(const Variables& vars, bool anchor_flag, bool deep_copy)
{
  if (approxRep)
    approxRep->add(vars, anchor_flag, deep_copy);
  else { // not virtual: all derived classes use following definition
    // Approximation does not know about view mappings; therefore, take the
    // simple approach of matching up active or all counts with numVars.
    size_t num_v = sharedDataRep->numVars;
    if (vars.cv() + vars.div() + vars.drv() == num_v)
      add(vars.continuous_variables(), vars.discrete_int_variables(),
	  vars.discrete_real_variables(), anchor_flag, deep_copy);
    else if (vars.acv() + vars.adiv() + vars.adrv() == num_v)
      add(vars.all_continuous_variables(), vars.all_discrete_int_variables(),
	  vars.all_discrete_real_variables(), anchor_flag, deep_copy);
    /*
    else if (vars.cv() == num_v) {  // compactMode does not affect vars
      IntVector empty_iv; RealVector empty_rv;
      add(vars.continuous_variables(), empty_iv, empty_rv,
	  anchor_flag, deep_copy);
    }
    else if (vars.acv() == num_v) { // potential conflict with cv/div/drv
      IntVector empty_iv; RealVector empty_rv;
      add(vars.all_continuous_variables(), empty_iv, empty_rv,
	  anchor_flag, deep_copy);
    }
    */
    else {
      Cerr << "Error: variable size mismatch in Approximation::add()."
	   << std::endl;
      abort_handler(-1);
    }
  }
}
Example #6
Real ExperimentData::
scalar_sigma(size_t response, size_t experiment, size_t replicate)
{
  if (allExperiments[response].experimentType != SCALAR_DATA) {
    Cerr << "Error (ExperimentData): invalid query of scalar data." << std::endl;
    abort_handler(-1);
  }
  return(allExperiments[response].dataThisResponse[experiment].sigmaScalar[replicate]);
}
void GridApplicInterface::
derived_map(const Variables& vars, const ActiveSet& set, Response& response,
	    int fn_eval_id)
{
  //
  // Launch the grid solver (asynchronously)
  //
  ParamResponsePair prp(vars, interfaceId, response, fn_eval_id);
  derived_map_asynch(prp);
  //
  // Call wait_local_evaluations() until our id is in the set
  //
  PRPQueue prp_queue;
  prp_queue.push_back(prp);
  if (!completionSet.empty()) {
    Cerr << "derived_map - should start with an empty completion set\n";
    abort_handler(-1);
  }
  wait_local_evaluations(prp_queue); // rebuilds completionSet
  response = prp_queue.front().prp_response();
  completionSet.clear();
#if 0
  //
  // Read the params file and handle exceptions
  //
  try {
    if (evalCommRank == 0)
      read_results_files(response, fn_eval_id);
  }
  catch(String& err_msg) {
    // a String exception involves detection of an incomplete file/data
    // set.  In the synchronous case, there is no potential for an incomplete
    // file resulting from a race condition -> echo the error and abort.
    Cerr << err_msg << std::endl;
    abort_handler(-1);
  }
  catch(int fail_code) {
    // The approach here is to have catch(int) rethrow the exception to an 
    // outer catch (either the catch within manage_failure or a catch that 
    // calls manage_failure).
    throw;
  }
#endif
}
void Approximation::approximation_coefficients(const RealVector& approx_coeffs)
{
  if (approxRep)
    approxRep->approximation_coefficients(approx_coeffs);
  else {
    Cerr << "Error: approximation_coefficients() not available for this "
	 << "approximation type." << std::endl;
    abort_handler(-1);
  }
}
Real Approximation::value(const Variables& vars)
{
  if (!approxRep) {
    Cerr << "Error: value() not available for this approximation type."
	 << std::endl;
    abort_handler(-1);
  }

  return approxRep->value(vars);
}
Example #10
Real Approximation::prediction_variance(const Variables& vars)
{
  if (!approxRep) {
    Cerr << "Error: prediction_variance() not available for this approximation "
	 << "type." << std::endl;
    abort_handler(-1);
  }

  return approxRep->prediction_variance(vars);
}
Example #11
void Approximation::print_coefficients(std::ostream& s, bool normalized)
{
  if (approxRep)
    approxRep->print_coefficients(s, normalized);
  else {
    Cerr << "Error: print_coefficients() not available for this approximation "
	 << "type." << std::endl;
    abort_handler(-1);
  }
}
Example #12
const RealVector& Approximation::approximation_coefficients() const
{
  if (!approxRep) {
    Cerr << "Error: approximation_coefficients() not available for this "
	 << "approximation type." << std::endl;
    abort_handler(-1);
  }
   
  return approxRep->approximation_coefficients();
}
Example #13
const RealVector& Approximation::gradient(const Variables& vars)
{
  if (!approxRep) {
    Cerr << "Error: gradient() not available for this approximation type."
	 << std::endl;
    abort_handler(-1);
  }

  return approxRep->gradient(vars);
}
Example #14
const RealSymMatrix& Approximation::hessian(const Variables& vars)
{
  if (!approxRep) {
    Cerr << "Error: hessian() not available for this approximation type."
	 << std::endl;
    abort_handler(-1);
  }
    
  return approxRep->hessian(vars);
}
Example #15
int Approximation::min_coefficients() const
{
  if (!approxRep) { // no default implementation
    Cerr << "Error: min_coefficients() not defined for this approximation type."
         << std::endl;
    abort_handler(-1);
  }

  return approxRep->min_coefficients(); // fwd to letter
}
Example #16
void Approximation::
coefficient_labels(std::vector<std::string>& coeff_labels) const
{
  if (approxRep)
    approxRep->coefficient_labels(coeff_labels);
  else {
    Cerr << "Error: coefficient_labels() not available for this approximation "
	 << "type." << std::endl;
    abort_handler(-1);
  }
}
Example #17
GridApplicInterface::
GridApplicInterface(const ProblemDescDB& problem_db):
  SysCallApplicInterface(problem_db)
{ 
  void* handle = dlopen("foo.so", RTLD_NOW);
  if (!handle) {
    Cerr << "Problem loading shared object file: foo.so" << std::endl;
    abort_handler(-1);
  }
  start_grid_computing
    = (start_grid_computing_t)(dlsym(handle, "start_grid_computing"));
  const char* error;
  if ((error = dlerror()) != NULL) {
    Cerr << "Problem loading start_grid_computing function: " << error
         << std::endl;
    abort_handler(-1);
  }
  stop_grid_computing
    = (stop_grid_computing_t)dlsym(handle, "stop_grid_computing");
  if ((error = dlerror()) != NULL) {
    Cerr << "Problem loading stop_grid_computing function: " << error
         << std::endl;
    abort_handler(-1);
  }
  perform_analysis = (perform_analysis_t)dlsym(handle, "perform_analysis");
  if ((error = dlerror()) != NULL) {
    Cerr << "Problem loading perform_analysis function: " << error << std::endl;
    abort_handler(-1);
  }
  get_jobs_completed = (get_jobs_completed_t)dlsym(handle,"get_jobs_completed");
  if ((error = dlerror()) != NULL) {
    Cerr << "Problem loading get_jobs_completed function: " << error
         << std::endl;
    abort_handler(-1);
  }
  int status = (*start_grid_computing)(programNames[0].data(),
				       paramsFileName.data(), 
				       resultsFileName.data());
  //fileSaveFlag=true;
}
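For reference, a minimal standalone dlopen/dlsym example (POSIX only, not Dakota code) that clears dlerror() before the lookup so a stale error is not misreported; the library and symbol names here (libm.so.6, cos) are purely illustrative stand-ins for the foo.so symbols above:

#include <dlfcn.h>
#include <iostream>

int main()
{
  void* handle = dlopen("libm.so.6", RTLD_NOW);
  if (!handle) {
    std::cerr << "dlopen failed: " << dlerror() << std::endl;
    return 1;
  }

  dlerror();                                   // clear any stale error state
  typedef double (*cos_t)(double);
  cos_t cos_fn = (cos_t)dlsym(handle, "cos");
  if (const char* error = dlerror()) {
    std::cerr << "dlsym failed: " << error << std::endl;
    return 1;
  }

  std::cout << cos_fn(0.0) << std::endl;       // prints 1
  dlclose(handle);
  return 0;
}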
void SharedSurfpackApproxData::
add_sd_to_surfdata(const Pecos::SurrogateDataVars& sdv,
		   const Pecos::SurrogateDataResp& sdr, short fail_code,
		   SurfData& surf_data)
{
  // coarse-grained fault tolerance for now: any failure qualifies for omission
  if (fail_code)
    return;

  // Surfpack's RealArray is std::vector<double>; use DAKOTA copy_data helpers.
  // For DAKOTA's compact mode, any active discrete {int,real} variables could
  // be contained within SDV's continuousVars (see Approximation::add(Real*)),
  // although it depends on eval cache lookups as shown in
  // ApproximationInterface::update_approximation().
  RealArray x; 
  sdv_to_realarray(sdv, x);
  Real f = sdr.response_function();

  // for now only allow builds from exactly 1, 3=1+2, or 7=1+2+4; use
  // different set functions so the SurfPoint data remains empty if
  // not present
  switch (buildDataOrder) {

  case 1:
    surf_data.addPoint(SurfPoint(x, f));
    break;

  case 3: {
    RealArray gradient;
    copy_data(sdr.response_gradient(), gradient);
    surf_data.addPoint(SurfPoint(x, f, gradient));
    break;
  }

  case 7: {
    RealArray gradient;
    copy_data(sdr.response_gradient(), gradient);
    SurfpackMatrix<Real> hessian;
    copy_matrix(sdr.response_hessian(), hessian);
    surf_data.addPoint(SurfPoint(x, f, gradient, hessian));
    break;
  }

  default:
    Cerr << "\nError (SharedSurfpackApproxData): derivative data may only be "
	 << "used if all\nlower-order information is also present. Specified "
	 << "buildDataOrder is " << buildDataOrder << "."  << std::endl; 
    abort_handler(-1);
    break;

  }
}
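The buildDataOrder values used above are bit flags: 1 = function value, 2 = gradient, 4 = Hessian, so 3 and 7 are the only combinations that keep all lower-order data present. A tiny standalone illustration of the flag checks:

#include <iostream>

int main()
{
  const short VALUE = 1, GRADIENT = 2, HESSIAN = 4;
  short build_data_order = VALUE | GRADIENT | HESSIAN;  // == 7
  std::cout << "order = " << build_data_order
            << ", has gradient: " << bool(build_data_order & GRADIENT)
            << ", has Hessian: "  << bool(build_data_order & HESSIAN)
            << std::endl;
  return 0;
}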
Example #19
void NLPQLPOptimizer::initialize()
{
  // NLPQLP does not support internal calculation of numerical derivatives
  if (vendorNumericalGradFlag) {
    Cerr << "\nError: vendor numerical gradients not supported by nlpql_sqp."
	 << "\n       Please select dakota numerical instead." << std::endl;
    abort_handler(-1);
  }

  // Prevent nesting of an instance of a Fortran iterator within another
  // instance of the same iterator (which would result in data clashes since
  // Fortran does not support object independence).  Recurse through all
  // sub-models and test each sub-iterator for NLPQL presence.
  Iterator sub_iterator = iteratedModel.subordinate_iterator();
  if (!sub_iterator.is_null() && 
       ( strbegins(sub_iterator.method_name(), "nlpql") ||
	 strbegins(sub_iterator.uses_method(), "nlpql") ) )
    sub_iterator.method_recourse();
  ModelList& sub_models = iteratedModel.subordinate_models();
  for (ModelLIter ml_iter = sub_models.begin();
       ml_iter != sub_models.end(); ml_iter++) {
    sub_iterator = ml_iter->subordinate_iterator();
    if (!sub_iterator.is_null() && 
	 ( strbegins(sub_iterator.method_name(), "nlpql") ||
	   strbegins(sub_iterator.uses_method(), "nlpql") ) )
      sub_iterator.method_recourse();
  }

  // Set NLPQL optimization controls 
  L      = 1;
  ACC    = 1.0e-9;
  ACCQP  = 1.0e-11;
  STPMIN = 0;
  MAXFUN = 10; // max fn evals per line search
  MAXIT  = maxIterations;
  MAX_NM = 10;
  TOL_NM = 0.1;
  MODE   = 0;
  IOUT   = 6;
  LQL    = 1;

  switch (outputLevel) {
  case DEBUG_OUTPUT:
    IPRINT = 4; break;
  case VERBOSE_OUTPUT:
    IPRINT = 2; break;
  case SILENT_OUTPUT:
    IPRINT = 0; break;
  case NORMAL_OUTPUT: default:
    IPRINT = 1; break;
  }
}
Example #20
/** This is the alternate envelope constructor for instantiations on
    the fly.  Since it does not have access to problem_db, it utilizes
    the NoDBBaseConstructor constructor chain. */
Approximation::Approximation(const SharedApproxData& shared_data):
  sharedDataRep(NULL), referenceCount(1)
{
#ifdef REFCOUNT_DEBUG
  Cout << "Approximation::Approximation(String&) called to instantiate "
       << "envelope." << std::endl;
#endif

  // Set the rep pointer to the appropriate derived type
  approxRep = get_approx(shared_data);
  if ( !approxRep ) // bad type or insufficient memory
    abort_handler(-1);
}
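The Approximation methods shown above all follow the same envelope/letter idiom: the envelope holds a pointer to a derived "letter" and either forwards the call or reports that the operation is unavailable. A minimal standalone sketch of that idiom (not Dakota code):

#include <iostream>
#include <memory>

class Letter {                       // abstract "letter" supplies behavior
public:
  virtual ~Letter() = default;
  virtual double value(double x) const = 0;
};

class QuadraticLetter : public Letter {
public:
  double value(double x) const override { return x * x; }
};

class Envelope {                     // "envelope" is what clients hold
public:
  explicit Envelope(std::shared_ptr<Letter> rep) : rep_(std::move(rep)) {}
  double value(double x) const {
    if (!rep_) {                     // mirrors the approxRep checks above
      std::cerr << "Error: value() not available for this type." << std::endl;
      return 0.0;
    }
    return rep_->value(x);           // forward to the letter
  }
private:
  std::shared_ptr<Letter> rep_;
};

int main()
{
  Envelope approx(std::make_shared<QuadraticLetter>());
  std::cout << approx.value(3.0) << std::endl;  // prints 9
  return 0;
}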
Example #21
/** \b Usage: "dakota_restart_util cat dakota_1.rst ... dakota_n.rst
                 dakota_new.rst"

    Combines multiple restart files into a single restart database. */
void concatenate_restart(int argc, char** argv)
{
  if (argc < 5) {
    Cerr << "Usage: \"dakota_restart_util cat <restart_file_1> ... "
	 << "<restart_file_n> <new_restart_file>\"." << endl;
    exit(-1);
  }

  std::ofstream restart_output_fs(argv[argc-1], std::ios::binary);
  boost::archive::binary_oarchive restart_output_archive(restart_output_fs);

  cout << "Writing new restart file " << argv[argc-1] << '\n';

  for (int cat_cntr=2; cat_cntr<argc-1; cat_cntr++) {

    std::ifstream restart_input_fs(argv[cat_cntr], std::ios::binary);
    if (!restart_input_fs.good()) {
      Cerr << "Error: failed to open restart file " << argv[cat_cntr] << endl;
      exit(-1);
    }
    boost::archive::binary_iarchive restart_input_archive(restart_input_fs);

    int cntr = 0;
    while (restart_input_fs.good() && !restart_input_fs.eof()) {

      ParamResponsePair current_pair;
      try { 
	restart_input_archive & current_pair; 
      }
      catch(const boost::archive::archive_exception& e) {
	Cerr << "\nError reading restart file (boost::archive exception):\n" 
	     << e.what() << std::endl;
	abort_handler(-1);
      }
      catch(const std::string& err_msg) {
        Cout << "\nWarning reading restart file: " << err_msg << std::endl;
        break;
      }
      restart_output_archive & current_pair;
      cntr++;
 
      // peek to force EOF if the last restart record was read
      restart_input_fs.peek();
    }

    cout << argv[cat_cntr] << " processing completed: " << cntr
         << " evaluations retrieved.\n";
  }
  restart_output_fs.close();
}
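A minimal standalone sketch of the same read-until-EOF loop with Boost.Serialization, assuming Boost is available; Record is a hypothetical stand-in for ParamResponsePair, and peek() forces EOF after the last record just as above:

#include <fstream>
#include <iostream>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>

struct Record {                      // hypothetical stand-in for ParamResponsePair
  int id; double value;
  template<class Archive>
  void serialize(Archive& ar, const unsigned int) { ar & id & value; }
};

int main()
{
  { // write a few records
    std::ofstream out("records.bin", std::ios::binary);
    boost::archive::binary_oarchive oa(out);
    for (int i = 0; i < 3; ++i) { Record r{i, 1.5 * i}; oa & r; }
  }

  std::ifstream in("records.bin", std::ios::binary);
  boost::archive::binary_iarchive ia(in);
  int cntr = 0;
  while (in.good() && !in.eof()) {
    Record r;
    ia & r;
    std::cout << "record " << r.id << " value " << r.value << '\n';
    ++cntr;
    in.peek();                       // force EOF once the last record is read
  }
  std::cout << cntr << " records retrieved." << std::endl;
  return 0;
}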
Example #22
/** This is the common base class portion of the virtual fn and is
    insufficient on its own; derived implementations should explicitly
    invoke (or reimplement) this base class contribution. */
void Approximation::pop(bool save_data)
{
  if (approxRep)
    approxRep->pop(save_data);
  else {
    if (popCountStack.empty()) {
      Cerr << "\nError: empty count stack in Approximation::pop()."
	   << std::endl;
      abort_handler(-1);
    }
    approxData.pop(popCountStack.back(), save_data);
    popCountStack.pop_back();
  }
}
Example #23
/** This is the common base class portion of the virtual fn and is
    insufficient on its own; derived implementations should explicitly
    invoke (or reimplement) this base class contribution. */
void Approximation::build()
{
  if (approxRep)
    approxRep->build();
  else {
    size_t num_curr_pts = approxData.size();
    int ms = min_points(true); // account for anchor point & buildDataOrder
    if (num_curr_pts < ms) {
      Cerr << "\nError: not enough samples to build approximation.  "
	   << "Construction of this approximation\n       requires at least "
	   << ms << " samples for " << sharedDataRep->numVars << " variables.  "
	   << "Only " << num_curr_pts << " samples were provided." << std::endl;
      abort_handler(-1);
    }
  }
}
void SharedSurfpackApproxData::
sdv_to_realarray(const Pecos::SurrogateDataVars& sdv, RealArray& ra)
{
  // check incoming vars for correct length (active or all views)
  const RealVector&  cv = sdv.continuous_variables();
  const IntVector&  div = sdv.discrete_int_variables();
  const RealVector& drv = sdv.discrete_real_variables();
  if (cv.length() + div.length() + drv.length() == numVars)
    merge_variable_arrays(cv, div, drv, ra);
  else {
    Cerr << "Error: bad parameter set length in SharedSurfpackApproxData::"
	 << "sdv_to_realarray(): " << numVars << " != " << cv.length() << " + "
	 << div.length() << " + " << drv.length() << "." << std::endl;
    abort_handler(-1);
  }
}
Example #25
void RelaxedVariables::build_active_views()
{
  // Initialize active view vectors and counts.  Don't bleed over any logic
  // about supported view combinations; rather, keep this class general and
  // encapsulated.
  const SizetArray& vc_totals = sharedVarsData.components_totals();
  size_t num_cdv = vc_totals[0], num_ddv = vc_totals[1] + vc_totals[2],
    num_mdv  = num_cdv + num_ddv, num_cauv = vc_totals[3],
    num_dauv = vc_totals[4] + vc_totals[5], num_ceuv = vc_totals[6],
    num_deuv = vc_totals[7] + vc_totals[8], num_mauv = num_cauv + num_dauv,
    num_meuv = num_ceuv + num_deuv, num_muv = num_mauv + num_meuv,
    num_csv  = vc_totals[9], num_dsv = vc_totals[10] + vc_totals[11],
    num_msv  = num_csv + num_dsv;

  // Initialize active views
  size_t cv_start, num_cv;
  switch (sharedVarsData.view().first) {
  case EMPTY:
    Cerr << "Error: active view cannot be EMPTY in RelaxedVariables."
	 << std::endl; abort_handler(-1);             break;
  case RELAXED_ALL:
    // start at the beginning
    cv_start = 0; num_cv = num_mdv + num_muv + num_msv; break;
  case RELAXED_DESIGN:
    // start at the beginning
    cv_start = 0; num_cv = num_mdv;                     break;
  case RELAXED_ALEATORY_UNCERTAIN:
    // skip over the relaxed design variables
    cv_start = num_mdv; num_cv = num_mauv;              break;
  case RELAXED_EPISTEMIC_UNCERTAIN:
    // skip over the relaxed design and aleatory variables
    cv_start = num_mdv+num_mauv;  num_cv = num_meuv;    break;
  case RELAXED_UNCERTAIN:
    // skip over the relaxed design variables
    cv_start = num_mdv; num_cv = num_muv;               break;
  case RELAXED_STATE:
    // skip over the relaxed design and uncertain variables
    cv_start = num_mdv + num_muv; num_cv = num_msv;     break;
  }
  sharedVarsData.cv_start(cv_start); sharedVarsData.cv(num_cv);
  sharedVarsData.div_start(0);       sharedVarsData.div(0);
  sharedVarsData.drv_start(0);       sharedVarsData.drv(0);
  sharedVarsData.initialize_active_components();
  if (num_cv)
    continuousVars
      = RealVector(Teuchos::View, &allContinuousVars[cv_start], num_cv);
}
inline void SequentialHybridStrategy::
initialize_iterator(const VariablesArray& param_sets)
{
  // Note: in current usage, we update an iterator with either:
  // > 1 set from parameterSets (numIteratorJobs == parameterSets.size())
  // > all of parameterSets (numIteratorJobs == 1)
  size_t num_param_sets = param_sets.size();
  if (num_param_sets == 1)
    userDefinedModels[seqCount].active_variables(param_sets[0]);
  else if (selectedIterators[seqCount].accepts_multiple_points())
    selectedIterators[seqCount].initial_points(param_sets);
  else {
    std::cerr << "Error: bad parameter sets array in SequentialHybridStrategy::"
	      << "initialize_iterator()" << std::endl;
    abort_handler(-1);
  }
}
void SharedSurfpackApproxData::
vars_to_realarray(const Variables& vars, RealArray& ra)
{
  // check incoming vars for correct length (active or all views)
  if (vars.cv() + vars.div() + vars.drv() == numVars)
    merge_variable_arrays(vars.continuous_variables(),
			  vars.discrete_int_variables(),
			  vars.discrete_real_variables(), ra);
  else if (vars.acv() + vars.adiv() + vars.adrv() == numVars)
    merge_variable_arrays(vars.all_continuous_variables(),
			  vars.all_discrete_int_variables(),
			  vars.all_discrete_real_variables(), ra);
  else {
    Cerr << "Error: bad parameter set length in SharedSurfpackApproxData::"
	 << "vars_to_realarray()." << std::endl;
    abort_handler(-1);
  }
}
void SharedSurfpackApproxData::
copy_matrix(const RealSymMatrix& rsm, SurfpackMatrix<Real>& surfpack_matrix)
{
  // SymmetricMatrix = symmetric and square, but Dakota::Matrix can be general
  // (e.g., functionGradients = numFns x numVars).  Therefore, have to verify
  // sanity of the copy.  Could copy square submatrix of rsm into sm, but 
  // aborting with an error seems better since this should only currently be
  // used for copying Hessian matrices.
  size_t nr = rsm.numRows(), nc = rsm.numCols();
  if (nr != nc) {
    Cerr << "Error: copy_data(const Dakota::RealSymMatrix& rsm, "
	 << "SurfpackMatrix<Real>& sm) called with nonsquare rsm." << std::endl;
    abort_handler(-1);
  }
  if (surfpack_matrix.getNRows() != nr || surfpack_matrix.getNCols() != nc)
    surfpack_matrix.resize(nr, nc);
  for (size_t i=0; i<nr; ++i)
    for (size_t j=0; j<nc; ++j)
      surfpack_matrix(i,j) = rsm(i,j);
}
/// Utility function from boost/test, not available in the DAKOTA snapshot
inline void putenv_impl(const char* name_and_value)
{
  if ( putenv( (char*)name_and_value) ) {
    Cerr << "\nError: putenv(" << name_and_value
         << ") failed in putenv_impl()" << std::endl;
    abort_handler(-1);
  }

/* WJB: alternate impl IF I believe what I read at following site:
  // http://stackoverflow.com/questions/5873029/questions-about-putenv-and-setenv
  std::vector<std::string> var_name_and_val_tokens;

  boost::split( var_name_and_val_tokens,
                name_and_value, boost::is_any_of("=") );

  if ( setenv(var_name_and_val_tokens[0].c_str(),
              var_name_and_val_tokens[1].c_str(), true) ) {
    Cerr << "\nError: setenv(" << name_and_value
         << ") failed in putenv_impl()" << std::endl;
  }
*/
}
Example #30
  /// The main point: a python interface that passes a python object back to the interface function
  NRELPythonApplicInterface::NRELPythonApplicInterface(const ProblemDescDB& problem_db, void *pData) : NRELApplicInterface(problem_db, pData)
  {
    if (!Py_IsInitialized())
      printf("Warning: Python interpreter not already initialized as expected "
             "in NRELPythonApplicInterface constructor\n");
    // Py_Initialize() is a no-op if the interpreter is already running
    Py_Initialize();
    if (Py_IsInitialized()) {
      if (outputLevel >= NORMAL_OUTPUT)
	Cout << "Python interpreter initialized for direct function evaluation."
	     << std::endl;
    }
    else {
      Cerr << "Error: Could not initialize Python for direct function "
	   << "evaluation." << std::endl;
      abort_handler(-1);
    }

    import_array();
    //      userNumpyFlag = problem_db.get_bool("python_numpy");
    //    userNumpyFlag = true;

    userNumpyFlag = true;
  }