Example #1
  inline static dt::string call(vm_heap &heap, const dt::string &command) {
    // Set stdout to be a temporary file.
    FILE *tmpfp = tmpfile();
    NP1_ASSERT(tmpfp, "Unable to create temporary file for meta.shell function");
    ::np1::io::file tmpf;
    tmpf.from_handle(fileno(tmpfp));
    int saved_stdout = dup(1);
    dup2(tmpf.handle(), 1);

    // Execute the command.
    NP1_ASSERT(system(command.to_string().c_str()) >= 0, "system() failed for meta.shell");

    // Set stdout back to what it was and read the whole file.
    dup2(saved_stdout, 1);
    close(saved_stdout);
    tmpf.rewind();
    ::np1::io::mandatory_input_stream<np1::io::file> mandatory_tmpf(tmpf);
    ::np1::io::ext_heap_buffer_output_stream<vm_heap> command_output(heap, TEMP_FILE_READ_BUFFER_SIZE);
    mandatory_tmpf.copy(command_output);
    
    // Ensure that the command output is valid UTF-8.
    char *command_output_p = (char *)command_output.ptr();
    size_t command_output_size = command_output.size();
    str::replace_invalid_utf8_sequences(command_output_p, command_output_size, '?');

    return dt::string(command_output_p, command_output_size);
  }
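
The example above depends on the np1 I/O wrappers, but the underlying technique is plain POSIX: duplicate descriptor 1, point it at a temporary file, run the command, restore stdout, then read the file back. Below is a minimal standalone sketch of the same pattern using only standard C and POSIX calls; the function name capture_shell_output and the 4096-byte read buffer are made up for illustration, and error handling is reduced to early returns.

#include <cstdio>
#include <cstdlib>
#include <string>
#include <unistd.h>

// Sketch only: run `command` with stdout redirected to an anonymous
// temporary file and return whatever it printed.
static std::string capture_shell_output(const std::string &command) {
  FILE *tmpfp = tmpfile();                 // removed automatically on close
  if (!tmpfp) return std::string();

  fflush(stdout);                          // don't let buffered output leak across the switch
  int saved_stdout = dup(STDOUT_FILENO);   // remember the real stdout
  dup2(fileno(tmpfp), STDOUT_FILENO);      // fd 1 now writes to the temp file

  int rc = system(command.c_str());        // the child inherits the redirected fd 1

  fflush(stdout);
  dup2(saved_stdout, STDOUT_FILENO);       // put stdout back
  close(saved_stdout);

  std::string output;
  if (rc >= 0) {
    rewind(tmpfp);                         // read the captured bytes back
    char buf[4096];
    size_t n;
    while ((n = fread(buf, 1, sizeof(buf), tmpfp)) > 0) {
      output.append(buf, n);
    }
  }
  fclose(tmpfp);
  return output;
}

Unlike the np1 version, this sketch does not scrub invalid UTF-8 from the captured output; it only shows the redirect-and-capture mechanics.
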
Example #2
static bool
submit_try( ArgList &args, CondorID &condorID, bool prohibitMultiJobs )
{
  MyString cmd; // for debug output
  args.GetArgsStringForDisplay( &cmd );

  FILE * fp = my_popen( args, "r", TRUE );
  if (fp == NULL) {
    debug_printf( DEBUG_NORMAL,
                  "ERROR: my_popen(%s) in submit_try() failed!\n",
                  cmd.Value() );
    return false;
  }
  
  //----------------------------------------------------------------------
  // Parse submit command output for a HTCondor job ID.  This
  // desperately needs to be replaced by HTCondor submit APIs.
  //
  // Typical condor_submit output for HTCondor v6 looks like:
  //
  //   Submitting job(s).
  //   Logging submit event(s).
  //   1 job(s) submitted to cluster 2267.
  //----------------------------------------------------------------------

  char buffer[UTIL_MAX_LINE_LENGTH];
  buffer[0] = '\0';

  // Configure what we look for in the command output according to
  // which type of job we have.
  const char *marker = NULL;
  parse_submit_fnc parseFnc = NULL;

  marker = " submitted to cluster ";

  // Note: we *could* check how many jobs got submitted here, and
  // correlate that with how many submit events we see later on.
  // I'm not worrying about that for now...  wenger 2006-02-07.
  // We also have to check the number of jobs to get an accurate
  // count of submitted jobs to report in the dagman.out file.

  // We should also check whether we got more than one cluster, and
  // either deal with it correctly or generate an error message.
  parseFnc = parse_condor_submit;
  
  // Take all of the output (both stdout and stderr) from condor_submit,
  // and echo it to the dagman.out file.  Look for
  // the line (if any) containing the word "cluster" (HTCondor).
  // If we don't find such a line, something
  // went wrong with the submit, so we return false.  The caller of this
  // function can retry the submit by repeatedly calling this function.

  MyString command_output("");
  MyString keyLine("");
  while (fgets(buffer, UTIL_MAX_LINE_LENGTH, fp)) {
    MyString buf_line = buffer;
    buf_line.chomp();
    debug_printf(DEBUG_VERBOSE, "From submit: %s\n", buf_line.Value());
    command_output += buf_line;
    if (strstr(buffer, marker) != NULL) {
      keyLine = buf_line;
    }
  }

  { // Relocated this curly bracket to its previous position to hopefully
    // fix Coverity warning.  Not sure why these curly brackets are here
    // at all...  wenger 2013-06-12
    int status = my_pclose(fp) & 0xff;

    if (keyLine == "") {
      debug_printf(DEBUG_NORMAL, "failed while reading from pipe.\n");
      debug_printf(DEBUG_NORMAL, "Read so far: %s\n", command_output.Value());
      return false;
    }

    if (status != 0) {
      debug_printf(DEBUG_NORMAL, "Read from pipe: %s\n",
                   command_output.Value());
      debug_printf( DEBUG_QUIET, "ERROR while running \"%s\": "
                    "my_pclose() failed with status %d (errno %d, %s)!\n",
                    cmd.Value(), status, errno, strerror( errno ) );
      return false;
    }
  }

  int jobProcCount;
  if ( !parseFnc( keyLine.Value(), jobProcCount, condorID._cluster) ) {
    // We are going forward (do not return false here)
    // Expectation is that higher levels will catch that we
    // did not get a cluster initialized properly here, fail,
    // and write a rescue DAG. gt3658 2013-06-03
    //
    // This is better than the old failure that would submit
    // DAGMAN_MAX_SUBMIT_ATTEMPT copies of the same job.
    debug_printf( DEBUG_NORMAL, "WARNING: submit returned 0, but "
                  "parsing submit output failed!\n" );
    // Returning here so we don't try to process invalid values
    // below.  (This should really return something like "submit failed
    // don't retry" -- see gittrac #3685.)
    return true;
  }

  // Check for multiple job procs if configured to disallow that.
  if ( prohibitMultiJobs && (jobProcCount > 1) ) {
    debug_printf( DEBUG_NORMAL, "Submit generated %d job procs; "
                  "disallowed by DAGMAN_PROHIBIT_MULTI_JOBS setting\n",
                  jobProcCount );
    main_shutdown_rescue( EXIT_ERROR, Dag::DAG_STATUS_ERROR );
  }
  
  return true;
}
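
For reference, the shape of the line that parse_condor_submit has to handle is documented in the comment block above ("1 job(s) submitted to cluster 2267."). The following is not HTCondor's parser, just an illustrative sketch of how the proc count and cluster ID can be pulled out of a line in that format with sscanf; the name parse_submit_line_sketch is hypothetical.

#include <cstdio>
#include <cstring>

// Illustrative sketch only (not HTCondor's parse_condor_submit):
// extract the proc count and cluster ID from a line shaped like
//   "1 job(s) submitted to cluster 2267."
static bool parse_submit_line_sketch( const char *line, int &jobProcCount,
                                      int &cluster )
{
  // Require the same marker that the read loop above searches for.
  if ( strstr( line, " submitted to cluster " ) == NULL ) {
    return false;
  }
  if ( sscanf( line, "%d job(s) submitted to cluster %d",
               &jobProcCount, &cluster ) != 2 ) {
    return false;
  }
  return jobProcCount >= 1 && cluster >= 0;
}

Fed the sample line from the comment block, this sketch yields jobProcCount == 1 and cluster == 2267.
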