Code Example #1
File: docker-api.cpp  Project: dcbradley/htcondor
int DockerAPI::detect( CondorError & err ) {
	// FIXME: Remove ::version() as a public API and return it from here,
	// because there's no point in doing this twice.
	std::string version;
	int rval = DockerAPI::version( version, err );
	if( rval  != 0 ) {
		dprintf(D_ALWAYS, "DockerAPI::detect() failed to detect the Docker version; assuming absent.\n" );
		return -4;
	}

	ArgList infoArgs;
	if ( ! add_docker_arg(infoArgs))
		return -1;
	infoArgs.AppendArg( "info" );

	MyString displayString;
	infoArgs.GetArgsStringForLogging( & displayString );
	dprintf( D_FULLDEBUG, "Attempting to run: '%s'.\n", displayString.c_str() );

#if 1
	MyPopenTimer pgm;
	if (pgm.start_program(infoArgs, true, NULL, false) < 0) {
		dprintf( D_ALWAYS | D_FAILURE, "Failed to run '%s'.\n", displayString.c_str() );
		return -2;
	}

	int exitCode;
	if ( ! pgm.wait_for_exit(default_timeout, &exitCode) || exitCode != 0) {
		pgm.close_program(1);
		MyString line;
		line.readLine(pgm.output(), false); line.chomp();
		dprintf( D_ALWAYS, "'%s' did not exit successfully (code %d); the first line of output was '%s'.\n", displayString.c_str(), exitCode, line.c_str());
		return -3;
	}

	if (IsFulldebug(D_ALWAYS)) {
		MyString line;
		// print each line of the 'docker info' output at full-debug level
		while (line.readLine(pgm.output(), false)) {
			line.chomp();
			dprintf( D_FULLDEBUG, "[docker info] %s\n", line.c_str() );
		}
	}

#else
	FILE * dockerResults = my_popen( infoArgs, "r", 1 , 0, false);
	if( dockerResults == NULL ) {
		dprintf( D_ALWAYS | D_FAILURE, "Failed to run '%s'.\n", displayString.c_str() );
		return -2;
	}

	// Even if we don't care about the success output, the failure output
	// can be handy for debugging...
	char buffer[1024];
	std::vector< std::string > output;
	while( fgets( buffer, 1024, dockerResults ) != NULL ) {
		size_t end = strlen(buffer);
		if (end > 0 && buffer[end-1] == '\n') { buffer[end-1] = '\0'; }
		output.push_back( buffer );
	}
	for( unsigned i = 0; i < output.size(); ++i ) {
		dprintf( D_FULLDEBUG, "[docker info] %s\n", output[i].c_str() );
	}

	int exitCode = my_pclose( dockerResults );
	if( exitCode != 0 ) {
		dprintf( D_ALWAYS, "'%s' did not exit successfully (code %d); the first line of output was '%s'.\n", displayString.c_str(), exitCode, output[0].c_str() );
		return -3;
	}
#endif
	return 0;
}
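A hypothetical caller sketch (not from the HTCondor sources) showing how detect() and version() might be combined at daemon startup; this is exactly the double call the FIXME comment above refers to. All names are taken from the code shown in these examples.

	// Hypothetical startup probe: detect() already calls version() internally,
	// so a caller that also wants the version string ends up running it twice.
	CondorError err;
	if (DockerAPI::detect( err ) != 0) {
		dprintf( D_ALWAYS, "Docker not usable on this machine; disabling docker universe.\n" );
	} else {
		std::string version;
		if (DockerAPI::version( version, err ) == 0) {
			dprintf( D_FULLDEBUG, "Detected %s\n", version.c_str() );
		}
	}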
Code Example #2
File: vmgahp_common.cpp  Project: dcbradley/htcondor
const char* 
get_caller_name(void)
{
	return caller_name.Value();
}
Code Example #3
File: vmgahp_common.cpp  Project: dcbradley/htcondor
/**
 * merge_stderr_with_stdout is intended for clients of this function
 * that wish to have the old behavior, where stderr and stdout were
 * both added to the same StringList.
 */
int systemCommand( ArgList &args, priv_state priv, StringList *cmd_out, StringList * cmd_in,
		   StringList *cmd_err, bool merge_stderr_with_stdout)
{
	int result = 0;
	FILE *fp = NULL;
	FILE * fp_for_stdin = NULL;
	FILE * childerr = NULL;
	MyString line;
	char buff[1024];
	StringList *my_cmd_out = cmd_out;

	priv_state prev = get_priv_state();

	int stdout_pipes[2];
	int stdin_pipes[2];
	int pid;
	bool use_privsep = false;
	switch ( priv ) {
	case PRIV_ROOT:
		prev = set_root_priv();
		break;
	case PRIV_USER:
	case PRIV_USER_FINAL:
		prev = set_user_priv();
#if !defined(WIN32)
		if ( privsep_enabled() && (job_user_uid != get_condor_uid()) ) {
			use_privsep = true;
		}
#endif
		break;
	default:
		// Stay as Condor user, this should be a no-op
		prev = set_condor_priv();
	}
#if defined(WIN32)
	if((cmd_in != NULL) || (cmd_err != NULL))
	  {
	    vmprintf(D_ALWAYS, "Invalid use of systemCommand() in Windows.\n");
	    set_priv( prev );
	    return -1;
	  }
	//if ( use_privsep ) {
	//	fp = privsep_popen(args, "r", want_stderr, job_user_uid);
	//}
	//else {
	fp = my_popen( args, "r", merge_stderr_with_stdout ? MY_POPEN_OPT_WANT_STDERR : 0 );
	//}
#else
	// The old way of doing things (and the Win32 way of doing
	//	things)
	// fp = my_popen( args, "r", want_stderr ? MY_POPEN_OPT_WANT_STDERR : 0 );
	if((cmd_err != NULL) && merge_stderr_with_stdout)
	  {
	    vmprintf(D_ALWAYS, "Invalid use of systemCommand().\n");
	    set_priv( prev );
	    return -1;
	  }

	PrivSepForkExec psforkexec;
	char ** args_array = args.GetStringArray();
	int error_pipe[2];
		// AIX 5.2, Solaris 5.9, HPUX 11 don't have AF_LOCAL

	if(pipe(stdin_pipes) < 0)
	  {
	    vmprintf(D_ALWAYS, "Error creating pipe: %s\n", strerror(errno));
		deleteStringArray( args_array );
	    set_priv( prev );
	    return -1;
	  }
	if(pipe(stdout_pipes) < 0)
	  {
	    vmprintf(D_ALWAYS, "Error creating pipe: %s\n", strerror(errno));
	    close(stdin_pipes[0]);
	    close(stdin_pipes[1]);
		deleteStringArray( args_array );
	    set_priv( prev );
	    return -1;
	  }

	if ( use_privsep ) {
	  if(!psforkexec.init())
	    {
	      vmprintf(D_ALWAYS,
		       "my_popenv failure on %s\n",
		       args_array[0]);
	      close(stdin_pipes[0]);
	      close(stdin_pipes[1]);
	      close(stdout_pipes[0]);
	      close(stdout_pipes[1]);
		  deleteStringArray( args_array );
	      set_priv( prev );
	      return -1;
	    }
	}

	if(cmd_err != NULL)
	  {
	    if(pipe(error_pipe) < 0)
	      {
		vmprintf(D_ALWAYS, "Could not open pipe for error output: %s\n", strerror(errno));
		close(stdin_pipes[0]);
		close(stdin_pipes[1]);
		close(stdout_pipes[0]);
		close(stdout_pipes[1]);
		deleteStringArray( args_array );
		set_priv( prev );
		return -1;
	      }
	  }
	// Now fork and do what my_popen used to do
	pid = fork();
	if(pid < 0)
	  {
	    vmprintf(D_ALWAYS, "Error forking: %s\n", strerror(errno));
		close(stdin_pipes[0]);
		close(stdin_pipes[1]);
		close(stdout_pipes[0]);
		close(stdout_pipes[1]);
		if(cmd_err != NULL) {
			close(error_pipe[0]);
			close(error_pipe[1]);
		}
		deleteStringArray( args_array );
		set_priv( prev );
	    return -1;
	  }
	if(pid == 0)
	  {
	    close(stdout_pipes[0]);
	    close(stdin_pipes[1]);
	    dup2(stdout_pipes[1], STDOUT_FILENO);
	    dup2(stdin_pipes[0], STDIN_FILENO);

	    if(merge_stderr_with_stdout) dup2(stdout_pipes[1], STDERR_FILENO);
	    else if(cmd_err != NULL) 
	      {
		close(error_pipe[0]);
		dup2(error_pipe[1], STDERR_FILENO);
	      }


			/* to be safe, we want to switch our real uid/gid to our
			   effective uid/gid (shedding any privileges we've got).
			   we also want to drop any supplemental groups we're in.
			   we want to run this popen()'ed thing as our effective
			   uid/gid, dropping the real uid/gid.  all of these calls
			   will fail if we don't have a ruid of 0 (root), but
			   that's harmless.  also, note that we have to stash our
			   effective uid, then switch our euid to 0 to be able to
			   set our real uid/gid.
			   We wrap some of the calls in if-statements to quiet some
			   compilers that object to us not checking the return values.
			*/
	    uid_t euid = geteuid();
	    gid_t egid = getegid();
	    if ( seteuid( 0 ) ) { }
	    setgroups( 1, &egid );
	    if ( setgid( egid ) ) { }
	    if ( setuid( euid ) ) _exit(ENOEXEC); // Unsafe?
	    
	    install_sig_handler(SIGPIPE, SIG_DFL);
	    sigset_t sigs;
	    sigfillset(&sigs);
	    sigprocmask(SIG_UNBLOCK, &sigs, NULL);


	    MyString cmd = args_array[0];

	    if ( use_privsep ) {
	    
	      ArgList al;
	      psforkexec.in_child(cmd, al);
          deleteStringArray( args_array );
	      args_array = al.GetStringArray();
	    }


	    execvp(cmd.Value(), args_array);
	    vmprintf(D_ALWAYS, "Could not execute %s: %s\n", args_array[0], strerror(errno));
	    exit(-1);
	  }
	close(stdin_pipes[0]);
	close(stdout_pipes[1]);
	fp_for_stdin = fdopen(stdin_pipes[1], "w");
	fp = fdopen(stdout_pipes[0], "r");
	if(cmd_err != NULL)
	  {
	    close(error_pipe[1]);
	    childerr = fdopen(error_pipe[0],"r");
	    if(childerr == 0)
	      {
		vmprintf(D_ALWAYS, "Could not open pipe for reading child error output: %s\n", strerror(errno));
		close(error_pipe[0]);
		close(stdin_pipes[1]);
		close(stdout_pipes[0]);
	    fclose(fp);
		fclose(fp_for_stdin);
		deleteStringArray( args_array );
		set_priv( prev );
		return -1;
	      }
	  }

	if ( use_privsep ) {
	  FILE* _fp = psforkexec.parent_begin();
	  privsep_exec_set_uid(_fp, job_user_uid);
	  privsep_exec_set_path(_fp, args_array[0]);
	  privsep_exec_set_args(_fp, args);
	  Env env;
	  env.MergeFrom(environ);
	  privsep_exec_set_env(_fp, env);
	  privsep_exec_set_iwd(_fp, ".");

	  privsep_exec_set_inherit_fd(_fp, 1);
	  privsep_exec_set_inherit_fd(_fp, 2);
	  privsep_exec_set_inherit_fd(_fp, 0);
	
	  if (!psforkexec.parent_end()) {
	    vmprintf(D_ALWAYS,
		     "my_popenv failure on %s\n",
		     args_array[0]);
	    fclose(fp);
		fclose(fp_for_stdin);
		if (childerr) {
			fclose(childerr);
		}
		deleteStringArray( args_array );
		set_priv( prev );
	    return -1;
	  }
	}

	deleteStringArray( args_array );
#endif
	set_priv( prev );
	if ( fp == NULL ) {
		MyString args_string;
		args.GetArgsStringForDisplay( &args_string, 0 );
		vmprintf( D_ALWAYS, "Failed to execute command: %s\n",
				  args_string.Value() );
		if (childerr)
			fclose(childerr);
		return -1;
	}

	if(cmd_in != NULL) {
	  cmd_in->rewind();
	  char * tmp;
	  while((tmp = cmd_in->next()) != NULL)
	    {
	      fprintf(fp_for_stdin, "%s\n", tmp);
	      fflush(fp_for_stdin);
	    }
	}
	if (fp_for_stdin) {
	  // So that we will not be waiting for output while the
	  // script waits for stdin to be closed.
	  fclose(fp_for_stdin);
	}

	if ( my_cmd_out == NULL ) {
		my_cmd_out = new StringList();
	}

	while ( fgets( buff, sizeof(buff), fp ) != NULL ) {
		line += buff;
		if ( line.chomp() ) {
			my_cmd_out->append( line.Value() );
			line = "";
		}
	}

	if(cmd_err != NULL)
	  {
	    while(fgets(buff, sizeof(buff), childerr) != NULL)
	      {
		line += buff;
		if(line.chomp())
		  {
		    cmd_err->append(line.Value());
		    line = "";
		  }
	      }
	    fclose(childerr);
	  }
#if defined(WIN32)
	result = my_pclose( fp );
#else
	// Why close first?  Just in case the child process is waiting
	// on a read, and we have nothing more to send it.  It will
	// now receive a SIGPIPE.
	fclose(fp);
	if(waitpid(pid, &result, 0) < 0)
	  {
	    vmprintf(D_ALWAYS, "Unable to wait: %s\n", strerror(errno));
		if ( cmd_out == NULL ) {
			delete my_cmd_out;
		}
	   
	    return -1;
	  }
#endif
	if( result != 0 ) {
		MyString args_string;
		args.GetArgsStringForDisplay(&args_string,0);
		vmprintf(D_ALWAYS,
		         "Command returned non-zero: %s\n",
		         args_string.Value());
		my_cmd_out->rewind();
		const char *next_line;
		while ( (next_line = my_cmd_out->next()) ) {
			vmprintf( D_ALWAYS, "  %s\n", next_line );
		}
	}
	if ( cmd_out == NULL ) {
		delete my_cmd_out;
	}
	return result;
}
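A minimal, hypothetical usage sketch for the function above, assuming the vmgahp headers declaring ArgList, StringList, vmprintf, and systemCommand are in scope. It exercises the merge_stderr_with_stdout path described in the doc comment, so stderr lines land in the same StringList as stdout.

	// Hypothetical example: run "/bin/ls -l" as the user and collect stdout and
	// stderr together in one StringList (the "old behavior" described above).
	ArgList ls_args;
	ls_args.AppendArg( "/bin/ls" );
	ls_args.AppendArg( "-l" );

	StringList output;
	int rc = systemCommand( ls_args, PRIV_USER, &output, NULL, NULL, true );
	if ( rc == 0 ) {
		output.rewind();
		const char *out_line;
		while ( (out_line = output.next()) != NULL ) {
			vmprintf( D_FULLDEBUG, "ls: %s\n", out_line );
		}
	}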
Code Example #4
File: sample2_test.cpp  Project: liyustar/liblyx
TEST(MyString, CopyConstructor) {
    const MyString s1(kHelloString);
    const MyString s2 = s1;
    EXPECT_EQ(0, strcmp(s2.c_string(), kHelloString));
}
Code Example #5
// Note: for this to work correctly, it's vital that the events we generate
// in recovery mode exactly match how they were output in "non-recovery"
// mode, so we can compare timestamps, and, if the timestamp matches
// the last pre-recovery timestamp, the entire event string.
void
JobstateLog::InitializeRecovery()
{
	debug_printf( DEBUG_DEBUG_2, "JobstateLog::InitializeRecovery()\n" );

	if ( !_jobstateLogFile ) {
		return;
	}

		//
		// Find the timestamp of the last "real" event written to the
		// jobstate.log file.  Any events that we see in recovery mode
		// that have an earlier timestamp should *not* be re-written
		// to the jobstate.log file.  Any events with later timestamps
		// should be written.  Events with equal timestamps need to be
		// tested individually.
		//

	FILE *infile = safe_fopen_wrapper_follow( _jobstateLogFile, "r" );
	if ( !infile ) {
			// This is a fatal error, because by the time we get here,
			// we should, at the very least, have written the
			// DAGMAN_STARTED "event".
		debug_printf( DEBUG_QUIET,
					"Could not open jobstate log file %s for reading.\n",
					_jobstateLogFile );
		main_shutdown_graceful();
		return;
	}

	MyString line;
	off_t startOfLastTimestamp = 0;

	while ( true ) {
		off_t currentOffset = ftell( infile );
		if ( !line.readLine( infile ) ) {
			break;
		}

		time_t newTimestamp;
		MyString nodeName;
		int seqNum;
		if ( ParseLine( line, newTimestamp, nodeName, seqNum ) ) {
				// We don't want to look at "INTERNAL" events here, or we'll
				// get goofed up by our own DAGMAN_STARTED event, etc.
			if ( nodeName != INTERNAL_NAME ) {
					// Note: we don't absolutely rely on the timestamps
					// being in order -- the > below rather than == is
					// important in that case.
				if ( newTimestamp > _lastTimestampWritten ) {
					startOfLastTimestamp = currentOffset;
					_lastTimestampWritten = newTimestamp;
				}
			}
		}
	}

	debug_printf( DEBUG_DEBUG_2, "_lastTimestampWritten: %lu\n",
				(unsigned long)_lastTimestampWritten );

		//
		// Now find all lines that match the last timestamp, and put
		// them into a hash table for future reference.
		//
	if ( fseek( infile, startOfLastTimestamp, SEEK_SET ) != 0 ) {
		debug_printf( DEBUG_QUIET,
					"Error seeking in jobstate log file %s.\n",
					_jobstateLogFile );
	}

	while ( line.readLine( infile ) ) {
		time_t newTimestamp;
		MyString nodeName;
		int seqNum;
		if ( ParseLine( line, newTimestamp, nodeName, seqNum ) ) {
			if ( (newTimestamp == _lastTimestampWritten) &&
						(nodeName != INTERNAL_NAME) ) {
				_lastTimestampLines.insert( line );
				debug_printf( DEBUG_DEBUG_2,
							"Appended <%s> to _lastTimestampLines\n",
							line.Value() );
			}
		}
	}

	fclose( infile );
}
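The data gathered above (_lastTimestampWritten and _lastTimestampLines) is what the write path consults during recovery. The helper below is a simplified, hypothetical stand-in for that check, not the actual JobstateLog code; it uses std::set<std::string> purely for illustration.

#include <set>
#include <string>
#include <ctime>

// Hypothetical, simplified version of the recovery-mode write check:
// events older than the last pre-recovery timestamp were already written,
// newer ones were not, and events with an equal timestamp are re-written
// only if the exact event string is not already present.
static bool ShouldWriteInRecovery( time_t eventTime, const std::string &eventLine,
                                   time_t lastTimestampWritten,
                                   const std::set<std::string> &lastTimestampLines )
{
	if ( eventTime < lastTimestampWritten ) return false; // already in the file
	if ( eventTime > lastTimestampWritten ) return true;  // definitely new
	return lastTimestampLines.count( eventLine ) == 0;    // equal timestamp: compare whole line
}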
Code Example #6
File: VMRegister.cpp  Project: emaste/htcondor
void
VMRegister::requestHostClassAds(void)
{
	// find host startd daemon
	if( !m_vm_host_daemon )
		m_vm_host_daemon = vmapi_findDaemon( m_vm_host_name, DT_STARTD);

	if( !m_vm_host_daemon ) {
		dprintf( D_FULLDEBUG, "Can't find host(%s) Startd daemon\n", m_vm_host_name );
		return;
	}

	ClassAd query_ad;
	query_ad.SetMyTypeName(QUERY_ADTYPE);
	query_ad.SetTargetTypeName(STARTD_ADTYPE);
	query_ad.Assign(ATTR_REQUIREMENTS, true);

	char *addr = m_vm_host_daemon->addr();
	Daemon hstartd(DT_STARTD, addr);
	ReliSock ssock;

	ssock.timeout( VM_SOCKET_TIMEOUT );
	ssock.encode();

	if( !ssock.connect(addr) ) {
		dprintf( D_FULLDEBUG, "Failed to connect to host startd(%s)\n to get host classAd", addr);
		return;
	}

	if(!hstartd.startCommand( QUERY_STARTD_ADS, &ssock )) {
		dprintf( D_FULLDEBUG, "Failed to send QUERY_STARTD_ADS command to host startd(%s)\n", addr);
		return;
	}

	if( !query_ad.put(ssock) ) {
		dprintf(D_FULLDEBUG, "Failed to send query Ad to host startd(%s)\n", addr);
	}

	if( !ssock.end_of_message() ) {
		dprintf(D_FULLDEBUG, "Failed to send query EOM to host startd(%s)\n", addr);
	}

	// Read host classAds
	ssock.timeout( VM_SOCKET_TIMEOUT );
	ssock.decode();
	int more = 1, num_ads = 0;
	ClassAdList adList;
	ClassAd *ad;

	while (more) {
		if( !ssock.code(more) ) {
			ssock.end_of_message();
			return;
		}

		if(more) {
			ad = new ClassAd;
			if( !ad->initFromStream(ssock) ) {
				ssock.end_of_message();
				delete ad;
				return;
			}

			adList.Insert(ad);
			num_ads++;
		}
	}

	ssock.end_of_message();

	dprintf(D_FULLDEBUG, "Got %d classAds from host\n", num_ads);

	// Although we can get more than one classAd from the host machine,
	// we use only the first classAd
	adList.Rewind();
	ad = adList.Next();

#if !defined(WANT_OLD_CLASSADS)
	ad->AddTargetRefs( TargetJobAttrs );
#endif

	// Get each attribute from the classAd,
	// adding "HOST_" in front of each attribute name
	const char *name;
	ExprTree *expr;

	ad->ResetExpr();
	while( ad->NextExpr(name, expr) ) {
		MyString attr;
		attr += "HOST_";
		attr += name;

		// Insert or Update an attribute to host_classAd in a VMRegister object
		ExprTree * pTree = expr->Copy();
		host_classad->Insert(attr.Value(), pTree, true);
	}
}
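A hypothetical follow-up snippet: once the loop above has copied the host attributes into host_classad with the "HOST_" prefix, other VMRegister code can read them back like any other attribute. The attribute name HOST_Arch is only an illustration.

	// Hypothetical lookup of a prefixed host attribute (illustrative only).
	MyString host_arch;
	if ( host_classad->LookupString( "HOST_Arch", host_arch ) ) {
		dprintf( D_FULLDEBUG, "Host Arch is %s\n", host_arch.Value() );
	}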
Code Example #7
File: FTEST_tokener.cpp  Project: bbockelm/htcondor
static bool test_tokener_parse_realistic() {
    emit_test("Test realistic tokener parsing functions");

	MyString msg;
	std::string temp;
	std::set<std::string> attrs;
	std::set<std::string> labels;
	std::set<std::string> formats;

	StringLiteralInputStream lines(
		"# blackhole.cpf\n"
		"# show static slots with high job churn\n"
		"SELECT\n"
		"   Machine WIDTH -24 \n"
		"   splitslotname(Name)[0] AS Slot WIDTH -8\n"
		"   Strcat(Arch,\"_\",IfThenElse(OpSys==\"WINDOWS\",OpSysShortName,OpSysName)) AS Platform\n"
		"   Cpus AS CPU\n"
		"   Memory     PRINTF \"%6d\"     AS Mem\n"
		"   Strcat(State,\"/\",Activity) AS Status WIDTH -14 TRUNCATE\n"
		"   EnteredCurrentActivity AS '  StatusTime'  PRINTAS ACTIVITY_TIME NOPREFIX\n"
		"   IfThenElse(JobId isnt undefined, JobId, \"no\") AS JobId WIDTH -11\n"
		"   RecentJobStarts/20.0 AS J/Min PRINTF %.2f\n"
		"WHERE RecentJobStarts >= 1 && PartitionableSlot =!= true && DynamicSlot =!= true\n"
		"SUMMARY \n"
	);

	tokener toke("");
	int state = 0;
	while (toke.set(lines.nextline())) {
		REQUIRE(toke.next());
		if (toke.starts_with("#")) {
			REQUIRE(toke.next() && (toke.matches("blackhole.cpf") || toke.matches("show")));
			continue;
		}
		const TestTableItem * ti = Keywords.lookup_token(toke);
		if (ti) {
			if (ti->id == item_SELECT) { REQUIRE(state == 0 && ! toke.next()); state = ti->id; continue; }
			else if (ti->id == item_WHERE) { REQUIRE(state == item_SELECT); }
			else if (ti->id == item_SUMMARY) { REQUIRE(state == item_WHERE); }
			else {
				emit_step_failure(__LINE__, "invalid transition");
			}
			state = ti->id;
		}
		switch (state) {
		default:
			emit_step_failure(__LINE__, "invalid state");
			break;
		case item_WHERE:
			REQUIRE(toke.next());
			toke.copy_to_end(temp);
			REQUIRE(temp == "RecentJobStarts >= 1 && PartitionableSlot =!= true && DynamicSlot =!= true");
			break;
		case item_SUMMARY:
			REQUIRE(!toke.next());
			break;
		case item_SELECT:
			toke.mark();
			bool got_attr = false;
			while (toke.next()) {
				ti = Keywords.lookup_token(toke);
				REQUIRE(!ti || ti->index > 1);
				if (ti && ! got_attr) {
					toke.copy_marked(temp);
					trim(temp);
					attrs.insert(temp);
					got_attr = true;
				} else if ( ! ti && got_attr) {
					msg = "invalid token at: ";
					msg += (toke.content().c_str() + toke.offset());
					emit_step_failure(__LINE__, msg.c_str());
				}
				if ( ! ti) continue;
				switch (ti->id) {
				case item_AS:
					REQUIRE(toke.next()); toke.copy_token(temp);
					labels.insert(temp);
					toke.mark_after();
					break;
				case item_PRINTF:
				case item_PRINTAS:
				case item_WIDTH:
					REQUIRE(toke.next()); toke.copy_token(temp);
					formats.insert(temp);
					break;
				}
			}
			break;
		}
	}

	std::set<std::string>::const_iterator it;
	it = attrs.begin();
	REQUIRE(*it++ == "Cpus");
	REQUIRE(*it++ == "EnteredCurrentActivity");
	REQUIRE(*it++ == "IfThenElse(JobId isnt undefined, JobId, \"no\")");
	REQUIRE(*it++ == "Machine");
	REQUIRE(*it++ == "Memory");
	REQUIRE(*it++ == "RecentJobStarts/20.0");
	REQUIRE(*it++ == "Strcat(Arch,\"_\",IfThenElse(OpSys==\"WINDOWS\",OpSysShortName,OpSysName))");
	REQUIRE(*it++ == "Strcat(State,\"/\",Activity)");
	REQUIRE(*it++ == "splitslotname(Name)[0]");
	REQUIRE(it == attrs.end());

	it = labels.begin();
	REQUIRE(*it++ == "  StatusTime");
	REQUIRE(*it++ == "CPU");
	REQUIRE(*it++ == "J/Min");
	REQUIRE(*it++ == "JobId");
	REQUIRE(*it++ == "Mem");
	REQUIRE(*it++ == "Platform");
	REQUIRE(*it++ == "Slot");
	REQUIRE(*it++ == "Status");
	REQUIRE(it == labels.end());

	it = formats.begin();
	REQUIRE(*it++ == "%.2f");
	REQUIRE(*it++ == "%6d");
	REQUIRE(*it++ == "-11");
	REQUIRE(*it++ == "-14");
	REQUIRE(*it++ == "-24");
	REQUIRE(*it++ == "-8");
	REQUIRE(*it++ == "ACTIVITY_TIME");
	REQUIRE(it == formats.end());
	/*
	for (it = attrs.begin(); it != attrs.end(); ++it) {
		emit_comment(msg.formatstr("attr: '%s'", it->c_str()));
	}
	for (it = labels.begin(); it != labels.end(); ++it) {
		emit_comment(msg.formatstr("label: '%s'", it->c_str()));
	}
	for (it = formats.begin(); it != formats.end(); ++it) {
		emit_comment(msg.formatstr("format: '%s'", it->c_str()));
	}
	*/

	return REQUIRED_RESULT();
}
Code Example #8
File: io_qpath.cpp  Project: CS-svnmirror/farmanager
void MYRTLEXP QueryHotPaths( PHotPathArray arr )
  {
     arr->DeleteAll();
#if defined(__QNX__)
    int     num_nids;
    char    node_name[22];
    char    buffer[ 500 ];
    char    str[100];
    struct _osinfo osi;

    if ( (num_nids=qnx_net_alive(buffer,sizeof(buffer))) == -1)
      return;

    for( int n = 1; n < num_nids+1; n++) {
      if ( !buffer[n] ||
           qnx_osinfo( n,&osi ) == -1 )
        continue;

      qnx_nidtostr( n, node_name, sizeof(node_name));
      SNprintf( str, sizeof(str), "//%ld/ ~%-6s~ CPU:~%3u~-~%3d~/~%3d~ Ver:~%2d.%02d%c~ Mem:~%d~/~%d~",
                osi.nodename,
                osi.machine, osi.cpu, osi.fpu,
                osi.cpu_speed, osi.version/100, osi.version%100, osi.release,
                osi.freememk,  osi.totmemk );

      PHotPathEntry pe = arr->Add( new HotPathEntry );
      pe->Path.printf( "//%s/", node_name );
      pe->Label.printf( " ~%c~ ³",'A'+n-1 );
      pe->Description = str;
    }
#else
#if defined(__HDOS__)
   int  count,c,old;
   char astr[] = "X:\\",
        str[]  = "X";

   count = setdisk( old = getdisk() );
   for ( c = 0; c < count; c++ ) {
     if ( _chdrive(c+1) != 0 )
       continue;

     astr[0] = (char)('A'+c);
     str[0]  = (char)('A'+c);

     CONSTSTR d;
     switch( GetDiskType( c+1 ) ) {
      case     DRIVE_CDROM: d = "CD-ROM drive";     break;
      case   DRIVE_RAMDISK: d = "RAM drive";        break;
      case    DRIVE_REMOTE: d = "Remote drive";     break;
      case     DRIVE_FIXED: d = "Hard drive";       break;
      case DRIVE_REMOVABLE: d = "Removable drive";  break;
      case     DRIVE_SUBST: d = "Subst drive";      break;
      case  DRIVE_DBLSPACE: d = "DblSpace drive";   break;
                   default: d = NULL;
     }
     if ( d ) {
       MyString s;
       s.printf( " ~%s~ ¦", str );
       arr->Add( new HotPathEntry( astr, s, d ) );
     }
   }
   setdisk( old );

#else
#if defined(__HWIN32__)
   DWORD dw = GetLogicalDrives();
   UINT  type;
   char  astr[] = "X:\\",
         str[]  = "X";
   for ( int n = 0; n < 32; n++ ) {
     astr[0] = (char)('A'+n);
     str[0]  = (char)('A'+n);

     if ( (dw & (1UL<<n)) == 0 || (type=GetDriveType(astr)) <= 1 )
       continue;

     CONSTSTR d;
     switch( type ) {
      case     DRIVE_CDROM: d = "CD-ROM drive";    break;
      case   DRIVE_RAMDISK: d = "RAM drive";        break;
      case    DRIVE_REMOTE: d = "Remote drive";     break;
      case     DRIVE_FIXED: d = "Hard drive";       break;
      case DRIVE_REMOVABLE: d = "Removable drive";  break;
                   default: d = NULL;
     }
     if ( d ) {
       MyString s;
       s.printf( " ~%s~ ¦", str );
       arr->Add( new HotPathEntry( astr, s, d ) );
     }
   }
#else
#if defined(__HWIN16__)
   UINT  type;
   char  astr[] = "X:\\",
         str[]  = "X";

   for ( int n = 0; n < 26; n++ ) {
     if ( (type=GetDriveType(n)) == 1 )
       continue;

     astr[0] = (char)('A'+n);
     str[0]  = (char)('A'+n);

     CONSTSTR d;
     switch( type ) {
      case    DRIVE_REMOTE: d = "Remote (network) drive"; break;
      case     DRIVE_FIXED: d = "Hard drive";             break;
      case DRIVE_REMOVABLE: d = "Removable drive";        break;
      case               0: d = "Undefined drive";        break;
                   default: d = NULL;
     }
     if ( d ) {
       MyString s;
       s.printf( " ~%s~ ¦", str );
       arr->Add( new HotPathEntry( astr, s, d ) );
     }
   }
#else
#error ERR_PLATFORM
#endif
#endif
#endif
#endif
}
Code Example #9
int
OsProc::StartJob(FamilyInfo* family_info, NetworkNamespaceManager * network_manager = NULL, FilesystemRemap* fs_remap=NULL)
{
	int nice_inc = 0;
	bool has_wrapper = false;

	dprintf(D_FULLDEBUG,"in OsProc::StartJob()\n");

	if ( !JobAd ) {
		dprintf ( D_ALWAYS, "No JobAd in OsProc::StartJob()!\n" );
		return 0;
	}

	MyString JobName;
	if ( JobAd->LookupString( ATTR_JOB_CMD, JobName ) != 1 ) {
		dprintf( D_ALWAYS, "%s not found in JobAd.  Aborting StartJob.\n", 
				 ATTR_JOB_CMD );
		return 0;
	}

	const char* job_iwd = Starter->jic->jobRemoteIWD();
	dprintf( D_ALWAYS, "IWD: %s\n", job_iwd );

		// some operations below will require a PrivSepHelper if
		// PrivSep is enabled (if it's not, privsep_helper will be
		// NULL)
	PrivSepHelper* privsep_helper = Starter->privSepHelper();

		// // // // // // 
		// Arguments
		// // // // // // 

		// prepend the full path to this name so that we
		// don't have to rely on the PATH inside the
		// USER_JOB_WRAPPER or for exec().

    bool transfer_exe = false;
    if (!JobAd->LookupBool(ATTR_TRANSFER_EXECUTABLE, transfer_exe)) {
        transfer_exe = false;
    }

    bool preserve_rel = false;
    if (!JobAd->LookupBool(ATTR_PRESERVE_RELATIVE_EXECUTABLE, preserve_rel)) {
        preserve_rel = false;
    }

    bool relative_exe = is_relative_to_cwd(JobName.Value());

    if (relative_exe && preserve_rel && !transfer_exe) {
        dprintf(D_ALWAYS, "Preserving relative executable path: %s\n", JobName.Value());
    }
	else if ( strcmp(CONDOR_EXEC,JobName.Value()) == 0 ) {
		JobName.sprintf( "%s%c%s",
		                 Starter->GetWorkingDir(),
		                 DIR_DELIM_CHAR,
		                 CONDOR_EXEC );
    }
	else if (relative_exe && job_iwd && *job_iwd) {
		MyString full_name;
		full_name.sprintf("%s%c%s",
		                  job_iwd,
		                  DIR_DELIM_CHAR,
		                  JobName.Value());
		JobName = full_name;

	}

	if( Starter->isGridshell() ) {
			// if we're a gridshell, just try to chmod our job, since
			// globus probably transferred it for us and left it with
			// bad permissions...
		priv_state old_priv = set_user_priv();
		int retval = chmod( JobName.Value(), S_IRWXU | S_IRWXO | S_IRWXG );
		set_priv( old_priv );
		if( retval < 0 ) {
			dprintf ( D_ALWAYS, "Failed to chmod %s!\n", JobName.Value() );
			return 0;
		}
	} 

	ArgList args;

		// Since we may be adding to the argument list, we may need to deal
		// with platform-specific arg syntax in the user's args in order
		// to successfully merge them with the additional wrapper args.
	args.SetArgV1SyntaxToCurrentPlatform();

		// First, put "condor_exec" or whatever at the front of Args,
		// since that will become argv[0] of what we exec(), either
		// the wrapper or the actual job.

	if( !getArgv0() ) {
		args.AppendArg(JobName.Value());
	} else {
		args.AppendArg(getArgv0());
	}
	
		// Support USER_JOB_WRAPPER parameter...
	char *wrapper = NULL;
	if( (wrapper=param("USER_JOB_WRAPPER")) ) {

			// make certain this wrapper program exists and is executable
		if( access(wrapper,X_OK) < 0 ) {
			dprintf( D_ALWAYS, 
					 "Cannot find/execute USER_JOB_WRAPPER file %s\n",
					 wrapper );
			free( wrapper );
			return 0;
		}
		has_wrapper = true;
			// Now, we've got a valid wrapper.  We want that to become
			// "JobName" so we exec it directly, and we want to put
			// what was the JobName (with the full path) as the first
			// argument to the wrapper
		args.AppendArg(JobName.Value());
		JobName = wrapper;
		free(wrapper);
	}
	
		// Support USE_PARROT 
	bool use_parrot = false;
	if( JobAd->LookupBool( ATTR_USE_PARROT, use_parrot) ) {
			// Check for parrot executable
		char *parrot = NULL;
		if( (parrot=param("PARROT")) ) {
			if( access(parrot,X_OK) < 0 ) {
				dprintf( D_ALWAYS, "Unable to use parrot(Cannot find/execute "
					"at %s(%s)).\n", parrot, strerror(errno) );
				free( parrot );
				return 0;
			} else {
				args.AppendArg(JobName.Value());
				JobName = parrot;
				free( parrot );
			}
		} else {
			dprintf( D_ALWAYS, "Unable to use parrot(Undefined path in config"
			" file)" );
			return 0;
		}
	}

		// Either way, we now have to add the user-specified args as
		// the rest of the Args string.
	MyString args_error;
	if(!args.AppendArgsFromClassAd(JobAd,&args_error)) {
		dprintf(D_ALWAYS, "Failed to read job arguments from JobAd.  "
				"Aborting OsProc::StartJob: %s\n",args_error.Value());
		return 0;
	}

		// // // // // // 
		// Environment 
		// // // // // // 

		// Now, instantiate an Env object so we can manipulate the
		// environment as needed.
	Env job_env;

	MyString env_errors;
	if( !Starter->GetJobEnv(JobAd,&job_env,&env_errors) ) {
		dprintf( D_ALWAYS, "Aborting OSProc::StartJob: %s\n",
				 env_errors.Value());
		return 0;
	}


		// // // // // // 
		// Standard Files
		// // // // // // 

	// handle stdin, stdout, and stderr redirection
	int fds[3];
		// initialize these to -2 to mean they're not specified.
		// -1 will be treated as an error.
	fds[0] = -2; fds[1] = -2; fds[2] = -2;

		// in order to open these files we must have the user's privs:
	priv_state priv;
	priv = set_user_priv();

		// if we're in PrivSep mode, we won't necessarily be able to
		// open the files for the job. getStdFile will return us an
		// open FD in some situations, but otherwise will give us
		// a filename that we'll pass to the PrivSep Switchboard
		//
	bool stdin_ok;
	bool stdout_ok;
	bool stderr_ok;
	MyString privsep_stdin_name;
	MyString privsep_stdout_name;
	MyString privsep_stderr_name;
	if (privsep_helper != NULL) {
		stdin_ok = getStdFile(SFT_IN,
		                      NULL,
		                      true,
		                      "Input file",
		                      &fds[0],
		                      &privsep_stdin_name);
		stdout_ok = getStdFile(SFT_OUT,
		                       NULL,
		                       true,
		                       "Output file",
		                       &fds[1],
		                       &privsep_stdout_name);
		stderr_ok = getStdFile(SFT_ERR,
		                       NULL,
		                       true,
		                       "Error file",
		                       &fds[2],
		                       &privsep_stderr_name);
	}
	else {
		fds[0] = openStdFile( SFT_IN,
		                      NULL,
		                      true,
		                      "Input file");
		stdin_ok = (fds[0] != -1);
		fds[1] = openStdFile( SFT_OUT,
		                      NULL,
		                      true,
		                      "Output file");
		stdout_ok = (fds[1] != -1);
		fds[2] = openStdFile( SFT_ERR,
		                      NULL,
		                      true,
		                      "Error file");
		stderr_ok = (fds[2] != -1);
	}

	/* Bail out if we couldn't open the std files correctly */
	if( !stdin_ok || !stdout_ok || !stderr_ok ) {
		/* only close ones that had been opened correctly */
		for ( int i = 0; i <= 2; i++ ) {
			if ( fds[i] >= 0 ) {
				daemonCore->Close_FD ( fds[i] );
			}
		}
		dprintf(D_ALWAYS, "Failed to open some/all of the std files...\n");
		dprintf(D_ALWAYS, "Aborting OsProc::StartJob.\n");
		set_priv(priv); /* go back to original priv state before leaving */
		return 0;
	}

		// // // // // // 
		// Misc + Exec
		// // // // // // 

	if( !ThisProcRunsAlongsideMainProc() ) {
		Starter->jic->notifyJobPreSpawn();
	}

	// compute job's renice value by evaluating the machine's
	// JOB_RENICE_INCREMENT in the context of the job ad...

    char* ptmp = param( "JOB_RENICE_INCREMENT" );
	if( ptmp ) {
			// insert renice expr into our copy of the job ad
		MyString reniceAttr = "Renice = ";
		reniceAttr += ptmp;
		if( !JobAd->Insert( reniceAttr.Value() ) ) {
			dprintf( D_ALWAYS, "ERROR: failed to insert JOB_RENICE_INCREMENT "
				"into job ad, Aborting OsProc::StartJob...\n" );
			free( ptmp );
			return 0;
		}
			// evaluate
		if( JobAd->EvalInteger( "Renice", NULL, nice_inc ) ) {
			dprintf( D_ALWAYS, "Renice expr \"%s\" evaluated to %d\n",
					 ptmp, nice_inc );
		} else {
			dprintf( D_ALWAYS, "WARNING: job renice expr (\"%s\") doesn't "
					 "eval to int!  Using default of 10...\n", ptmp );
			nice_inc = 10;
		}

			// enforce valid ranges for nice_inc
		if( nice_inc < 0 ) {
			dprintf( D_FULLDEBUG, "WARNING: job renice value (%d) is too "
					 "low: adjusted to 0\n", nice_inc );
			nice_inc = 0;
		}
		else if( nice_inc > 19 ) {
			dprintf( D_FULLDEBUG, "WARNING: job renice value (%d) is too "
					 "high: adjusted to 19\n", nice_inc );
			nice_inc = 19;
		}

		ASSERT( ptmp );
		free( ptmp );
		ptmp = NULL;
	} else {
			// if JOB_RENICE_INCREMENT is undefined, default to 10
		nice_inc = 10;
	}

		// in the below dprintfs, we want to skip past argv[0], which
		// is sometimes condor_exec, in the Args string. 

	MyString args_string;
	args.GetArgsStringForDisplay(&args_string, 1);
	if( has_wrapper ) { 
			// print out exactly what we're doing so folks can debug
			// it, if they need to.
		dprintf( D_ALWAYS, "Using wrapper %s to exec %s\n", JobName.Value(), 
				 args_string.Value() );

		MyString wrapper_err;
		wrapper_err.sprintf("%s%c%s", Starter->GetWorkingDir(),
				 	DIR_DELIM_CHAR,
					JOB_WRAPPER_FAILURE_FILE);
		if( ! job_env.SetEnv("_CONDOR_WRAPPER_ERROR_FILE", wrapper_err.Value()) ) {
			dprintf( D_ALWAYS, "Failed to set _CONDOR_WRAPPER_ERROR_FILE environment variable\n");
		}
	} else {
		dprintf( D_ALWAYS, "About to exec %s %s\n", JobName.Value(),
				 args_string.Value() );
	}

	MyString path;
	path.sprintf("%s%c%s", Starter->GetWorkingDir(),
			 	DIR_DELIM_CHAR,
				MACHINE_AD_FILENAME);
	if( ! job_env.SetEnv("_CONDOR_MACHINE_AD", path.Value()) ) {
		dprintf( D_ALWAYS, "Failed to set _CONDOR_MACHINE_AD environment variable\n");
	}

	path.sprintf("%s%c%s", Starter->GetWorkingDir(),
			 	DIR_DELIM_CHAR,
				JOB_AD_FILENAME);
	if( ! job_env.SetEnv("_CONDOR_JOB_AD", path.Value()) ) {
		dprintf( D_ALWAYS, "Failed to set _CONDOR_JOB_AD environment variable\n");
	}

		// Grab the full environment back out of the Env object 
	if(IsFulldebug(D_FULLDEBUG)) {
		MyString env_string;
		job_env.getDelimitedStringForDisplay(&env_string);
		dprintf(D_FULLDEBUG, "Env = %s\n", env_string.Value());
	}

	// Check to see if we need to start this process paused, and if
	// so, pass the right flag to DC::Create_Process().
	int job_opt_mask = DCJOBOPT_NO_CONDOR_ENV_INHERIT;
	if (!param_boolean("JOB_INHERITS_STARTER_ENVIRONMENT",false)) {
		job_opt_mask |= DCJOBOPT_NO_ENV_INHERIT;
	}
	int suspend_job_at_exec = 0;
	JobAd->LookupBool( ATTR_SUSPEND_JOB_AT_EXEC, suspend_job_at_exec);
	if( suspend_job_at_exec ) {
		dprintf( D_FULLDEBUG, "OsProc::StartJob(): "
				 "Job wants to be suspended at exec\n" );
		job_opt_mask |= DCJOBOPT_SUSPEND_ON_EXEC;
	}

	// If there is a requested coresize for this job, enforce it.
	// It is truncated because you can't put an unsigned integer
	// into a classad. I could rewrite condor's use of ATTR_CORE_SIZE to
	// be a float, but then when that attribute is read/written to the
	// job queue log by/or shared between versions of Condor which view the
	// type of that attribute differently, calamity would arise.
	int core_size_truncated;
	size_t core_size;
	size_t *core_size_ptr = NULL;
	if ( JobAd->LookupInteger( ATTR_CORE_SIZE, core_size_truncated ) ) {
		core_size = (size_t)core_size_truncated;
		core_size_ptr = &core_size;
	}

	long rlimit_as_hard_limit = 0;
	char *rlimit_expr = param("STARTER_RLIMIT_AS");
	if (rlimit_expr) {
		classad::ClassAdParser parser;

		classad::ExprTree *tree = parser.ParseExpression(rlimit_expr);
		if (tree) {
			classad::Value val;
			int result;

			if (EvalExprTree(tree, Starter->jic->machClassAd(), JobAd, val) && 
				val.IsIntegerValue(result)) {
					rlimit_as_hard_limit = ((long)result) * 1024 * 1024;
					dprintf(D_ALWAYS, "Setting job's virtual memory rlimit to %ld megabytes\n", rlimit_as_hard_limit);
			} else {
				dprintf(D_ALWAYS, "Can't evaluate STARTER_RLIMIT_AS expression %s\n", rlimit_expr);
			}
		} else {
			dprintf(D_ALWAYS, "Can't parse STARTER_RLIMIT_AS expression: %s\n", rlimit_expr);
		}
	}

	int *affinity_mask = makeCpuAffinityMask(Starter->getMySlotNumber());

#if defined ( WIN32 )
    owner_profile_.update ();
    /*************************************************************
    NOTE: We currently *ONLY* support loading slot-user profiles.
    This limitation will be addressed shortly, by allowing regular 
    users to load their registry hive - Ben [2008-09-31]
    **************************************************************/
    bool load_profile = false,
         run_as_owner = false;
    JobAd->LookupBool ( ATTR_JOB_LOAD_PROFILE, load_profile );
    JobAd->LookupBool ( ATTR_JOB_RUNAS_OWNER,  run_as_owner );
    if ( load_profile && !run_as_owner ) {
        if ( owner_profile_.load () ) {
            /* publish the users environment into that of the main 

            job's environment */
            if ( !owner_profile_.environment ( job_env ) ) {
                dprintf ( D_ALWAYS, "OsProc::StartJob(): Failed to "
                    "export owner's environment.\n" );
            }            
        } else {
            dprintf ( D_ALWAYS, "OsProc::StartJob(): Failed to load "
                "owner's profile.\n" );
        }
    }
#endif

		// While we are still in user priv, print out the username
#if defined(LINUX)
	if( Starter->glexecPrivSepHelper() ) {
			// TODO: if there is some way to figure out the final username,
			// print it out here or after starting the job.
		dprintf(D_ALWAYS,"Running job via glexec\n");
	}
#else
	if( false ) {
	}
#endif
	else {
		char const *username = NULL;
		char const *how = "";
		CondorPrivSepHelper* cpsh = Starter->condorPrivSepHelper();
		if( cpsh ) {
			username = cpsh->get_user_name();
			how = "via privsep switchboard ";
		}
		else {
			username = get_real_username();
		}
		if( !username ) {
			username = "******";
		}
		dprintf(D_ALWAYS,"Running job %sas user %s\n",how,username);
	}

	set_priv ( priv );

    // use this to return more detailed and reliable error message info
    // from create-process operation.
    MyString create_process_err_msg;

	if (privsep_helper != NULL) {
		const char* std_file_names[3] = {
			privsep_stdin_name.Value(),
			privsep_stdout_name.Value(),
			privsep_stderr_name.Value()
		};
		JobPid = privsep_helper->create_process(JobName.Value(),
		                                        args,
		                                        job_env,
		                                        job_iwd,
		                                        fds,
		                                        std_file_names,
		                                        nice_inc,
		                                        core_size_ptr,
		                                        1,
		                                        job_opt_mask,
		                                        family_info,
												affinity_mask,
												&create_process_err_msg);
	}
	else {
		JobPid = daemonCore->Create_Process( JobName.Value(),
		                                     args,
		                                     PRIV_USER_FINAL,
		                                     1,
		                                     FALSE,
		                                     &job_env,
		                                     job_iwd,
		                                     family_info,
		                                     NULL,
		                                     fds,
		                                     NULL,
		                                     nice_inc,
		                                     NULL,
		                                     job_opt_mask, 
		                                     core_size_ptr,
                                             affinity_mask,
											 NULL,
                                             &create_process_err_msg,
					     fs_remap,
					     rlimit_as_hard_limit,
                                             network_manager);
	}

	// Create_Process() saves the errno for us if it is an "interesting" error.
	int create_process_errno = errno;

    // errno is 0 in the privsep case.  This executes for the daemon core create-process logic
    if ((FALSE == JobPid) && (0 != create_process_errno)) {
        if (create_process_err_msg != "") create_process_err_msg += " ";
        MyString errbuf;
        errbuf.sprintf("(errno=%d: '%s')", create_process_errno, strerror(create_process_errno));
        create_process_err_msg += errbuf;
    }

	// now close the descriptors in fds array.  our child has inherited
	// them already, so we should close them so we do not leak descriptors.
	// NOTE, we want to use a special method to close the starter's
	// versions, if that's what we're using, so we don't think we've
	// still got those available in other parts of the code for any
	// reason.
	for ( int i = 0; i <= 2; i++ ) {
		if ( fds[i] >= 0 ) {
			daemonCore->Close_FD ( fds[i] );
		}
	}

	if ( JobPid == FALSE ) {
		JobPid = -1;

		if(!create_process_err_msg.IsEmpty()) {

			// if the reason Create_Process failed was that registering
			// a family with the ProcD failed, it is indicative of a
			// problem regarding this execute machine, not the job. in
			// this case, we'll want to EXCEPT instead of telling the
			// Shadow to put the job on hold. there are probably other
			// error conditions where EXCEPTing would be more appropriate
			// as well...
			//
			if (create_process_errno == DaemonCore::ERRNO_REGISTRATION_FAILED) {
				EXCEPT("Create_Process failed to register the job with the ProcD");
			}

			MyString err_msg = "Failed to execute '";
			err_msg += JobName;
			err_msg += "'";
			if(!args_string.IsEmpty()) {
				err_msg += " with arguments ";
				err_msg += args_string.Value();
			}
			err_msg += ": ";
			err_msg += create_process_err_msg;
			if( !ThisProcRunsAlongsideMainProc() ) {
				Starter->jic->notifyStarterError( err_msg.Value(),
			    	                              true,
			        	                          CONDOR_HOLD_CODE_FailedToCreateProcess,
			            	                      create_process_errno );
			}
		}

		dprintf(D_ALWAYS,"Create_Process(%s,%s, ...) failed: %s\n",
			JobName.Value(), args_string.Value(), create_process_err_msg.Value());
		return 0;
	}

	num_pids++;

	dprintf(D_ALWAYS,"Create_Process succeeded, pid=%d\n",JobPid);

	job_start_time.getTime();

	return 1;
}
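For the STARTER_RLIMIT_AS handling above, the configured expression is evaluated against the machine and job ads and the integer result is interpreted as megabytes. A hypothetical condor_config fragment (Memory is the usual slot attribute, shown here only as an illustration):

# Hypothetical config fragment: cap the job's virtual address space at twice
# the slot's Memory attribute; the starter multiplies the result by 1024*1024.
STARTER_RLIMIT_AS = Memory * 2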
Code Example #10
File: io_part.cpp  Project: CS-svnmirror/farmanager
//---------------------------------------------------------------------------
MyString MYRTLEXP AddLastSlash( MyString& path, char Slash )
  {
    if ( path.Length() && path[ path.Length()-1 ] != Slash )
      path.Add( Slash );
 return path;
}
Code Example #11
int
main( int argc, char* argv[] )
{
	int		i;
	param_functions *p_funcs = NULL;
	
	set_mySubSystem( "DAEMON-TOOL", SUBSYSTEM_TYPE_TOOL );

	MyName = argv[0];
	myDistro->Init( argc, argv );

	FILE *input_fp = stdin;

	for( i=1; i<argc; i++ ) {
		if( match_prefix( argv[i], "-daemontype" ) ) {
			if( argv[i + 1] ) {
				get_mySubSystem()->setName( argv[++i] );
				get_mySubSystem()->setTypeFromName( );
			} else {
				usage();
			}
		} else if( match_prefix( argv[i], "-debug" ) ) {
				// dprintf to console
			Termlog = 1;
			p_funcs = get_param_functions();
			dprintf_config( "DAEMON-TOOL", p_funcs );
			set_debug_flags(NULL, D_FULLDEBUG|D_SECURITY);
		} else if( match_prefix( argv[i], "-" ) ) {
			usage();
		} else {
			usage();
		}
	}

	// If we didn't get told what subsystem we should use, set it
	// to "TOOL".

	if( !get_mySubSystem()->isNameValid() ) {
		get_mySubSystem()->setName( "DAEMON-TOOL" );
	}

	config( 0, true );

	IpVerify ipverify;

	MyString line;
	while( line.readLine(input_fp) ) {
		line.chomp();
		if( line.IsEmpty() || line[0] == '#' ) {
			printf("%s\n",line.Value());
			continue;
		}

		StringList fields(line.Value()," ");
		fields.rewind();

		char const *perm_str = fields.next();
		char const *fqu = fields.next();
		char const *ip = fields.next();
		char const *expected = fields.next();

		MyString sin_str = generate_sinful(ip, 0);

		condor_sockaddr addr;
		if( !addr.from_sinful(sin_str) ) {
			fprintf(stderr,"Invalid ip address: %s\n",ip);
			exit(1);
		}

		DCpermission perm = StringToDCpermission(perm_str);
		if( perm == LAST_PERM ) {
			fprintf(stderr,"Invalid permission level: %s\n",perm_str);
			exit(1);
		}

		if( strcmp(fqu,"*") == 0 ) {
			fqu = "";
		}

		char const *result;
		MyString reason;
		if( ipverify.Verify(perm,addr,fqu,&reason,&reason) != USER_AUTH_SUCCESS ) {
			result = "DENIED";
		}
		else {
			result = "ALLOWED";
		}

		if( expected && strcasecmp(expected,result) != 0 ) {
			printf("Got wrong result '%s' for '%s': reason: %s!\n",
				   result,line.Value(),reason.Value());
			printf("Aborting.\n");
			exit(1);
		}
		if( expected ) {
			printf("%s\n",line.Value());
		}
		else {
			printf("%s %s\n",line.Value(),result);
		}
	}
}
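Based on the parsing loop above, each non-comment input line holds a permission level, a fully-qualified user ("*" for any), an IP address, and an optional expected result. Hypothetical example input for this tool (the permission names are illustrative):

# permission  fqu               ip            expected (optional)
READ          *                 192.168.0.10  ALLOWED
WRITE         baduser@evil.org  10.0.0.1      DENIED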
Code Example #12
File: docker-api.cpp  Project: dcbradley/htcondor
int
run_simple_docker_command(const std::string &command, const std::string &container, int timeout, CondorError &, bool ignore_output)
{
  ArgList args;
  if ( ! add_docker_arg(args))
    return -1;
  args.AppendArg( command );
  args.AppendArg( container.c_str() );

  MyString displayString;
  args.GetArgsStringForLogging( & displayString );
  dprintf( D_FULLDEBUG, "Attempting to run: %s\n", displayString.c_str() );

#if 1
	MyPopenTimer pgm;
	if (pgm.start_program( args, true, NULL, false ) < 0) {
		dprintf( D_ALWAYS | D_FAILURE, "Failed to run '%s'.\n", displayString.c_str() );
		return -2;
	}

	if ( ! pgm.wait_and_close(timeout) || pgm.output_size() <= 0) {
		int error = pgm.error_code();
		if( error ) {
			dprintf( D_ALWAYS | D_FAILURE, "Failed to read results from '%s': '%s' (%d)\n", displayString.c_str(), pgm.error_str(), error );
			if (pgm.was_timeout()) {
				dprintf( D_ALWAYS | D_FAILURE, "Declaring a hung docker\n");
				return DockerAPI::docker_hung;
			}
		} else {
			dprintf( D_ALWAYS | D_FAILURE, "'%s' returned nothing.\n", displayString.c_str() );
		}
		return -3;
	}

	// On a success, Docker writes the containerID back out.
	MyString line;
	line.readLine(pgm.output());
	line.chomp(); line.trim();
	if (!ignore_output && line != container.c_str()) {
		// Didn't get back the result I expected, report the error and check to see if docker is hung.
		dprintf( D_ALWAYS | D_FAILURE, "Docker %s failed, printing first few lines of output.\n", command.c_str());
		for (int ii = 0; ii < 10; ++ii) {
			if ( ! line.readLine(pgm.output(), false)) break;
			dprintf( D_ALWAYS | D_FAILURE, "%s\n", line.c_str() );
		}
		return -4;
	}

#else
  // Read from Docker's combined output and error streams.
  FILE * dockerResults = my_popen( args, "r", 1 , 0, false);
  if( dockerResults == NULL ) {
    dprintf( D_ALWAYS | D_FAILURE, "Failed to run '%s'.\n", displayString.c_str() );
    return -2;
  }

  // On a success, Docker writes the containerID back out.
  char buffer[1024];
  if( NULL == fgets( buffer, 1024, dockerResults ) ) {
    if( errno ) {
      dprintf( D_ALWAYS | D_FAILURE, "Failed to read results from '%s': '%s' (%d)\n", displayString.c_str(), strerror( errno ), errno );
    } else {
      dprintf( D_ALWAYS | D_FAILURE, "'%s' returned nothing.\n", displayString.c_str() );
    }
    my_pclose( dockerResults );
    return -3;
  }

  size_t length = strlen( buffer );
  if (!ignore_output) {
    if( length < 1 || strncmp( buffer, container.c_str(), length - 1 ) != 0 ) {
      dprintf( D_ALWAYS | D_FAILURE, "Docker %s failed, printing first few lines of output.\n", command.c_str() );
      dprintf( D_ALWAYS | D_FAILURE, "%s", buffer );
      while( NULL != fgets( buffer, 1024, dockerResults ) ) {
	dprintf( D_ALWAYS | D_FAILURE, "%s", buffer );
      }
      my_pclose( dockerResults );
      return -4;
    }
  }

  my_pclose( dockerResults );
#endif
  return 0;
}
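A hypothetical caller sketch for the helper above, using only names that appear in these examples; the container name is made up for illustration.

	// Hypothetical example: remove a container and treat a hung docker client
	// as its own failure mode (DockerAPI::docker_hung, as returned above).
	CondorError err;
	std::string containerID = "HTCJob_example";  // hypothetical container name
	int rv = run_simple_docker_command( "rm", containerID, default_timeout, err, false );
	if ( rv == DockerAPI::docker_hung ) {
		dprintf( D_ALWAYS, "docker rm appears hung; leaving %s for manual cleanup.\n", containerID.c_str() );
	} else if ( rv != 0 ) {
		dprintf( D_ALWAYS, "docker rm of %s failed (%d).\n", containerID.c_str(), rv );
	}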
Code Example #13
File: docker-api.cpp  Project: dcbradley/htcondor
int DockerAPI::inspect( const std::string & containerID, ClassAd * dockerAd, CondorError & /* err */ ) {
	if( dockerAd == NULL ) {
		dprintf( D_ALWAYS | D_FAILURE, "dockerAd is NULL.\n" );
		return -2;
	}

	ArgList inspectArgs;
	if ( ! add_docker_arg(inspectArgs))
		return -1;
	inspectArgs.AppendArg( "inspect" );
	inspectArgs.AppendArg( "--format" );
	StringList formatElements(	"ContainerId=\"{{.Id}}\" "
								"Pid={{.State.Pid}} "
								"Name=\"{{.Name}}\" "
								"Running={{.State.Running}} "
								"ExitCode={{.State.ExitCode}} "
								"StartedAt=\"{{.State.StartedAt}}\" "
								"FinishedAt=\"{{.State.FinishedAt}}\" "
								"DockerError=\"{{.State.Error}}\" "
								"OOMKilled=\"{{.State.OOMKilled}}\" " );
	char * formatArg = formatElements.print_to_delimed_string( "\n" );
	inspectArgs.AppendArg( formatArg );
	free( formatArg );
	inspectArgs.AppendArg( containerID );

	MyString displayString;
	inspectArgs.GetArgsStringForLogging( & displayString );
	dprintf( D_FULLDEBUG, "Attempting to run: %s\n", displayString.c_str() );

#if 1
	MyPopenTimer pgm;
	if (pgm.start_program(inspectArgs, true, NULL, false) < 0) {
		dprintf( D_ALWAYS | D_FAILURE, "Failed to run '%s'.\n", displayString.c_str() );
		return -6;
	}

	MyStringSource * src = NULL;
	if (pgm.wait_and_close(default_timeout)) {
		src = &pgm.output();
	}

	int expected_rows = formatElements.number();
	dprintf( D_FULLDEBUG, "exit_status=%d, error=%d, %d bytes. expecting %d lines\n",
		pgm.exit_status(), pgm.error_code(), pgm.output_size(), expected_rows );

	// If the output isn't exactly formatElements.number() lines long,
	// something has gone wrong and we'll at least be able to print out
	// the error message(s).
	std::vector<std::string> correctOutput(expected_rows);
	if (src) {
		MyString line;
		int i=0;
		while (line.readLine(*src,false)) {
			line.chomp();
			//dprintf( D_FULLDEBUG, "\t[%2d] %s\n", i, line.c_str() );
			if (i >= expected_rows) {
				if (line.empty()) continue;
				correctOutput.push_back(line.c_str());
			} else {
				correctOutput[i] = line.c_str();
			}
			std::string::iterator first = 
				std::find(correctOutput[i].begin(),
					correctOutput[i].end(),
					'\"');
			if (first != correctOutput[i].end()) {
				std::replace(++first,
					--correctOutput[i].end(), '\"','\'');
			}
			//dprintf( D_FULLDEBUG, "\tfix: %s\n", correctOutput[i].c_str() );
			++i;
		}
	}
#else
	FILE * dockerResults = my_popen( inspectArgs, "r", 1 , 0, false);
	if( dockerResults == NULL ) {
		dprintf( D_ALWAYS | D_FAILURE, "Unable to run '%s'.\n", displayString.c_str() );
		return -6;
	}

	// If the output isn't exactly formatElements.number() lines long,
	// something has gone wrong and we'll at least be able to print out
	// the error message(s).
	char buffer[1024];
	std::vector<std::string> correctOutput(formatElements.number());
	for( int i = 0; i < formatElements.number(); ++i ) {
		if( fgets( buffer, 1024, dockerResults ) != NULL ) {
			correctOutput[i] = buffer;
			std::string::iterator first = 
				std::find(correctOutput[i].begin(),
					correctOutput[i].end(),
					'\"');
			if (first != correctOutput[i].end()) {
				std::replace(++first,
					-- --correctOutput[i].end(), '\"','\'');
			}
		}
	}
	my_pclose( dockerResults );
#endif

	int attrCount = 0;
	for( int i = 0; i < formatElements.number(); ++i ) {
		if( correctOutput[i].empty() || dockerAd->Insert( correctOutput[i].c_str() ) == FALSE ) {
			break;
		}
		++attrCount;
	}

	if( attrCount != formatElements.number() ) {
		dprintf( D_ALWAYS | D_FAILURE, "Failed to create classad from Docker output (%d).  Printing up to the first %d (nonblank) lines.\n", attrCount, formatElements.number() );
		for( int i = 0; i < formatElements.number() && ! correctOutput[i].empty(); ++i ) {
			dprintf( D_ALWAYS | D_FAILURE, "%s", correctOutput[i].c_str() );
		}
		return -4;
	}

	dprintf( D_FULLDEBUG, "docker inspect printed:\n" );
	for( int i = 0; i < formatElements.number() && ! correctOutput[i].empty(); ++i ) {
		dprintf( D_FULLDEBUG, "\t%s\n", correctOutput[i].c_str() );
	}
	return 0;
}
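A hypothetical caller sketch: the --format template above produces attributes such as Running and ExitCode, which a caller could read back out of the ClassAd after a successful inspect(). containerID stands for a container name obtained elsewhere.

	// Hypothetical example of consuming the ad built by DockerAPI::inspect().
	ClassAd dockerAd;
	CondorError err;
	if ( DockerAPI::inspect( containerID, &dockerAd, err ) == 0 ) {
		bool running = false;
		int exitCode = -1;
		dockerAd.LookupBool( "Running", running );
		dockerAd.LookupInteger( "ExitCode", exitCode );
		dprintf( D_FULLDEBUG, "container running=%d exit=%d\n", running ? 1 : 0, exitCode );
	}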
Code Example #14
File: docker-api.cpp  Project: dcbradley/htcondor
//
// FIXME: We have a lot of boilerplate code in this function and file.
//
int DockerAPI::version( std::string & version, CondorError & /* err */ ) {

	ArgList versionArgs;
	if ( ! add_docker_arg(versionArgs))
		return -1;
	versionArgs.AppendArg( "-v" );

	MyString displayString;
	versionArgs.GetArgsStringForLogging( & displayString );
	dprintf( D_FULLDEBUG, "Attempting to run: '%s'.\n", displayString.c_str() );

#if 1
	MyPopenTimer pgm;
	if (pgm.start_program(versionArgs, true, NULL, false) < 0) {
		// treat 'file not found' as not really error
		int d_level = (pgm.error_code() == ENOENT) ? D_FULLDEBUG : (D_ALWAYS | D_FAILURE);
		dprintf(d_level, "Failed to run '%s' errno=%d %s.\n", displayString.c_str(), pgm.error_code(), pgm.error_str() );
		return -2;
	}

	int exitCode;
	if ( ! pgm.wait_for_exit(default_timeout, &exitCode)) {
		pgm.close_program(1);
		dprintf( D_ALWAYS | D_FAILURE, "Failed to read results from '%s': '%s' (%d)\n", displayString.c_str(), pgm.error_str(), pgm.error_code() );
		return -3;
	}

	if (pgm.output_size() <= 0) {
		dprintf( D_ALWAYS | D_FAILURE, "'%s' returned nothing.\n", displayString.c_str() );
		return -3;
	}

	MyStringSource * src = &pgm.output();
	MyString line;
	if (line.readLine(*src, false)) {
		line.chomp();
		bool jansens = strstr( line.c_str(), "Jansens" ) != NULL;
		bool bad_size = ! src->isEof() || line.size() > 1024 || line.size() < (int)sizeof("Docker version ");
		if (bad_size && ! jansens) {
			// check second line of output for the word Jansens also.
			MyString tmp; tmp.readLine(*src, false);
			jansens = strstr( tmp.c_str(), "Jansens" ) != NULL;
		}
		if (jansens) {
			dprintf( D_ALWAYS | D_FAILURE, "The DOCKER configuration setting appears to point to OpenBox's docker.  If you want to use Docker.IO, please set DOCKER appropriately in your configuration.\n" );
			return -5;
		} else if (bad_size) {
			dprintf( D_ALWAYS | D_FAILURE, "Read more than one line (or a very long line) from '%s', which we think means it's not Docker.  The (first line of the) trailing text was '%s'.\n", displayString.c_str(), line.c_str() );
			return -5;
		}
	}

	if( exitCode != 0 ) {
		dprintf( D_ALWAYS, "'%s' did not exit successfully (code %d); the first line of output was '%s'.\n", displayString.c_str(), exitCode, line.c_str() );
		return -4;
	}

	version = line.c_str();

#else
	FILE * dockerResults = my_popen( versionArgs, "r", 1 , 0, false);
	if( dockerResults == NULL ) {
		dprintf( D_ALWAYS | D_FAILURE, "Failed to run '%s'.\n", displayString.c_str() );
		return -2;
	}

	char buffer[1024];
	if( NULL == fgets( buffer, 1024, dockerResults ) ) {
		if( errno ) {
			dprintf( D_ALWAYS | D_FAILURE, "Failed to read results from '%s': '%s' (%d)\n", displayString.c_str(), strerror( errno ), errno );
		} else {
			dprintf( D_ALWAYS | D_FAILURE, "'%s' returned nothing.\n", displayString.c_str() );
		}
		my_pclose( dockerResults );
		return -3;
	}

	if( NULL != fgets( buffer, 1024, dockerResults ) ) {
		if( strstr( buffer, "Jansens" ) != NULL ) {
			dprintf( D_ALWAYS | D_FAILURE, "The DOCKER configuration setting appears to point to OpenBox's docker.  If you want to use Docker.IO, please set DOCKER appropriately in your configuration.\n" );
		} else {
			dprintf( D_ALWAYS | D_FAILURE, "Read more than one line (or a very long line) from '%s', which we think means it's not Docker.  The (first line of the) trailing text was '%s'.\n", displayString.c_str(), buffer );
		}
		my_pclose( dockerResults );
		return -5;
	}

	int exitCode = my_pclose( dockerResults );
	if( exitCode != 0 ) {
		dprintf( D_ALWAYS, "'%s' did not exit successfully (code %d); the first line of output was '%s'.\n", displayString.c_str(), exitCode, buffer );
		return -4;
	}

	size_t end = strlen(buffer);
	if (end > 0 && buffer[end-1] == '\n') { buffer[end-1] = '\0'; }
	version = buffer;
#endif
	sscanf(version.c_str(), "Docker version %d.%d", &DockerAPI::majorVersion, &DockerAPI::minorVersion);
	return 0;
}
Code Example #15
File: dagman_main.cpp  Project: AlanDeSmet/htcondor
//---------------------------------------------------------------------------
void main_init (int argc, char ** const argv) {

	printf ("Executing condor dagman ... \n");

		// flag used if DAGMan is invoked with -WaitForDebug so we
		// wait for a developer to attach with a debugger...
	volatile int wait_for_debug = 0;

		// process any config vars -- this happens before we process
		// argv[], since arguments should override config settings
	dagman.Config();

	// The DCpermission (last parm) should probably be PARENT, if it existed
    daemonCore->Register_Signal( SIGUSR1, "SIGUSR1",
                                 (SignalHandler) main_shutdown_remove,
                                 "main_shutdown_remove", NULL);

/****** FOR TESTING *******
    daemonCore->Register_Signal( SIGUSR2, "SIGUSR2",
                                 (SignalHandler) main_testing_stub,
                                 "main_testing_stub", NULL);
****** FOR TESTING ********/
    debug_progname = condor_basename(argv[0]);

		// condor_submit_dag version from .condor.sub
	bool allowVerMismatch = false;
	const char *csdVersion = "undefined";

	int i;
    for (i = 0 ; i < argc ; i++) {
        debug_printf( DEBUG_NORMAL, "argv[%d] == \"%s\"\n", i, argv[i] );
    }

    if (argc < 2) Usage();  //  Make sure an input file was specified

		// get dagman job id from environment, if it's there
		// (otherwise it will be set to "-1.-1.-1")
	dagman.DAGManJobId.SetFromString( getenv( EnvGetName( ENV_ID ) ) );

	dagman._dagmanClassad = new DagmanClassad( dagman.DAGManJobId );

	//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		// Minimum legal version for a .condor.sub file to be compatible
		// with this condor_dagman binary.

		// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
		// Be sure to change this if the arguments or environment
		// passed to condor_dagman change in an incompatible way!!
		// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

	struct DagVersionData {
		int majorVer;
		int minorVer;
		int subMinorVer;
	};
	const DagVersionData MIN_SUBMIT_FILE_VERSION = { 7, 1, 2 };

		// Construct a string of the minimum submit file version.
	MyString minSubmitVersionStr;
	minSubmitVersionStr.formatstr( "%d.%d.%d",
				MIN_SUBMIT_FILE_VERSION.majorVer,
				MIN_SUBMIT_FILE_VERSION.minorVer,
				MIN_SUBMIT_FILE_VERSION.subMinorVer );
	//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    //
    // Process command-line arguments
    //
    for (i = 1; i < argc; i++) {
        if( !strcasecmp( "-Debug", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No debug level specified\n" );
                Usage();
            }
            debug_level = (debug_level_t) atoi (argv[i]);
        } else if( !strcasecmp( "-Lockfile", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No DagMan lockfile specified\n" );
                Usage();
            }
            lockFileName = argv[i];
        } else if( !strcasecmp( "-Help", argv[i] ) ) {
            Usage();
        } else if (!strcasecmp( "-Dag", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No DAG specified\n" );
                Usage();
            }
			dagman.dagFiles.append( argv[i] );
        } else if( !strcasecmp( "-MaxIdle", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT,
							  "Integer missing after -MaxIdle\n" );
                Usage();
            }
            dagman.maxIdle = atoi( argv[i] );
        } else if( !strcasecmp( "-MaxJobs", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT,
							  "Integer missing after -MaxJobs\n" );
                Usage();
            }
            dagman.maxJobs = atoi( argv[i] );
        } else if( !strcasecmp( "-MaxScripts", argv[i] ) ) {
			debug_printf( DEBUG_SILENT, "-MaxScripts has been replaced with "
						   "-MaxPre and -MaxPost arguments\n" );
			Usage();
        } else if( !strcasecmp( "-MaxPre", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT,
							  "Integer missing after -MaxPre\n" );
                Usage();
            }
            dagman.maxPreScripts = atoi( argv[i] );
        } else if( !strcasecmp( "-MaxPost", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT,
							  "Integer missing after -MaxPost\n" );
                Usage();
            }
            dagman.maxPostScripts = atoi( argv[i] );
        } else if( !strcasecmp( "-NoEventChecks", argv[i] ) ) {
			debug_printf( DEBUG_QUIET, "Warning: -NoEventChecks is "
						"ignored; please use the DAGMAN_ALLOW_EVENTS "
						"config parameter instead\n");
			check_warning_strictness( DAG_STRICT_2 );

        } else if( !strcasecmp( "-AllowLogError", argv[i] ) ) {
			dagman.allowLogError = true;

        } else if( !strcasecmp( "-DontAlwaysRunPost",argv[i] ) ) {
			dagman._runPost = false;

        } else if( !strcasecmp( "-WaitForDebug", argv[i] ) ) {
			wait_for_debug = 1;

        } else if( !strcasecmp( "-UseDagDir", argv[i] ) ) {
			dagman.useDagDir = true;

        } else if( !strcasecmp( "-AutoRescue", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No AutoRescue value specified\n" );
                Usage();
            }
            dagman.autoRescue = (atoi( argv[i] ) != 0);

        } else if( !strcasecmp( "-DoRescueFrom", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No rescue DAG number specified\n" );
                Usage();
            }
            dagman.doRescueFrom = atoi (argv[i]);

        } else if( !strcasecmp( "-CsdVersion", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No CsdVersion value specified\n" );
                Usage();
            }
			csdVersion = argv[i];

        } else if( !strcasecmp( "-AllowVersionMismatch", argv[i] ) ) {
			allowVerMismatch = true;

        } else if( !strcasecmp( "-DumpRescue", argv[i] ) ) {
			dagman.dumpRescueDag = true;

        } else if( !strcasecmp( "-verbose", argv[i] ) ) {
			dagman._submitDagDeepOpts.bVerbose = true;

        } else if( !strcasecmp( "-force", argv[i] ) ) {
			dagman._submitDagDeepOpts.bForce = true;
		
        } else if( !strcasecmp( "-notification", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No notification value specified\n" );
                Usage();
            }
			dagman._submitDagDeepOpts.strNotification = argv[i];

		} else if( !strcasecmp( "-suppress_notification",argv[i] ) ) {
			dagman._submitDagDeepOpts.suppress_notification = true;

		} else if( !strcasecmp( "-dont_suppress_notification",argv[i] ) ) {
			dagman._submitDagDeepOpts.suppress_notification = false;

        } else if( !strcasecmp( "-dagman", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No dagman value specified\n" );
                Usage();
            }
			dagman._submitDagDeepOpts.strDagmanPath = argv[i];

        } else if( !strcasecmp( "-outfile_dir", argv[i] ) ) {
            i++;
            if( argc <= i || strcmp( argv[i], "" ) == 0 ) {
                debug_printf( DEBUG_SILENT, "No outfile_dir value specified\n" );
                Usage();
            }
			dagman._submitDagDeepOpts.strOutfileDir = argv[i];

        } else if( !strcasecmp( "-update_submit", argv[i] ) ) {
			dagman._submitDagDeepOpts.updateSubmit = true;

        } else if( !strcasecmp( "-import_env", argv[i] ) ) {
			dagman._submitDagDeepOpts.importEnv = true;

        } else if( !strcasecmp( "-priority", argv[i] ) ) {
		++i;
		if( i >= argc || strcmp( argv[i], "" ) == 0 ) {
			debug_printf( DEBUG_NORMAL, "No priority value specified\n");
			Usage();
		}
		dagman._submitDagDeepOpts.priority = atoi(argv[i]);
		} else if( !strcasecmp( "-dont_use_default_node_log", argv[i] ) ) {
			dagman._submitDagDeepOpts.always_use_node_log = false;
        } else {
    		debug_printf( DEBUG_SILENT, "\nUnrecognized argument: %s\n",
						argv[i] );
			Usage();
		}
    }

	dagman.dagFiles.rewind();
	dagman.primaryDagFile = dagman.dagFiles.next();
	dagman.multiDags = (dagman.dagFiles.number() > 1);

	MyString tmpDefaultLog;
	if ( dagman._defaultNodeLog != NULL ) {
		tmpDefaultLog = dagman._defaultNodeLog;
		free( dagman._defaultNodeLog );
	} else {
		tmpDefaultLog = dagman.primaryDagFile + ".nodes.log";
	}

		// Force default log file path to be absolute so it works
		// with -usedagdir and DIR nodes.
	CondorError errstack;
	if ( !MultiLogFiles::makePathAbsolute( tmpDefaultLog, errstack) ) {
       	debug_printf( DEBUG_QUIET, "Unable to convert default log "
					"file name to absolute path: %s\n",
					errstack.getFullText().c_str() );
		dagman.dag->GetJobstateLog().WriteDagmanFinished( EXIT_ERROR );
		DC_Exit( EXIT_ERROR );
	}
	dagman._defaultNodeLog = strdup( tmpDefaultLog.Value() );
	debug_printf( DEBUG_NORMAL, "Default node log file is: <%s>\n",
				dagman._defaultNodeLog);

    //
    // Check the arguments
    //

	//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	// Checking for version compatibility between the .condor.sub
	// file and this condor_dagman binary...

	// Note: if we're in recovery mode and the submit file version
	// causes us to quit, we leave any existing node jobs still
	// running -- may want to change that eventually.  wenger 2009-10-13.

		// Version of the condor_submit_dag that created our submit file.
	CondorVersionInfo submitFileVersion( csdVersion );

		// Version of this condor_dagman binary.
	CondorVersionInfo dagmanVersion;

		// Just generate this message fragment in one place.
	MyString versionMsg;
	versionMsg.formatstr("the version (%s) of this DAG's Condor submit "
				"file (created by condor_submit_dag)", csdVersion );

		// Make sure version in submit file is valid.
	if( !submitFileVersion.is_valid() ) {
		if ( !allowVerMismatch ) {
        	debug_printf( DEBUG_QUIET, "Error: %s is invalid!\n",
						versionMsg.Value() );
			DC_Exit( EXIT_ERROR );
		} else {
        	debug_printf( DEBUG_NORMAL, "Warning: %s is invalid; "
						"continuing because of -AllowVersionMismatch flag\n",
						versionMsg.Value() );
		}

		// Make sure .condor.sub file is recent enough.
	} else if ( submitFileVersion.compare_versions(
				CondorVersion() ) != 0 ) {

		if( !submitFileVersion.built_since_version(
					MIN_SUBMIT_FILE_VERSION.majorVer,
					MIN_SUBMIT_FILE_VERSION.minorVer,
					MIN_SUBMIT_FILE_VERSION.subMinorVer ) ) {
			if ( !allowVerMismatch ) {
        		debug_printf( DEBUG_QUIET, "Error: %s is older than "
							"oldest permissible version (%s)\n",
							versionMsg.Value(), minSubmitVersionStr.Value() );
				DC_Exit( EXIT_ERROR );
			} else {
        		debug_printf( DEBUG_NORMAL, "Warning: %s is older than "
							"oldest permissible version (%s); continuing "
							"because of -AllowVersionMismatch flag\n",
							versionMsg.Value(), minSubmitVersionStr.Value() );
			}

			// Warn if .condor.sub file is a newer version than this binary.
		} else if (dagmanVersion.compare_versions( csdVersion ) > 0 ) {
        	debug_printf( DEBUG_NORMAL, "Warning: %s is newer than "
						"condor_dagman version (%s)\n", versionMsg.Value(),
						CondorVersion() );
			check_warning_strictness( DAG_STRICT_3 );
		} else {
        	debug_printf( DEBUG_NORMAL, "Note: %s differs from "
						"condor_dagman version (%s), but the "
						"difference is permissible\n", 
						versionMsg.Value(), CondorVersion() );
		}
	}
	//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    if( dagman.primaryDagFile == "" ) {
        debug_printf( DEBUG_SILENT, "No DAG file was specified\n" );
        Usage();
    }
    if (lockFileName == NULL) {
        debug_printf( DEBUG_SILENT, "No DAG lock file was specified\n" );
        Usage();
    }
    if( dagman.maxJobs < 0 ) {
        debug_printf( DEBUG_SILENT, "-MaxJobs must be non-negative\n");
        Usage();
    }
    if( dagman.maxPreScripts < 0 ) {
        debug_printf( DEBUG_SILENT, "-MaxPre must be non-negative\n" );
        Usage();
    }
    if( dagman.maxPostScripts < 0 ) {
        debug_printf( DEBUG_SILENT, "-MaxPost must be non-negative\n" );
        Usage();
    }
    if( dagman.doRescueFrom < 0 ) {
        debug_printf( DEBUG_SILENT, "-DoRescueFrom must be non-negative\n" );
        Usage();
    }

    debug_printf( DEBUG_VERBOSE, "DAG Lockfile will be written to %s\n",
                   lockFileName );
	if ( dagman.dagFiles.number() == 1 ) {
    	debug_printf( DEBUG_VERBOSE, "DAG Input file is %s\n",
				  	dagman.primaryDagFile.Value() );
	} else {
		MyString msg = "DAG Input files are ";
		dagman.dagFiles.rewind();
		const char *dagFile;
		while ( (dagFile = dagman.dagFiles.next()) != NULL ) {
			msg += dagFile;
			msg += " ";
		}
		msg += "\n";
    	debug_printf( DEBUG_VERBOSE, "%s", msg.Value() );
	}

		// if requested, wait for someone to attach with a debugger...
	while( wait_for_debug ) { }

    {
		MyString cwd;
		if( !condor_getcwd(cwd) ) {
			cwd = "<null>";
		}
        debug_printf( DEBUG_DEBUG_1, "Current path is %s\n",cwd.Value());

		char *temp = my_username();
		debug_printf( DEBUG_DEBUG_1, "Current user is %s\n",
					   temp ? temp : "<null>" );
		if( temp ) {
			free( temp );
		}
    }

		//
		// Figure out the rescue DAG to run, if any (this is with "new-
		// style" rescue DAGs).
		//
	int rescueDagNum = 0;
	MyString rescueDagMsg;

	if ( dagman.doRescueFrom != 0 ) {
		rescueDagNum = dagman.doRescueFrom;
		rescueDagMsg.formatstr( "Rescue DAG number %d specified", rescueDagNum );
		RenameRescueDagsAfter( dagman.primaryDagFile.Value(),
					dagman.multiDags, rescueDagNum, dagman.maxRescueDagNum );

	} else if ( dagman.autoRescue ) {
		rescueDagNum = FindLastRescueDagNum(
					dagman.primaryDagFile.Value(),
					dagman.multiDags, dagman.maxRescueDagNum );
		rescueDagMsg.formatstr( "Found rescue DAG number %d", rescueDagNum );
	}

		//
		// Fill in values in the deep submit options that we haven't
		// already set.
		//
	dagman._submitDagDeepOpts.bAllowLogError = dagman.allowLogError;
	dagman._submitDagDeepOpts.useDagDir = dagman.useDagDir;
	dagman._submitDagDeepOpts.autoRescue = dagman.autoRescue;
	dagman._submitDagDeepOpts.doRescueFrom = dagman.doRescueFrom;
	dagman._submitDagDeepOpts.allowVerMismatch = allowVerMismatch;
	dagman._submitDagDeepOpts.recurse = false;

    //
    // Create the DAG
    //

	// Note: a bunch of the parameters we pass here duplicate things
	// in submitDagOpts, but I'm keeping them separate so we don't have to
	// bother to construct a new SubmitDagOptions object for splices.
	// wenger 2010-03-25
    dagman.dag = new Dag( dagman.dagFiles, dagman.maxJobs,
						  dagman.maxPreScripts, dagman.maxPostScripts,
						  dagman.allowLogError, dagman.useDagDir,
						  dagman.maxIdle, dagman.retrySubmitFirst,
						  dagman.retryNodeFirst, dagman.condorRmExe,
						  dagman.storkRmExe, &dagman.DAGManJobId,
						  dagman.prohibitMultiJobs, dagman.submitDepthFirst,
						  dagman._defaultNodeLog,
						  dagman._generateSubdagSubmits,
						  &dagman._submitDagDeepOpts,
						  false ); /* toplevel dag! */

    if( dagman.dag == NULL ) {
        EXCEPT( "ERROR: out of memory!\n");
    }

	dagman.dag->SetAbortOnScarySubmit( dagman.abortOnScarySubmit );
	dagman.dag->SetAllowEvents( dagman.allow_events );
	dagman.dag->SetConfigFile( dagman._dagmanConfigFile );
	dagman.dag->SetMaxJobHolds( dagman._maxJobHolds );
	dagman.dag->SetPostRun(dagman._runPost);
	if( dagman._submitDagDeepOpts.priority != 0 ) { // From command line
		dagman.dag->SetDefaultPriority(dagman._submitDagDeepOpts.priority);
	} else if( dagman._defaultPriority != 0 ) { // From config file
		dagman.dag->SetDefaultPriority(dagman._defaultPriority);
		dagman._submitDagDeepOpts.priority = dagman._defaultPriority;
	}

    //
    // Parse the input files.  The parse() routine
    // takes care of adding jobs and dependencies to the DagMan
    //
	dagman.mungeNodeNames = (dagman.dagFiles.number() > 1);
	parseSetDoNameMunge( dagman.mungeNodeNames );
   	debug_printf( DEBUG_VERBOSE, "Parsing %d dagfiles\n", 
		dagman.dagFiles.number() );
	dagman.dagFiles.rewind();
	char *dagFile;

	// Here we make a copy of the dagFiles for iteration purposes. Deep inside
	// of the parsing, copies of the dagman.dagFile string list happen which
	// mess up the iteration of this list.
	StringList sl( dagman.dagFiles );
	sl.rewind();
	while ( (dagFile = sl.next()) != NULL ) {
    	debug_printf( DEBUG_VERBOSE, "Parsing %s ...\n", dagFile );

    	if( !parse( dagman.dag, dagFile, dagman.useDagDir ) ) {
			if ( dagman.dumpRescueDag ) {
					// Dump the rescue DAG so we can see what we got
					// in the failed parse attempt.
    			debug_printf( DEBUG_QUIET, "Dumping rescue DAG "
							"because of -DumpRescue flag\n" );
				dagman.dag->Rescue( dagman.primaryDagFile.Value(),
							dagman.multiDags, dagman.maxRescueDagNum,
							false, true, false );
			}
			
			dagman.dag->RemoveRunningJobs(dagman, true);
			tolerant_unlink( lockFileName );
			dagman.CleanUp();
			
				// Note: debug_error calls DC_Exit().
        	debug_error( 1, DEBUG_QUIET, "Failed to parse %s\n",
					 	dagFile );
    	}
	}
	if( dagman.dag->GetDefaultPriority() != 0 ) {
		dagman.dag->SetDefaultPriorities(); // Applies to the nodes of the dag
	}
	dagman.dag->GetJobstateLog().WriteDagmanStarted( dagman.DAGManJobId );
	if ( rescueDagNum > 0 ) {
			// Get our Pegasus sequence numbers set correctly.
		dagman.dag->GetJobstateLog().InitializeRescue();
	}

	// lift the final set of splices into the main dag.
	dagman.dag->LiftSplices(SELF);

		//
		// Actually parse the "new-new" style (partial DAG info only)
		// rescue DAG here.  Note: this *must* be done after splices
		// are lifted!
		//
	if ( rescueDagNum > 0 ) {
		dagman.rescueFileToRun = RescueDagName(
					dagman.primaryDagFile.Value(),
					dagman.multiDags, rescueDagNum );
		debug_printf ( DEBUG_QUIET, "%s; running %s in combination with "
					"normal DAG file%s\n", rescueDagMsg.Value(),
					dagman.rescueFileToRun.Value(),
					dagman.multiDags ? "s" : "");
		debug_printf ( DEBUG_QUIET,
					"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");

		debug_printf ( DEBUG_QUIET, "USING RESCUE DAG %s\n",
					dagman.rescueFileToRun.Value() );

			// Turn off node name munging for the rescue DAG, because
			// it will already have munged node names.
		parseSetDoNameMunge( false );

    	if( !parse( dagman.dag, dagman.rescueFileToRun.Value(),
					dagman.useDagDir ) ) {
			if ( dagman.dumpRescueDag ) {
					// Dump the rescue DAG so we can see what we got
					// in the failed parse attempt.
    			debug_printf( DEBUG_QUIET, "Dumping rescue DAG "
							"because of -DumpRescue flag\n" );
				dagman.dag->Rescue( dagman.primaryDagFile.Value(),
							dagman.multiDags, dagman.maxRescueDagNum,
							true, false );
			}
			
			dagman.dag->RemoveRunningJobs(dagman, true);
			tolerant_unlink( lockFileName );
			dagman.CleanUp();
			
				// Note: debug_error calls DC_Exit().
        	debug_error( 1, DEBUG_QUIET, "Failed to parse %s\n",
					 	dagFile );
    	}
	}

	dagman.dag->CheckThrottleCats();

	// fix up any use of $(JOB) in the vars values for any node
	dagman.dag->ResolveVarsInterpolations();

/*	debug_printf(DEBUG_QUIET, "COMPLETED DAG!\n");*/
/*	dagman.dag->PrintJobList();*/

#ifndef NOT_DETECT_CYCLE
	if( dagman.startup_cycle_detect && dagman.dag->isCycle() )
	{
		// Note: maybe we should run the final node here, if there is one.
		// wenger 2011-12-19.
		debug_error (1, DEBUG_QUIET, "ERROR: a cycle exists in the dag, please check input\n");
	}
#endif
    debug_printf( DEBUG_VERBOSE, "Dag contains %d total jobs\n",
				  dagman.dag->NumNodes( true ) );

	MyString firstLocation;
	if ( dagman.dag->GetReject( firstLocation ) ) {
    	debug_printf( DEBUG_QUIET, "Exiting because of REJECT "
					"specification in %s.  This most likely means "
					"that the DAG file was produced with the -DumpRescue "
					"flag when parsing the original DAG failed.\n",
					firstLocation.Value() );
		DC_Exit( EXIT_ERROR );
		return;
	}

	dagman.dag->DumpDotFile();

	if ( dagman.dumpRescueDag ) {
    	debug_printf( DEBUG_QUIET, "Dumping rescue DAG and exiting "
					"because of -DumpRescue flag\n" );
		dagman.dag->Rescue( dagman.primaryDagFile.Value(),
					dagman.multiDags, dagman.maxRescueDagNum, false,
					false, false );
		ExitSuccess();
		return;
	}

    //------------------------------------------------------------------------
    // Bootstrap and Recovery
    //
    // If the Lockfile exists, this indicates a premature termination
    // of a previous run of Dagman. If condor log is also present,
    // we run in recovery mode
  
    // If the Daglog is not present, then we do not run in recovery
    // mode
  
    {
      bool recovery = access(lockFileName,  F_OK) == 0;
      
        if (recovery) {
            debug_printf( DEBUG_VERBOSE, "Lock file %s detected, \n",
                           lockFileName);
			if (dagman.abortDuplicates) {
				if (util_check_lock_file(lockFileName) == 1) {
        			debug_printf( DEBUG_QUIET, "Aborting because it "
							"looks like another instance of DAGMan is "
							"currently running on this DAG; if that is "
							"not the case, delete the lock file (%s) "
							"and re-submit the DAG.\n", lockFileName );
					dagman.dag->GetJobstateLog().
								WriteDagmanFinished( EXIT_RESTART );
    				dagman.CleanUp();
					DC_Exit( EXIT_ERROR );
					// We should never get to here!
				}
			}
        }

			//
			// If this DAGMan continues, it should overwrite the lock
			// file if it exists.
			//
		util_create_lock_file(lockFileName, dagman.abortDuplicates);

        debug_printf( DEBUG_VERBOSE, "Bootstrapping...\n");
        if( !dagman.dag->Bootstrap( recovery ) ) {
            dagman.dag->PrintReadyQ( DEBUG_DEBUG_1 );
            debug_error( 1, DEBUG_QUIET, "ERROR while bootstrapping\n");
        }
		print_status();
    }

    debug_printf( DEBUG_VERBOSE, "Registering condor_event_timer...\n" );
    daemonCore->Register_Timer( 1, dagman.m_user_log_scan_interval, 
				condor_event_timer, "condor_event_timer" );

	dagman.dag->SetPendingNodeReportInterval(
				dagman.pendingReportInterval );
}
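Note (an illustrative sketch, not part of the HTCondor sources above): the compatibility check above rejects a .condor.sub file older than MIN_SUBMIT_FILE_VERSION unless -AllowVersionMismatch was given. A standalone sketch of that gate, with a plain struct standing in for CondorVersionInfo; treating built_since_version as a lexicographic "at least this version" test is an assumption:

#include <cstdio>

struct SimpleVersion { int majorVer; int minorVer; int subMinorVer; };

// True when 'v' is the same as or newer than 'min' (compare major, then minor, then sub-minor).
static bool at_least(const SimpleVersion & v, const SimpleVersion & min)
{
	if (v.majorVer != min.majorVer) return v.majorVer > min.majorVer;
	if (v.minorVer != min.minorVer) return v.minorVer > min.minorVer;
	return v.subMinorVer >= min.subMinorVer;
}

int main()
{
	const SimpleVersion minSubmitFileVersion = { 7, 1, 2 };   // mirrors MIN_SUBMIT_FILE_VERSION
	const SimpleVersion submitFileVersion    = { 7, 0, 5 };   // hypothetical .condor.sub version
	const bool allowVerMismatch = false;

	if ( ! at_least(submitFileVersion, minSubmitFileVersion) && ! allowVerMismatch) {
		printf("Error: submit file version 7.0.5 is older than oldest permissible version 7.1.2\n");
		return 1;
	}
	return 0;
}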
Code example #16
0
bool MyString::operator< (const MyString & aMyString)
{
	return (*this) < aMyString._cstr();
}
Code example #17
0
// takes the type (as defined in handshake bitmask, CAUTH_*) and result of authentication,
// and maps it to the canonical condor name.
//
void Authentication::map_authentication_name_to_canonical_name(int authentication_type, const char* method_string, const char* authentication_name) {

	// make sure the mapfile is loaded.  it's a static global variable.
	if (global_map_file_load_attempted == false) {
		if (global_map_file) {
			delete global_map_file;
			global_map_file = NULL;
		}

		global_map_file = new MapFile();

		dprintf (D_SECURITY, "ZKM: Parsing map file.\n");
        char * credential_mapfile;
        if (NULL == (credential_mapfile = param("CERTIFICATE_MAPFILE"))) {
            dprintf(D_SECURITY, "ZKM: No CERTIFICATE_MAPFILE defined\n");
			delete global_map_file;
			global_map_file = NULL;
        } else {
        	int line;
        	if (0 != (line = global_map_file->ParseCanonicalizationFile(credential_mapfile))) {
            	dprintf(D_SECURITY, "ZKM: Error parsing %s at line %d", credential_mapfile, line);
				delete global_map_file;
				global_map_file = NULL;
			}
			free( credential_mapfile );
		}
		global_map_file_load_attempted = true;
	} else {
		dprintf (D_SECURITY, "ZKM: map file already loaded.\n");
	}

#if defined(HAVE_EXT_GLOBUS)
	if (globus_activated == false) {
		dprintf (D_FULLDEBUG, "Activating Globus GSI_GSSAPI_ASSIST module.\n");
		globus_module_activate(GLOBUS_GSI_GSS_ASSIST_MODULE);
		globus_activated = true;
	}
#endif
      

	dprintf (D_SECURITY, "ZKM: attempting to map '%s'\n", authentication_name);

	// this will hold what we pass to the mapping function
	MyString auth_name_to_map = authentication_name;

	bool included_voms = false;

#if defined(HAVE_EXT_GLOBUS)
	// if GSI, try first with the FQAN (dn plus voms attrs)
	if (authentication_type == CAUTH_GSI) {
		const char *fqan = ((Condor_Auth_X509*)authenticator_)->getFQAN();
		if (fqan && fqan[0]) {
			dprintf (D_SECURITY, "ZKM: GSI was used, and FQAN is present.\n");
			auth_name_to_map = fqan;
			included_voms = true;
		}
	}
#endif

	if (global_map_file) {
		MyString canonical_user;

		dprintf (D_SECURITY, "ZKM: 1: attempting to map '%s'\n", auth_name_to_map.Value());
		bool mapret = global_map_file->GetCanonicalization(method_string, auth_name_to_map.Value(), canonical_user);
		dprintf (D_SECURITY, "ZKM: 2: mapret: %i included_voms: %i canonical_user: %s\n", mapret, included_voms, canonical_user.Value());

		// if it did not find a user, and we included voms attrs, try again without voms
		if (mapret && included_voms) {
			dprintf (D_SECURITY, "ZKM: now attempting to map '%s'\n", authentication_name);
			mapret = global_map_file->GetCanonicalization(method_string, authentication_name, canonical_user);
			dprintf (D_SECURITY, "ZKM: now 2: mapret: %i included_voms: %i canonical_user: %s\n", mapret, included_voms, canonical_user.Value());
		}

		if (!mapret) {
			// returns true on failure?
			dprintf (D_FULLDEBUG, "ZKM: successful mapping to %s\n", canonical_user.Value());

			// there is a switch for GSI to use the default globus function for this, in
			// case there is some custom globus mapping add-on, or the admin just wants
			// to use the grid-mapfile in use by other globus software.
			//
			// if they don't opt for globus to map, just fall through to the condor
			// mapfile.
			//
			if ((authentication_type == CAUTH_GSI) && (canonical_user == "GSS_ASSIST_GRIDMAP")) {
#if defined(HAVE_EXT_GLOBUS)

				// nameGssToLocal calls setRemoteFoo directly.
				int retval = ((Condor_Auth_X509*)authenticator_)->nameGssToLocal( authentication_name );

				if (retval) {
					dprintf (D_SECURITY, "Globus-based mapping was successful.\n");
				} else {
					dprintf (D_SECURITY, "Globus-based mapping failed; will use gsi@unmapped.\n");
				}
#else
				dprintf(D_ALWAYS, "ZKM: GSI not compiled, but was used?!!");
#endif
				return;
			} else {

				dprintf (D_SECURITY, "ZKM: found user %s, splitting.\n", canonical_user.Value());

				MyString user;
				MyString domain;

				// this sets user and domain
				split_canonical_name( canonical_user, user, domain);

				authenticator_->setRemoteUser( user.Value() );
				authenticator_->setRemoteDomain( domain.Value() );

				// we're done.
				return;
			}
		} else {
			dprintf (D_FULLDEBUG, "ZKM: did not find user %s.\n", canonical_user.Value());
		}
	} else if (authentication_type == CAUTH_GSI) {
        // See notes above around the nameGssToLocal call about why we invoke GSI authorization here.
        // Theoretically, it should be impossible to hit this case - the invoking function thought
		// we had a mapfile, but we couldn't create one.  This just covers weird corner cases.

#if defined(HAVE_EXT_GLOBUS)
		int retval = ((Condor_Auth_X509*)authenticator_)->nameGssToLocal(authentication_name);
		dprintf(D_SECURITY, "nameGssToLocal returned %s\n", retval ? "success" : "failure");
#else
		dprintf(D_ALWAYS, "ZKM: GSI not compiled, so can't call nameGssToLocal!!");
#endif
	} else {
		dprintf (D_FULLDEBUG, "ZKM: global_map_file not present!\n");
	}
}
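Note (an illustrative sketch, not part of the HTCondor sources above): the logic above first tries to map the GSI FQAN (DN plus VOMS attributes) and retries with the bare name when nothing matches. A standalone sketch of that two-pass lookup, with a std::map standing in for the parsed CERTIFICATE_MAPFILE; every name and entry below is hypothetical:

#include <cstdio>
#include <map>
#include <string>

// Stand-in for the parsed map file: authentication name -> canonical user.
typedef std::map<std::string, std::string> CanonicalMap;

// Try the richer name first, then fall back to the bare name without VOMS attributes.
static bool map_with_fallback(const CanonicalMap & mapfile,
                              const std::string & fqan,
                              const std::string & dn,
                              std::string & canonical_user)
{
	CanonicalMap::const_iterator it = mapfile.find(fqan);
	if (it == mapfile.end() && fqan != dn) {
		it = mapfile.find(dn);   // second attempt, without the VOMS attributes
	}
	if (it == mapfile.end()) {
		return false;
	}
	canonical_user = it->second;
	return true;
}

int main()
{
	CanonicalMap mapfile;
	mapfile["/DC=org/CN=Alice"] = "alice@cs.example.edu";   // hypothetical entry

	std::string user;
	if (map_with_fallback(mapfile, "/DC=org/CN=Alice/VO=cms", "/DC=org/CN=Alice", user)) {
		printf("mapped to %s\n", user.c_str());   // falls back to the bare DN and succeeds
	}
	return 0;
}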
Code example #18
0
bool MyString::operator!= (const MyString & aMyString)
{
	return (*this) != aMyString._cstr();
}
Code example #19
0
File: FTEST_tokener.cpp Project: bbockelm/htcondor
static bool test_tokener_parse_basic() {
    emit_test("Test basic tokener parsing functions");

	MyString msg;
	std::string temp;
	tokener toke("now is the time");

	REQUIRE (toke.content() == "now is the time");

	REQUIRE(toke.next() && toke.matches("now"));
	toke.copy_token(temp);
	REQUIRE(temp == "now");
	REQUIRE(toke.compare_nocase("NAW") > 0);
	REQUIRE(toke.compare_nocase("NoW") == 0);
	REQUIRE(toke.compare_nocase("pow") < 0);

	REQUIRE(toke.next() && toke.matches("is"));
	REQUIRE(toke.next() && toke.matches("the"));
	REQUIRE(toke.next() && toke.matches("time"));
	REQUIRE( ! toke.next());

	REQUIRE (toke.content() == "now is the time");

	toke.set("this is 'the end', really");
	REQUIRE(toke.next() && toke.matches("this"));
	REQUIRE(toke.next() && toke.matches("is"));
	REQUIRE( ! toke.is_quoted_string());
	REQUIRE( ! toke.is_regex());

	REQUIRE(toke.next() && toke.matches("the end"));
	REQUIRE(toke.is_quoted_string());
	REQUIRE( ! toke.is_regex());
	REQUIRE(toke.starts_with("the "));

	REQUIRE(toke.next() && toke.matches(","));
	REQUIRE( ! toke.is_quoted_string());
	REQUIRE( ! toke.is_regex());

	REQUIRE(toke.next() && toke.matches("really"));
	REQUIRE( ! toke.starts_with("the"));

	REQUIRE(toke.at_end());
	REQUIRE( ! toke.next());

	toke.rewind();
	REQUIRE(toke.next() && toke.matches("this"));
	toke.copy_to_end(temp);
	REQUIRE(temp == "this is 'the end', really");
	toke.mark_after();
	toke.next();
	REQUIRE(toke.next() && toke.matches("the end"));
	toke.next();
	toke.copy_marked(temp);
	if (temp != " is 'the end'") {
		emit_step_failure(__LINE__, msg.formatstr("toke.copy_marked() returned |%s| should be | is 'the end'|", temp.c_str()));
	}
	toke.copy_to_end(temp);
	if (temp != ", really") {
		emit_step_failure(__LINE__, msg.formatstr("toke.copy_to_end() returned |%s| should be |, really|", temp.c_str()));
	}


	return REQUIRED_RESULT();
}
Code example #20
0
bool VanillaToGrid::vanillaToGrid(classad::ClassAd * ad, int target_universe, const char * gridresource, bool is_sandboxed)
{
	ASSERT(ad);

	/* TODO:
		- If job fails to specify transfer_input_files but has some (relying on
		  a shared filesystem), we're doomed.  Probably
		  if(should_transfer_files is NO, we should just fail.
		  (We'll do the right thing if grid_type=condor, but not other
		  grid types.)

		- Job may be relying on Condor automatically transfer output files.  If
		  transfer_output_files is set, we're probably good (again, assuming no
		  shared filesystem).  If it's not set, we can't tell the difference
		  between no output files and "everything"
		  (We'll do the right thing if grid_type=condor, but not other
		  grid types.)

		- I'm leaving WhenToTransferFiles alone.  It should work fine.

		- Should I leave FilesystemDomain in?  May be useful, but
		simultaneously will likely do the Wrong Thing on the remote
		side
	*/

	MyString remoteattr;
	remoteattr = "Remote_";
	remoteattr += ATTR_JOB_UNIVERSE;

	// Things we don't need or want
	ad->Delete(ATTR_CLUSTER_ID); // Definite no-no
	ad->Delete(ATTR_PROC_ID);


	ad->Delete(ATTR_BUFFER_BLOCK_SIZE);
	ad->Delete(ATTR_BUFFER_SIZE);
	ad->Delete(ATTR_BUFFER_SIZE);
	ad->Delete("CondorPlatform"); // TODO: Find #define
	ad->Delete("CondorVersion");  // TODO: Find #define
	ad->Delete(ATTR_CORE_SIZE);
	ad->Delete(ATTR_GLOBAL_JOB_ID); // Store in different ATTR here?
	//ad->Delete(ATTR_OWNER); // How does schedd filter? 
	ad->Delete(ATTR_Q_DATE);
	ad->Delete(ATTR_JOB_REMOTE_WALL_CLOCK);
	ad->Delete(ATTR_SERVER_TIME);
	ad->Delete(ATTR_AUTO_CLUSTER_ID);
	ad->Delete(ATTR_AUTO_CLUSTER_ATTRS);
	ad->Delete(ATTR_TOTAL_SUBMIT_PROCS);
	ad->Delete( ATTR_STAGE_IN_FINISH );
	ad->Delete( ATTR_STAGE_IN_START );

	// We aren't going to forward updates to this attribute,
	// so strip it out.
	// We do evaluate it locally in the source job ad.
	ad->Delete(ATTR_TIMER_REMOVE_CHECK);

	ad->Delete("SUBMIT_" ATTR_JOB_IWD); // the presence of this would prevent schedd from rewriting spooled iwd

	// Stuff to reset
	ad->InsertAttr(ATTR_JOB_STATUS, 1); // Idle
	ad->InsertAttr(ATTR_JOB_REMOTE_USER_CPU, 0.0);
	ad->InsertAttr(ATTR_JOB_REMOTE_SYS_CPU, 0.0);
	ad->InsertAttr(ATTR_JOB_EXIT_STATUS, 0);
	ad->InsertAttr(ATTR_COMPLETION_DATE, 0);
	ad->InsertAttr(ATTR_JOB_LOCAL_SYS_CPU, 0.0);
	ad->InsertAttr(ATTR_JOB_LOCAL_USER_CPU, 0.0);
	ad->InsertAttr(ATTR_NUM_CKPTS, 0);
	ad->InsertAttr(ATTR_NUM_RESTARTS, 0);
	ad->InsertAttr(ATTR_NUM_SYSTEM_HOLDS, 0);
	ad->InsertAttr(ATTR_JOB_COMMITTED_TIME, 0);
	ad->InsertAttr(ATTR_COMMITTED_SLOT_TIME, 0);
	ad->InsertAttr(ATTR_CUMULATIVE_SLOT_TIME, 0);
	ad->InsertAttr(ATTR_TOTAL_SUSPENSIONS, 0);
	ad->InsertAttr(ATTR_LAST_SUSPENSION_TIME, 0);
	ad->InsertAttr(ATTR_CUMULATIVE_SUSPENSION_TIME, 0);
	ad->InsertAttr(ATTR_COMMITTED_SUSPENSION_TIME, 0);
	ad->InsertAttr(ATTR_ON_EXIT_BY_SIGNAL, false);


	//ad->Delete(ATTR_MY_TYPE); // Should be implied
	//ad->Delete(ATTR_TARGET_TYPE); // Should be implied.

	// Remap the universe
	classad::ExprTree * tmp;
	tmp = ad->Lookup(ATTR_JOB_UNIVERSE);
	if( ! tmp ) {
		EXCEPT("VanillaToGrid: job ad lacks universe");
	}
	classad::ExprTree * olduniv = tmp->Copy();
	if( ! olduniv) {
		EXCEPT("Unable to copy old universe");
	}

	ad->InsertAttr(ATTR_JOB_UNIVERSE, target_universe);
	ad->Insert(remoteattr.Value(), olduniv, false);
		// olduniv is now controlled by ClassAd

	if( target_universe == CONDOR_UNIVERSE_GRID ) {
		ad->Delete(ATTR_CURRENT_HOSTS);

		// Set the grid resource
		if( gridresource ) {
			ad->InsertAttr(ATTR_GRID_RESOURCE, gridresource);
		}

		// Grid universe, unlike vanilla universe expects full output
		// paths for Out/Err.  In vanilla, these are basenames that
		// point into TransferOutputRemaps.  If the job is sandboxed,
		// then our remaps will apply when we fetch the output from
		// the completed job.  That's fine, so we leave the remaps
		// in place in that case.  Otherwise, we undo the remaps and
		// let the grid job write directly to the correct output
		// paths.

		std::string remaps;
		ad->EvaluateAttrString(ATTR_TRANSFER_OUTPUT_REMAPS,remaps);
		if( !is_sandboxed && remaps.size() ) {
			MyString remap_filename;
			std::string filename,filenames;

				// Don't need the remaps in the grid copy of the ad.
			ad->Delete(ATTR_TRANSFER_OUTPUT_REMAPS);

			if( ad->EvaluateAttrString(ATTR_JOB_OUTPUT,filename) ) {
				if( filename_remap_find(remaps.c_str(),filename.c_str(),remap_filename) ) {
					ad->InsertAttr(ATTR_JOB_OUTPUT,remap_filename.Value());
				}
			}

			if( ad->EvaluateAttrString(ATTR_JOB_ERROR,filename) ) {
				if( filename_remap_find(remaps.c_str(),filename.c_str(),remap_filename) ) {
					ad->InsertAttr(ATTR_JOB_ERROR,remap_filename.Value());
				}
			}

				// TransferOutputFiles appears to be different.  If it
				// behaved similarly to Out/Err, then we would want to
				// do the following:
#if 0
			if( ad->EvaluateAttrString(ATTR_TRANSFER_OUTPUT_FILES,filename) ) {
				StringList output_files(filename.c_str(),",");
				StringList new_list;
				char const *fname;

				output_files.rewind();
				while( (fname=output_files.next()) ) {
					if( filename_remap_find(remaps.c_str(),fname,remap_filename) )
						{
							new_list.append(remap_filename.Value());
						}
					else {
						new_list.append(fname);
					}
				}

				char *new_list_str = new_list.print_to_string();
				ASSERT( new_list_str );

				ad->InsertAttr(ATTR_TRANSFER_OUTPUT_FILES,new_list_str);

				free( new_list_str );
			}
#endif

		}
	}

	return true;
}
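Note (an illustrative sketch, not part of the HTCondor sources above): in the non-sandboxed grid case the code above undoes TransferOutputRemaps so Out/Err point at their final destinations. A standalone sketch of that lookup-and-replace step, with a std::map standing in for the remap list that filename_remap_find consults; the entries are hypothetical:

#include <cstdio>
#include <map>
#include <string>

// Stand-in for the parsed TransferOutputRemaps list: basename -> full destination path.
typedef std::map<std::string, std::string> RemapTable;

// If the job's Out/Err value has a remap entry, substitute the remapped destination.
static std::string apply_remap(const RemapTable & remaps, const std::string & filename)
{
	RemapTable::const_iterator it = remaps.find(filename);
	return (it != remaps.end()) ? it->second : filename;
}

int main()
{
	RemapTable remaps;
	remaps["_condor_stdout"] = "/home/alice/job.out";   // hypothetical entries
	remaps["_condor_stderr"] = "/home/alice/job.err";

	printf("Out -> %s\n", apply_remap(remaps, "_condor_stdout").c_str());
	printf("Err -> %s\n", apply_remap(remaps, "_condor_stderr").c_str());
	return 0;
}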
Code example #21
0
File: MyString.cpp Project: LeonaLiu/MyString
 // copy constructor
 MyString::MyString(const MyString& str)
 {
     init(str.size(), str.data);
 }
Code example #22
0
File: rooster.cpp Project: emaste/htcondor
void Rooster::poll()
{
	dprintf(D_FULLDEBUG,"C**k-a-doodle-doo! (Time to look for machines to wake up.)\n");

	ClassAdList startdAds;
	CondorQuery unhibernateQuery(STARTD_AD);
	ExprTree *requirements = NULL;

	if( ParseClassAdRvalExpr( m_unhibernate_constraint.Value(), requirements )!=0 || requirements==NULL )
	{
		EXCEPT("Invalid expression for ROOSTER_UNHIBERNATE: %s\n",
			   m_unhibernate_constraint.Value());
	}

	unhibernateQuery.addANDConstraint(m_unhibernate_constraint.Value());

	CollectorList* collects = daemonCore->getCollectorList();
	ASSERT( collects );

	QueryResult result;
	result = collects->query(unhibernateQuery,startdAds);
	if( result != Q_OK ) {
		dprintf(D_ALWAYS,
				"Couldn't fetch startd ads using constraint "
				"ROOSTER_UNHIBERNATE=%s: %s\n",
				m_unhibernate_constraint.Value(), getStrQueryResult(result));
		return;
	}

	dprintf(D_FULLDEBUG,"Got %d startd ads matching ROOSTER_UNHIBERNATE=%s\n",
			startdAds.MyLength(), m_unhibernate_constraint.Value());

	startdAds.Sort(StartdSortFunc,&m_rank_ad);

	startdAds.Open();
	int num_woken = 0;
	ClassAd *startd_ad;
	HashTable<MyString,bool> machines_done(MyStringHash);
	while( (startd_ad=startdAds.Next()) ) {
		MyString machine;
		MyString name;
		startd_ad->LookupString(ATTR_MACHINE,machine);
		startd_ad->LookupString(ATTR_NAME,name);

		if( machines_done.exists(machine)==0 ) {
			dprintf(D_FULLDEBUG,
					"Skipping %s: already attempted to wake up %s in this cycle.\n",
					name.Value(),machine.Value());
			continue;
		}

			// in case the unhibernate expression is time-sensitive,
			// re-evaluate it now to make sure it still passes
		if( !EvalBool(startd_ad,requirements) ) {
			dprintf(D_ALWAYS,
					"Skipping %s: ROOSTER_UNHIBERNATE is no longer true.\n",
					name.Value());
			continue;
		}

		if( wakeUp(startd_ad) ) {
			machines_done.insert(machine,true);

			if( ++num_woken >= m_max_unhibernate && m_max_unhibernate > 0 ) {
				dprintf(D_ALWAYS,
						"Reached ROOSTER_MAX_UNHIBERNATE=%d in this cycle.\n",
						m_max_unhibernate);
				break;
			}
		}
	}
	startdAds.Close();

	delete requirements;
	requirements = NULL;

	if( startdAds.MyLength() ) {
		dprintf(D_FULLDEBUG,"Done sending wakeup calls.\n");
	}
}
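Note (an illustrative sketch, not part of the HTCondor sources above): poll() wakes each physical machine at most once per cycle and stops once ROOSTER_MAX_UNHIBERNATE machines have been woken. A standalone sketch of that bookkeeping, with std::set standing in for the HashTable and hypothetical machine names:

#include <cstdio>
#include <set>
#include <string>
#include <vector>

int main()
{
	// Hypothetical startd ads reduced to their Machine attribute; two slots share node01,
	// so only one wakeup should be attempted for that machine.
	std::vector<std::string> machines;
	machines.push_back("node01.example.edu");
	machines.push_back("node01.example.edu");
	machines.push_back("node02.example.edu");

	const int max_unhibernate = 1;        // stand-in for ROOSTER_MAX_UNHIBERNATE
	std::set<std::string> machines_done;  // machines already woken this cycle
	int num_woken = 0;

	for (size_t i = 0; i < machines.size(); ++i) {
		if (machines_done.count(machines[i])) {
			printf("Skipping %s: already attempted this cycle.\n", machines[i].c_str());
			continue;
		}
		printf("Sending wakeup call to %s.\n", machines[i].c_str());
		machines_done.insert(machines[i]);
		if (++num_woken >= max_unhibernate && max_unhibernate > 0) {
			printf("Reached the per-cycle limit of %d wakeups.\n", max_unhibernate);
			break;
		}
	}
	return 0;
}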
Code example #23
0
void* DaemonCore::Stats::New(const char * category, const char * name, int as)
{
   MyString attr;
   attr.formatstr("DC%s_%s", category, name);
   cleanStringForUseAsAttr(attr);

   void * ret = NULL;
   switch (as & (AS_TYPE_MASK | IS_CLASS_MASK))
      {
      case AS_COUNT | IS_RECENT:
         {
         stats_entry_recent<int> * probe =
         Pool.NewProbe< stats_entry_recent<int> >(name,  attr.Value(), as);
         probe->SetRecentMax(this->RecentWindowMax / this->RecentWindowQuantum);
         ret = probe;
         }
         break;

     case  AS_COUNT | IS_CLS_EMA:
         {
         stats_entry_ema<int>* probe =
         Pool.NewProbe< stats_entry_ema<int> >(name, attr.Value(), as | stats_entry_ema<int>::PubDefault);
         probe->ConfigureEMAHorizons(ema_config);
         probe->Clear();
         ret = probe;
         }
         break;

     case  STATS_ENTRY_TYPE_DOUBLE | IS_CLS_EMA:
         {
         stats_entry_ema<double>* probe =
         Pool.NewProbe< stats_entry_ema<double> >(name, attr.Value(), as | stats_entry_ema<double>::PubDefault);
         probe->ConfigureEMAHorizons(ema_config);
         probe->Clear();
         ret = probe;
         }
         break;

     case  AS_COUNT | IS_CLS_SUM_EMA_RATE:
         {
         stats_entry_sum_ema_rate<int>* probe =
         Pool.NewProbe< stats_entry_sum_ema_rate<int> >(name, attr.Value(), as | stats_entry_sum_ema_rate<int>::PubDefault);
         probe->ConfigureEMAHorizons(ema_config);
         probe->Clear();
         ret = probe;
         }
         break;

     case  STATS_ENTRY_TYPE_DOUBLE | IS_CLS_SUM_EMA_RATE:
         {
         stats_entry_sum_ema_rate<double>* probe =
         Pool.NewProbe< stats_entry_sum_ema_rate<double> >(name, attr.Value(), as | stats_entry_sum_ema_rate<double>::PubDefault);
         probe->ConfigureEMAHorizons(ema_config);
         probe->Clear();
         ret = probe;
         }
         break;

      case AS_ABSTIME | IS_RECENT:
      case AS_RELTIME | IS_RECENT:
         {
         stats_entry_recent<time_t> * probe =
         Pool.NewProbe< stats_entry_recent<time_t> >(name,  attr.Value(), as);
         probe->SetRecentMax(this->RecentWindowMax / this->RecentWindowQuantum);
         ret = probe;
         }
         break;

      case AS_COUNT | IS_RCT:
#ifdef USE_MIRON_PROBE_FOR_DC_RUNTIME_STATS
         {
         as &= ~(IS_CLASS_MASK);  // strip off IS_RTC class
         as |= IS_CLS_PROBE | IF_RT_SUM; // and set IS_CLS_PROBE & IF_RT_SUM classes
         stats_entry_probe<double> * probe =
         Pool.NewProbe< stats_entry_probe<double> >(name, attr.Value(), as);
         ret = probe;
         }
         break;
#else
          // fall through
#endif
      case AS_RELTIME | IS_RCT:
         {
         stats_recent_counter_timer * probe =
         Pool.NewProbe<stats_recent_counter_timer>(name, attr.Value(), as);
        #if 0 // def DEBUG
         attr += "Debug";
         Pool.AddPublish(attr.Value(), probe, NULL, 0, 
                       (FN_STATS_ENTRY_PUBLISH)&stats_recent_counter_timer::PublishDebug);
        #endif
         probe->SetRecentMax(this->RecentWindowMax / this->RecentWindowQuantum);
         ret = probe;
         }
         break;

      default:
         EXCEPT("unsupported probe type");
         break;
      }

   return ret;
}
Code example #24
0
File: rooster.cpp Project: emaste/htcondor
bool
Rooster::wakeUp(ClassAd *startd_ad)
{
	ASSERT( startd_ad );

	MyString name;
	startd_ad->LookupString(ATTR_NAME,name);

	dprintf(D_ALWAYS,"Sending wakeup call to %s.\n",name.Value());

	int stdin_pipe_fds[2];
	stdin_pipe_fds[0] = -1; // child's side
	stdin_pipe_fds[1] = -1; // my side
	if( !daemonCore->Create_Pipe(stdin_pipe_fds) ) {
		dprintf(D_ALWAYS,"Rooster::wakeUp: failed to create stdin pipe.");
		return false;
	}

	int stdout_pipe_fds[2];
	stdout_pipe_fds[0] = -1; // my side
	stdout_pipe_fds[1] = -1; // child's side
	if( !daemonCore->Create_Pipe(stdout_pipe_fds) ) {
		dprintf(D_ALWAYS,"Rooster::wakeUp: failed to create stdout pipe.");
		daemonCore->Close_Pipe(stdin_pipe_fds[0]);
		daemonCore->Close_Pipe(stdin_pipe_fds[1]);
		return false;
	}

	int std_fds[3];
	std_fds[0] = stdin_pipe_fds[0];
	std_fds[1] = stdout_pipe_fds[1];
	std_fds[2] = stdout_pipe_fds[1];

	int pid = daemonCore->Create_Process(
		m_wakeup_args.GetArg(0),
		m_wakeup_args,
		PRIV_CONDOR_FINAL,
		0,
		FALSE,
		NULL,
		NULL,
		NULL,
		NULL,
		std_fds);

	daemonCore->Close_Pipe(stdin_pipe_fds[0]);
	daemonCore->Close_Pipe(stdout_pipe_fds[1]);

	if( pid == -1 ) {
		dprintf(D_ALWAYS,"Failed to run %s: %s\n",
				m_wakeup_cmd.Value(), strerror(errno));
		daemonCore->Close_Pipe(stdin_pipe_fds[1]);
		daemonCore->Close_Pipe(stdout_pipe_fds[0]);
		return false;
	}

	MyString stdin_str;
	startd_ad->sPrint(stdin_str);

		// Beware: the following code assumes that we will not
		// deadlock by filling up the stdin pipe while the tool blocks
		// filling up the stdout pipe.  The tool must consume all
		// input before generating more than a pipe buffer full of output.

	int n = daemonCore->Write_Pipe(stdin_pipe_fds[1],stdin_str.Value(),stdin_str.Length());
	if( n != stdin_str.Length() ) {
		dprintf(D_ALWAYS,"Rooster::wakeUp: failed to write to %s: %s\n",
				m_wakeup_cmd.Value(), strerror(errno));
		daemonCore->Close_Pipe(stdin_pipe_fds[1]);
		daemonCore->Close_Pipe(stdout_pipe_fds[0]);
		return false;
	}

		// done writing to tool
	daemonCore->Close_Pipe(stdin_pipe_fds[1]);

	MyString stdout_str;
	while( true ) {
		char pipe_buf[1024];
		n = daemonCore->Read_Pipe(stdout_pipe_fds[0],pipe_buf,1023);
		if( n <= 0 ) {
			break;
		}
		ASSERT( n < 1024 );
		pipe_buf[n] = '\0';
		stdout_str += pipe_buf;
	}

		// done reading from tool
	daemonCore->Close_Pipe(stdout_pipe_fds[0]);

	if( stdout_str.Length() ) {
			// log debugging output from the tool
		dprintf(D_ALWAYS|D_NOHEADER,"%s",stdout_str.Value());
	}

		// Would be nice to get final exit status of tool, but
		// daemonCore() doesn't provide a waitpid() equivalent.
		// Why didn't I just use my_popen()?  Because it doesn't
		// allow us to write to the tool _and_ log debugging output.
	return true;
}
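Note (an illustrative sketch, not part of the HTCondor sources above): the deadlock caveat in wakeUp() reduces to an ordering rule: write the whole ad, close the tool's stdin, and only then drain its stdout. A standalone POSIX sketch of that ordering, using cat as a stand-in for the wakeup tool and assuming, as above, that the tool consumes all input before producing more than a pipe buffer of output:

#include <cstdio>
#include <string>
#include <sys/wait.h>
#include <unistd.h>

int main()
{
	int to_child[2];     // parent writes, child reads (the tool's stdin)
	int from_child[2];   // child writes, parent reads (the tool's stdout)
	if (pipe(to_child) < 0 || pipe(from_child) < 0) { perror("pipe"); return 1; }

	pid_t pid = fork();
	if (pid < 0) { perror("fork"); return 1; }

	if (pid == 0) {                       // child: stand-in for the wakeup tool
		dup2(to_child[0], 0);
		dup2(from_child[1], 1);
		close(to_child[0]);  close(to_child[1]);
		close(from_child[0]); close(from_child[1]);
		execlp("cat", "cat", (char *) NULL);
		_exit(127);
	}

	close(to_child[0]);                   // parent keeps only its own ends
	close(from_child[1]);

	// 1) Write the whole "ad" to the tool, then close so the tool sees EOF.
	const std::string stdin_str = "Machine = \"node01.example.edu\"\n";
	if (write(to_child[1], stdin_str.c_str(), stdin_str.size()) != (ssize_t) stdin_str.size()) {
		perror("write");
	}
	close(to_child[1]);

	// 2) Only now drain the tool's output until EOF.
	char buf[1024];
	ssize_t n;
	while ((n = read(from_child[0], buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		printf("[tool] %s", buf);
	}
	close(from_child[0]);

	int status = 0;
	waitpid(pid, &status, 0);             // here we can reap the child directly
	return 0;
}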
Code example #25
0
// make query
int GenericQuery::
makeQuery (ExprTree *&tree)
{
    int		i, value;
    char	*item;
    float   fvalue;
    MyString req = "";

    tree = NULL;

    // construct query requirement expression
    bool firstCategory = true;

    // add string constraints
    for (i = 0; i < stringThreshold; i++)
    {
        stringConstraints [i].Rewind ();
        if (!stringConstraints [i].AtEnd ())
        {
            bool firstTime = true;
            req += firstCategory ? "(" : " && (";
            while ((item = stringConstraints [i].Next ()))
            {
                req.sprintf_cat ("%s(%s == \"%s\")",
                                 firstTime ? " " : " || ",
                                 stringKeywordList [i], item);
                firstTime = false;
                firstCategory = false;
            }
            req += " )";
        }
    }

    // add integer constraints
    for (i = 0; i < integerThreshold; i++)
    {
        integerConstraints [i].Rewind ();
        if (!integerConstraints [i].AtEnd ())
        {
            bool firstTime = true;
            req += firstCategory ? "(" : " && (";
            while (integerConstraints [i].Next (value))
            {
                req.sprintf_cat ("%s(%s == %d)",
                                 firstTime ? " " : " || ",
                                 integerKeywordList [i], value);
                firstTime = false;
                firstCategory = false;
            }
            req += " )";
        }
    }

    // add float constraints
    for (i = 0; i < floatThreshold; i++)
    {
        floatConstraints [i].Rewind ();
        if (!floatConstraints [i].AtEnd ())
        {
            bool firstTime = true;
            req += firstCategory ? "(" : " && (";
            while (floatConstraints [i].Next (fvalue))
            {
                req.sprintf_cat ("%s(%s == %f)",
                                 firstTime ? " " : " || ",
                                 floatKeywordList [i], fvalue);
                firstTime = false;
                firstCategory = false;
            }
            req += " )";
        }
    }

    // add custom AND constraints
    customANDConstraints.Rewind ();
    if (!customANDConstraints.AtEnd ())
    {
        bool firstTime = true;
        req += firstCategory ? "(" : " && (";
        while ((item = customANDConstraints.Next ()))
        {
            req.sprintf_cat ("%s(%s)", firstTime ? " " : " && ", item);
            firstTime = false;
            firstCategory = false;
        }
        req += " )";
    }

    // add custom OR constraints
    customORConstraints.Rewind ();
    if (!customORConstraints.AtEnd ())
    {
        bool firstTime = true;
        req += firstCategory ? "(" : " && (";
        while ((item = customORConstraints.Next ()))
        {
            req.sprintf_cat ("%s(%s)", firstTime ? " " : " || ", item);
            firstTime = false;
            firstCategory = false;
        }
        req += " )";
    }

    // absolutely no constraints at all
    if (firstCategory) {
        req += "TRUE";
    }

    // parse constraints and insert into query ad
    if (ParseClassAdRvalExpr (req.Value(), tree) > 0) return Q_PARSE_ERROR;

    return Q_OK;
}
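Note (an illustrative sketch, not part of the HTCondor sources above): makeQuery() assembles the requirement as an AND of per-keyword OR groups and falls back to "TRUE" when nothing is constrained. A standalone sketch of the same string assembly using std::ostringstream and hypothetical keywords:

#include <cstdio>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

// Build "( (k == "v1") || (k == "v2") ) && ( ... )" from keyword/value groups,
// returning "TRUE" when there are no constraints at all.
static std::string make_requirements(
	const std::vector<std::pair<std::string, std::vector<std::string> > > & groups)
{
	std::ostringstream req;
	bool firstCategory = true;
	for (size_t g = 0; g < groups.size(); ++g) {
		if (groups[g].second.empty()) continue;
		req << (firstCategory ? "(" : " && (");
		for (size_t i = 0; i < groups[g].second.size(); ++i) {
			req << (i == 0 ? " " : " || ")
			    << "(" << groups[g].first << " == \"" << groups[g].second[i] << "\")";
		}
		req << " )";
		firstCategory = false;
	}
	return firstCategory ? std::string("TRUE") : req.str();
}

int main()
{
	std::vector<std::pair<std::string, std::vector<std::string> > > groups(2);
	groups[0].first = "Name";
	groups[0].second.push_back("slot1@node01");
	groups[0].second.push_back("slot2@node01");
	groups[1].first = "Arch";
	groups[1].second.push_back("X86_64");

	// Prints: ( (Name == "slot1@node01") || (Name == "slot2@node01") ) && ( (Arch == "X86_64") )
	printf("%s\n", make_requirements(groups).c_str());
	return 0;
}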
Code example #26
0
File: rooster.cpp Project: emaste/htcondor
void Rooster::config()
{
	int old_polling_interval = m_polling_interval;
	m_polling_interval = param_integer("ROOSTER_INTERVAL",300);
	if( m_polling_interval < 0 ) {
		dprintf(D_ALWAYS,
				"ROOSTER_INTERVAL is less than 0, so no unhibernate checks "
				"will be made.\n");
		if( m_polling_timer != -1 ) {
			daemonCore->Cancel_Timer(m_polling_timer);
			m_polling_timer = -1;
		}
	}
	else if( m_polling_timer >= 0 ) {
		if( old_polling_interval != m_polling_interval ) {
			daemonCore->Reset_Timer(
				m_polling_timer,
				m_polling_interval,
				m_polling_interval);
		}
	}
	else {
		m_polling_timer = daemonCore->Register_Timer(
			m_polling_interval,
			m_polling_interval,
			(TimerHandlercpp)&Rooster::poll,
			"Rooster::poll",
			this );
	}
	if( old_polling_interval != m_polling_interval && m_polling_interval > 0 )
	{
		dprintf(D_ALWAYS,
				"Will perform unhibernate checks every ROOSTER_INTERVAL=%d "
				"seconds.\n", m_polling_interval);
	}

	ASSERT( param(m_unhibernate_constraint,"ROOSTER_UNHIBERNATE") );


	ASSERT( param(m_wakeup_cmd,"ROOSTER_WAKEUP_CMD") );

	m_wakeup_args.Clear();
	MyString error_msg;
	if( !m_wakeup_args.AppendArgsV2Quoted(m_wakeup_cmd.Value(),&error_msg) ) {
		EXCEPT("Invalid wakeup command %s: %s\n",
			   m_wakeup_cmd.Value(), error_msg.Value());
	}

	MyString rank;
	param(rank,"ROOSTER_UNHIBERNATE_RANK");
	if( rank.IsEmpty() ) {
		m_rank_ad.Delete(ATTR_RANK);
	}
	else {
		if( !m_rank_ad.AssignExpr(ATTR_RANK,rank.Value()) ) {
			EXCEPT("Invalid expression for ROOSTER_UNHIBERNATE_RANK: %s\n",
				   rank.Value());
		}
	}

	m_max_unhibernate = param_integer("ROOSTER_MAX_UNHIBERNATE",0,0);
}
Code example #27
0
File: vmgahp_common.cpp Project: dcbradley/htcondor
const char* 
get_job_user_name(void)
{
	return job_user_name.Value();
}
Code example #28
0
bool user_map_do_mapping(const char *, const char *, MyString & output) { output.clear(); return false; }
Code example #29
0
File: log_events.cpp Project: AlainRoy/htcondor
extern "C" void
log_termination (struct rusage *localr, struct rusage *remoter)
{
	check_execute_event();

	switch (WTERMSIG(JobStatus))
	{
	  case 0:
	  case -1: 
		// if core, bad executable --- otherwise, a normal exit
		if (WCOREDUMP(JobStatus) && WEXITSTATUS(JobStatus) == ENOEXEC)
		{
			// log the ULOG_EXECUTABLE_ERROR event
			ExecutableErrorEvent event;
			event.errType = CONDOR_EVENT_NOT_EXECUTABLE;
			if (!ULog.writeEvent (&event))
			{
				dprintf (D_ALWAYS, "Unable to log NOT_EXECUTABLE event\n");
			}
		}
		else
		if (WCOREDUMP(JobStatus) && WEXITSTATUS(JobStatus) == 0)
		{		
			// log the ULOG_EXECUTABLE_ERROR event
			ExecutableErrorEvent event;
			event.errType = CONDOR_EVENT_BAD_LINK;
			if (!ULog.writeEvent (&event))
			{
				dprintf (D_ALWAYS, "Unable to log BAD_LINK event\n");
			}
		}
		else
		{
			// log the ULOG_JOB_TERMINATED event
			JobTerminatedEvent event;
			event.normal = true; // normal termination
			event.returnValue = WEXITSTATUS(JobStatus);
			event.total_local_rusage = Proc->local_usage;
			event.total_remote_rusage = Proc->remote_usage[0];
			event.run_local_rusage = *localr;
			event.run_remote_rusage = *remoter;
			// we want to log the events from the perspective of the
			// user job, so if the shadow *sent* the bytes, then that
			// means the user job *received* the bytes
			event.recvd_bytes = BytesSent;
			event.sent_bytes = BytesRecvd;
			if (syscall_sock) {
				event.recvd_bytes += syscall_sock->get_bytes_sent();
				event.sent_bytes += syscall_sock->get_bytes_recvd();
			}
			event.total_recvd_bytes = TotalBytesSent + event.recvd_bytes;
			event.total_sent_bytes = TotalBytesRecvd + event.sent_bytes;
			if (!ULog.writeEvent (&event))
			{
				dprintf (D_ALWAYS,"Unable to log ULOG_JOB_TERMINATED event\n");
			}
		}
		break;


	  case SIGKILL:
		// evicted without a checkpoint
		{
			JobEvictedEvent event;
			event.checkpointed = false;
			event.run_local_rusage = *localr;
			event.run_remote_rusage = *remoter;
			// we want to log the events from the perspective of the
			// user job, so if the shadow *sent* the bytes, then that
			// means the user job *received* the bytes
			event.recvd_bytes = BytesSent;
			event.sent_bytes = BytesRecvd;
			if (syscall_sock) {
				event.recvd_bytes += syscall_sock->get_bytes_sent();
				event.sent_bytes += syscall_sock->get_bytes_recvd();
			}
			if (!ULog.writeEvent (&event))
			{
				dprintf (D_ALWAYS, "Unable to log ULOG_JOB_EVICTED event\n");
			}
		}
		break;

			
	  case SIGQUIT:
		// evicted, but *with* a checkpoint
		{
			JobEvictedEvent event;
			event.checkpointed = true;
			event.run_local_rusage = *localr;
			event.run_remote_rusage = *remoter;
			// we want to log the events from the perspective of the
			// user job, so if the shadow *sent* the bytes, then that
			// means the user job *received* the bytes
			event.recvd_bytes = BytesSent;
			event.sent_bytes = BytesRecvd;
			if (syscall_sock) {
				event.recvd_bytes += syscall_sock->get_bytes_sent();
				event.sent_bytes += syscall_sock->get_bytes_recvd();
			}
			if (!ULog.writeEvent (&event))
			{
				dprintf (D_ALWAYS, "Unable to log ULOG_JOB_EVICTED event\n");
			}
		}
		break;


	  default:
		// abnormal termination
		{
			MyString coredir;
			MyString coreFile;
			JobTerminatedEvent event;
			event.normal = false;
			event.signalNumber = WTERMSIG(JobStatus);

			if (WCOREDUMP(JobStatus))
			{
				/* look up the corefile name in the job ad if one exists... */
				if (!JobAd->LookupString(ATTR_JOB_CORE_FILENAME, coreFile)) {
					/* if it didn't exist in the job ad, then construct what it
						should be. */

					ASSERT( condor_getcwd(coredir) );

					if (strcmp (Proc->rootdir, "/") == 0)
					{
						coreFile.formatstr( "%s/core.%d.%d", coredir.Value(),
							 	Proc->id.cluster, Proc->id.proc );
					}
					else
					{
						coreFile.formatstr( "%s%s/core.%d.%d", Proc->rootdir,
							 	coredir.Value(), Proc->id.cluster, Proc->id.proc );
					}
				} 
				
				event.setCoreFile( coreFile.Value() );
			}
			event.run_local_rusage = *localr;
			event.run_remote_rusage = *remoter;
			event.total_local_rusage = Proc->local_usage;
			event.total_remote_rusage = Proc->remote_usage[0];
			// we want to log the events from the perspective of the
			// user job, so if the shadow *sent* the bytes, then that
			// means the user job *received* the bytes
			event.recvd_bytes = BytesSent;
			event.sent_bytes = BytesRecvd;
			if (syscall_sock) {
				event.recvd_bytes += syscall_sock->get_bytes_sent();
				event.sent_bytes += syscall_sock->get_bytes_recvd();
			}
			if (!ULog.writeEvent (&event))
			{
				dprintf (D_ALWAYS,"Unable to log ULOG_JOB_TERMINATED event\n");
			}
		}
	}
}
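Note (an illustrative sketch, not part of the HTCondor sources above): the switch above keys off WTERMSIG and WCOREDUMP of the job's status word, with SIGKILL and SIGQUIT given the special eviction meanings described in the comments. A standalone sketch of the plain POSIX decoding; it assumes a platform that provides WCOREDUMP, as the code above does:

#include <cstdio>
#include <signal.h>
#include <sys/wait.h>

// Classify a raw wait(2) status word with the standard macros.
static void describe_status(int status)
{
	if (WIFEXITED(status)) {
		printf("normal exit, return value %d\n", WEXITSTATUS(status));
	} else if (WIFSIGNALED(status)) {
		int sig = WTERMSIG(status);
		printf("killed by signal %d%s\n", sig, WCOREDUMP(status) ? " (core dumped)" : "");
		if (sig == SIGKILL) printf("  -> treated above as eviction without a checkpoint\n");
		if (sig == SIGQUIT) printf("  -> treated above as eviction with a checkpoint\n");
	}
}

int main()
{
	describe_status(0);         // exited with status 0
	describe_status(SIGKILL);   // terminated by SIGKILL
	describe_status(SIGQUIT);   // terminated by SIGQUIT
	return 0;
}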
Code example #30
0
File: docker-api.cpp Project: dcbradley/htcondor
//
// Because we fork before calling docker, we don't actually
// care if the image is stored locally or not (except to the extent that
// remote image pull violates the principle of least astonishment).
//
int DockerAPI::run(
	ClassAd &machineAd,
	ClassAd &jobAd,
	const std::string & containerName,
	const std::string & imageID,
	const std::string & command,
	const ArgList & args,
	const Env & env,
	const std::string & sandboxPath,
	const std::list<std::string> extraVolumes,
	int & pid,
	int * childFDs,
	CondorError & /* err */ )
{
	gc_image(imageID);
	//
	// We currently assume that the system has been configured so that
	// anyone (user) who can run an HTCondor job can also run docker.  It's
	// also apparently a security worry to run Docker as root, so let's not.
	//
	ArgList runArgs;
	if ( ! add_docker_arg(runArgs))
		return -1;
	runArgs.AppendArg( "run" );

	// Write out a file with the container ID.
	// FIXME: The startd can check this to clean up after us.
	// This needs to go into a directory that condor user
	// can write to.

/*
	std::string cidFileName = sandboxPath + "/.cidfile";
	runArgs.AppendArg( "--cidfile=" + cidFileName );
*/

	
	// Configure resource limits.
	
	// First cpus
	int  cpus;
	int cpuShare;

	if (machineAd.LookupInteger(ATTR_CPUS, cpus)) {
		cpuShare = 10 * cpus;
	} else {
		cpuShare = 10;
	}
	std::string cpuShareStr;
	formatstr(cpuShareStr, "--cpu-shares=%d", cpuShare);
	runArgs.AppendArg(cpuShareStr);

	// Now memory
	int memory; // in Megabytes
	if (machineAd.LookupInteger(ATTR_MEMORY, memory)) {
		std::string mem;
		formatstr(mem, "--memory=%dm", memory);
		runArgs.AppendArg(mem);
	} 

	// drop unneeded Linux capabilities
	if (param_boolean("DOCKER_DROP_ALL_CAPABILITIES", true /*default*/,
		true /*do_log*/, &machineAd, &jobAd)) {
		runArgs.AppendArg("--cap-drop=all");
			
		// --no-new-privileges flag appears in docker 1.11
		if (DockerAPI::majorVersion > 1 ||
		    DockerAPI::minorVersion > 10) {
			runArgs.AppendArg("--no-new-privileges");
		}
	}

	// Give the container a useful name
	std::string hname = makeHostname(&machineAd, &jobAd);
	runArgs.AppendArg("--hostname");
	runArgs.AppendArg(hname.c_str());

		// Now the container name
	runArgs.AppendArg( "--name" );
	runArgs.AppendArg( containerName );

	if ( ! add_env_to_args_for_docker(runArgs, env)) {
		dprintf( D_ALWAYS | D_FAILURE, "Failed to pass enviroment to docker.\n" );
		return -8;
	}

	// Map the external sandbox to the internal sandbox.
	runArgs.AppendArg( "--volume" );
	runArgs.AppendArg( sandboxPath + ":" + sandboxPath );

	// Now any extra volumes
	for (std::list<std::string>::const_iterator it = extraVolumes.begin(); it != extraVolumes.end(); it++) {
		runArgs.AppendArg("--volume");
		std::string volume = *it;
		runArgs.AppendArg(volume);
	}
	
	// Start in the sandbox.
	runArgs.AppendArg( "--workdir" );
	runArgs.AppendArg( sandboxPath );

	// Run with the uid that condor selects for the user
	// either a slot user or submitting user or nobody
	uid_t uid = 0;
	uid_t gid = 0;

	// Docker doesn't actually run on Windows, but we compile
	// on Windows because...
#ifndef WIN32
	uid = get_user_uid();
	gid = get_user_gid();
#endif
	
	if ((uid == 0) || (gid == 0)) {
		dprintf(D_ALWAYS|D_FAILURE, "Failed to get userid to run docker job\n");
		return -9;
	}

	runArgs.AppendArg("--user");
	std::string uidgidarg;
	formatstr(uidgidarg, "%d:%d", uid, gid);
	runArgs.AppendArg(uidgidarg);

	// Run the command with its arguments in the image.
	runArgs.AppendArg( imageID );

	
	// If no command given, the default command in the image will run
	if (command.length() > 0) {
		runArgs.AppendArg( command );
	}

	runArgs.AppendArgsFromArgList( args );

	MyString displayString;
	runArgs.GetArgsStringForLogging( & displayString );
	dprintf( D_ALWAYS, "Attempting to run: %s\n", displayString.c_str() );

	//
	// If we run Docker attached, we avoid a race condition where
	// 'docker logs --follow' returns before 'docker rm' knows that the
	// container is gone (and refuses to remove it).  Of course, we
	// can't block, so we have a proxy process run attached for us.
	//
	FamilyInfo fi;
	fi.max_snapshot_interval = param_integer( "PID_SNAPSHOT_INTERVAL", 15 );
	int childPID = daemonCore->Create_Process( runArgs.GetArg(0), runArgs,
		PRIV_CONDOR_FINAL, 1, FALSE, FALSE, NULL, "/",
		& fi, NULL, childFDs );

	if( childPID == FALSE ) {
		dprintf( D_ALWAYS | D_FAILURE, "Create_Process() failed.\n" );
		return -1;
	}
	pid = childPID;

	return 0;
}
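Note (an illustrative sketch, not part of the HTCondor sources above): the argument list assembled above is ultimately just an argv for "docker run". A standalone sketch that builds the same resource and identity flags seen in the snippet (--cpu-shares with the 10-per-cpu rule, --memory, --user, --volume, --workdir) into a std::vector<std::string>; every value is hypothetical:

#include <cstdio>
#include <string>
#include <vector>

int main()
{
	// Hypothetical slot and job values.
	const int cpus = 2, memory_mb = 2048;
	const int uid = 10001, gid = 10001;
	const std::string sandbox = "/var/lib/condor/execute/dir_12345";
	const std::string image   = "debian:stable";
	const std::string command = "/bin/sleep";

	char buf[64];
	std::vector<std::string> argv;
	argv.push_back("docker");
	argv.push_back("run");

	snprintf(buf, sizeof(buf), "--cpu-shares=%d", 10 * cpus);   // same 10x-per-cpu rule as above
	argv.push_back(buf);
	snprintf(buf, sizeof(buf), "--memory=%dm", memory_mb);
	argv.push_back(buf);

	argv.push_back("--user");
	snprintf(buf, sizeof(buf), "%d:%d", uid, gid);
	argv.push_back(buf);

	argv.push_back("--volume");
	argv.push_back(sandbox + ":" + sandbox);   // map the sandbox to itself inside the container
	argv.push_back("--workdir");
	argv.push_back(sandbox);

	argv.push_back(image);
	argv.push_back(command);
	argv.push_back("600");                     // argument passed to the command

	for (size_t i = 0; i < argv.size(); ++i) {
		printf("%s%c", argv[i].c_str(), i + 1 < argv.size() ? ' ' : '\n');
	}
	return 0;
}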