// Construct a StatInfo from a full path.  Takes heap copies of the path
// for both fullpath and dirpath, splits off the part after the last
// directory delimiter as the filename, and then stats the file.  A
// trailing slash is temporarily removed around the stat() call (see the
// #4747 note below).
StatInfo::StatInfo( const char *path )
{
	char *s, *last = NULL, *trail_slash = NULL, chslash;
	fullpath = strnewp( path );
	dirpath = strnewp( path );

		// Since we've got our own copy of the full path now sitting
		// in dirpath, we can find the last directory delimiter, make
		// a copy of whatever is beyond it as the filename, and put a
		// NULL in the first character after the delim character so
		// that the dirpath always contains the directory delim.
	for( s = dirpath; s && *s != '\0'; s++ ) {
		if( *s == '\\' || *s == '/' ) {
			last = s;
		}
	}
	if( last != NULL && last[1] ) {
			// Normal case: the path ends in a filename component.
		filename = strnewp( &last[1] );
		last[1] = '\0';
	} else {
		filename = NULL;
		if (last != NULL) {
			// we only get here if the input path ended with a dir separator
			// we can't stat that on windows, and *nix does't care, so we remove it.
			// (index the equivalent position inside fullpath, since 'last'
			// points into dirpath)
			trail_slash = &fullpath[last - dirpath];
		}
	}

	// remove trailing slash before we stat, and then put it back after. this fixes #4747
	// why do we put it back? because things crash if we don't and this is a stable series fix.
	if (trail_slash) {
		chslash = *trail_slash;
		*trail_slash = 0;
	}
	stat_file( fullpath );
	if (trail_slash) {
		*trail_slash = chslash;
	}
}
//--------------------------------------------------------------------------- Job::Job( const char* jobName, const char *directory, const char* cmdFile ) : _preskip( PRE_SKIP_INVALID ), _final( false ) { ASSERT( jobName != NULL ); ASSERT( cmdFile != NULL ); debug_printf( DEBUG_DEBUG_1, "Job::Job(%s, %s, %s)\n", jobName, directory, cmdFile ); _scriptPre = NULL; _scriptPost = NULL; _Status = STATUS_READY; _isIdle = false; countedAsDone = false; _jobName = strnewp (jobName); _directory = strnewp (directory); _cmdFile = strnewp (cmdFile); _dagFile = NULL; _throttleInfo = NULL; // _condorID struct initializes itself // jobID is a primary key (a database term). All should be unique _jobID = _jobID_counter++; retry_max = 0; retries = 0; _submitTries = 0; retval = -1; // so Coverity is happy have_retry_abort_val = false; retry_abort_val = 0xdeadbeef; have_abort_dag_val = false; abort_dag_val = -1; // so Coverity is happy have_abort_dag_return_val = false; abort_dag_return_val = -1; // so Coverity is happy _visited = false; _dfsOrder = -1; // so Coverity is happy _queuedNodeJobProcs = 0; _hasNodePriority = false; _nodePriority = 0; _noop = false; _jobTag = NULL; _jobstateSeqNum = 0; _lastEventTime = 0; varsFromDag = new List<NodeVar>; snprintf( error_text, JOB_ERROR_TEXT_MAXLEN, "unknown" ); _timesHeld = 0; _jobProcsOnHold = 0; return; }
// Construct a DCStartd client object.  All arguments are optional; the
// address and claim id are only stored when supplied.
DCStartd::DCStartd( const char* tName, const char* tPool, const char* tAddr,
					const char* tId )
	: Daemon( DT_STARTD, tName, tPool )
{
		// claim_id isn't initialized by Daemon's constructor, so we
		// have to treat it slightly differently
	claim_id = tId ? strnewp( tId ) : NULL;

	if( tAddr ) {
		New_addr( strnewp(tAddr) );
	}
}
// Copy-construct a Starter.  Only a Starter that has not yet been
// spawned may be copied; anything with a claim, pid, birthdate, or open
// ports triggers an EXCEPT.
Starter::Starter( const Starter& s )
	: Service( s )
{
	if( s.s_claim || s.s_pid || s.s_birthdate
			|| s.s_port1 >= 0 || s.s_port2 >= 0 ) {
		EXCEPT( "Trying to copy a Starter object that's already running!" );
	}

		// Deep-copy the ad and path; both are optional.
	s_ad = s.s_ad ? new ClassAd( *(s.s_ad) ) : NULL;
	s_path = s.s_path ? strnewp( s.s_path ) : NULL;

	s_is_dc = s.s_is_dc;

	initRunData();
}
//--------------------------------------------------------------------------- void Job::PrefixDirectory(MyString &prefix) { MyString newdir; // don't add an unnecessary prefix if (prefix == ".") { return; } // If the job DIR is absolute, leave it alone if (_directory[0] == '/') { return; } // otherwise, prefix it. newdir += prefix; newdir += "/"; newdir += _directory; delete [] _directory; _directory = strnewp(newdir.Value()); }
void DCCollector::deepCopy( const DCCollector& copy ) { if( update_rsock ) { delete update_rsock; update_rsock = NULL; } /* for now, we're not going to attempt to copy the update_rsock from the copy, since i'm not sure i trust ReliSock's copy constructor to do the right thing once the original goes away... DCCollector will be able to re-create this socket for TCP updates. it's a little expensive, since we need a whole new connect(), etc, but in most cases, we're not going to be doing this very often, and correctness is more important than speed at the moment. once we have more time for testing, we can figure out if just copying the update_rsock works and TCP updates are still happy... */ use_tcp = copy.use_tcp; use_nonblocking_update = copy.use_nonblocking_update; up_type = copy.up_type; if( update_destination ) { delete [] update_destination; } update_destination = strnewp( copy.update_destination ); startTime = copy.startTime; }
//--------------------------------------------------------------------------- const char * Job::GetJobstateJobTag() { if ( !_jobTag ) { MyString jobTagName = MultiLogFiles::loadValueFromSubFile( _cmdFile, _directory, JOB_TAG_NAME ); if ( jobTagName == "" ) { jobTagName = PEGASUS_SITE; } else { // Remove double-quotes int begin = jobTagName[0] == '\"' ? 1 : 0; int last = jobTagName.Length() - 1; int end = jobTagName[last] == '\"' ? last - 1 : last; jobTagName = jobTagName.Substr( begin, end ); } MyString tmpJobTag = MultiLogFiles::loadValueFromSubFile( _cmdFile, _directory, jobTagName.Value() ); if ( tmpJobTag == "" ) { tmpJobTag = "-"; } else { // Remove double-quotes int begin = tmpJobTag[0] == '\"' ? 1 : 0; int last = tmpJobTag.Length() - 1; int end = tmpJobTag[last] == '\"' ? last - 1 : last; tmpJobTag = tmpJobTag.Substr( begin, end ); } _jobTag = strnewp( tmpJobTag.Value() ); } return _jobTag; }
// Construct a StatInfo from separate directory and filename parts, then
// stat the resulting full path.
StatInfo::StatInfo( const char *param_dirpath, const char *param_filename )
{
		// Heap copies: make_dirpath() normalizes the directory part,
		// dircat() joins the two pieces into the full path.
	filename = strnewp( param_filename );
	dirpath = make_dirpath( param_dirpath );
	fullpath = dircat( param_dirpath, param_filename );

	stat_file( fullpath );
}
// Replace this Starter's executable path with a copy of updated_path.
void
Starter::setPath( const char* updated_path )
{
		// delete[] on NULL is a no-op, so no guard is needed before
		// releasing the old copy.
	delete [] s_path;
	s_path = strnewp( updated_path );
}
// The GlobusSubmitEvent is now deprecated and should be removed at // some point in the future (6.9?). bool WriteGlobusSubmitEventToUserLog( ClassAd *job_ad ) { int cluster, proc; std::string contact; WriteUserLog *ulog = InitializeUserLog( job_ad ); if ( ulog == NULL ) { // User doesn't want a log return true; } job_ad->LookupInteger( ATTR_CLUSTER_ID, cluster ); job_ad->LookupInteger( ATTR_PROC_ID, proc ); dprintf( D_FULLDEBUG, "(%d.%d) Writing globus submit record to user logfile\n", cluster, proc ); GlobusSubmitEvent event; job_ad->LookupString( ATTR_GRID_RESOURCE, contact ); Tokenize( contact ); GetNextToken( " ", false ); event.rmContact = strnewp(GetNextToken( " ", false )); job_ad->LookupString( ATTR_GRID_JOB_ID, contact ); Tokenize( contact ); if ( strcasecmp( GetNextToken( " ", false ), "gt2" ) == 0 ) { GetNextToken( " ", false ); } event.jmContact = strnewp(GetNextToken( " ", false )); event.restartableJM = true; int rc = ulog->writeEvent(&event,job_ad); delete ulog; if (!rc) { dprintf( D_ALWAYS, "(%d.%d) Unable to log ULOG_GLOBUS_SUBMIT event\n", cluster, proc ); return false; } return true; }
void XInterface::ReadUtmp() { #if USES_UTMPX struct utmpx utmp_entry; #else struct utmp utmp_entry; #endif if ( logged_on_users ) { for (int foo =0; foo <= logged_on_users->getlast(); foo++) { delete[] (*logged_on_users)[foo]; } delete logged_on_users; } logged_on_users = new ExtArray< char * >; // fopen the Utmp. If we fail, bail... if ((utmp_fp=safe_fopen_wrapper(UtmpName,"r")) == NULL) { if ((utmp_fp=safe_fopen_wrapper(AltUtmpName,"r")) == NULL) { EXCEPT("fopen of \"%s\" (and \"%s\") failed!", UtmpName, AltUtmpName); } } while(fread((char *)&utmp_entry, #if USES_UTMPX sizeof( struct utmpx ), #else sizeof( struct utmp ), #endif 1, utmp_fp)) { if (utmp_entry.ut_type == USER_PROCESS) { bool _found_it = false; for (int i=0; (i<=logged_on_users->getlast()) && (! _found_it); i++) { if (!strcmp(utmp_entry.ut_user, (*logged_on_users)[i])) { _found_it = true; } } if (! _found_it) { dprintf(D_FULLDEBUG, "User %s is logged in.\n", utmp_entry.ut_user ); (*logged_on_users)[logged_on_users->getlast()+1] = strnewp( utmp_entry.ut_user ); } } } int fclose_ret = fclose( utmp_fp ); if( fclose_ret ) { EXCEPT("fclose of \"%s\" (or \"%s\") failed! " "This message brought to you by the fatal error %d", UtmpName, AltUtmpName, errno); } return; }
// Construct a StatInfo from already-known stat data (no filesystem
// access is performed; the caller supplies every field).
StatInfo::StatInfo( const char* dirpath, const char* filename,
					time_t time_access, time_t time_create,
					time_t time_modify, filesize_t fsize,
					bool is_dir, bool is_symlink )
{
	this->dirpath = strnewp( dirpath );
	this->filename = strnewp( filename );
	fullpath = dircat( dirpath, filename );

		// No stat() call happened, so no error to report.
	si_error = SIGood;
	si_errno = 0;
	valid = false;

	access_time = time_access;
	create_time = time_create;
	modify_time = time_modify;

	file_size = fsize;
	m_isDirectory = is_dir;
	m_isSymlink = is_symlink;
}
void Job::PrefixName(const MyString &prefix) { MyString tmp = prefix + _jobName; delete[] _jobName; _jobName = strnewp(tmp.Value()); }
// Initialize this DCStarter from a starter ClassAd: pull out the
// starter's address (ATTR_STARTER_IP_ADDR, falling back to
// ATTR_MY_ADDRESS) and version.  Returns the resulting value of
// is_initialized.
bool
DCStarter::initFromClassAd( ClassAd* ad )
{
	if( ! ad ) {
		dprintf( D_ALWAYS,
				 "ERROR: DCStarter::initFromClassAd() called with NULL ad\n" );
		return false;
	}

	char* tmp = NULL;
	ad->LookupString( ATTR_STARTER_IP_ADDR, &tmp );
	if( ! tmp ) {
			// If that's not defined, try ATTR_MY_ADDRESS
		ad->LookupString( ATTR_MY_ADDRESS, &tmp );
	}
	if( ! tmp ) {
		dprintf( D_FULLDEBUG, "ERROR: DCStarter::initFromClassAd(): "
				 "Can't find starter address in ad\n" );
		return false;
	}

	if( is_valid_sinful(tmp) ) {
		New_addr( strnewp(tmp) );
		is_initialized = true;
	} else {
		dprintf( D_FULLDEBUG,
				 "ERROR: DCStarter::initFromClassAd(): invalid %s in ad (%s)\n",
				 ATTR_STARTER_IP_ADDR, tmp );
	}
	free( tmp );
	tmp = NULL;

	if( ad->LookupString(ATTR_VERSION, &tmp) ) {
		New_version( strnewp(tmp) );
		free( tmp );
		tmp = NULL;
	}

	return is_initialized;
}
// Construct a DCShadow client object.  tName may be a hostname or a
// sinful string.
DCShadow::DCShadow( const char* tName ) : Daemon( DT_SHADOW, tName, NULL )
{
	is_initialized = false;
	shadow_safesock = NULL;

	if( _addr && ! _name ) {
		// We must have been given a sinful string instead of a hostname.
		// Just use the sinful string in place of a hostname, contrary
		// to the default behavior in Daemon::Daemon().
		_name = strnewp( _addr );
	}
}
//----------------------------------------------------------------------------- Script::Script( bool post, const char* cmd, Job* node ) : _post (post), _retValScript (-1), _retValJob (-1), _pid (0), _done (FALSE), _node (node) { ASSERT( cmd != NULL ); _cmd = strnewp (cmd); return; }
// Store a copy of the given claim id, replacing any previous one.
// Returns false (and leaves the old id alone) if id is NULL.
bool
DCStartd::setClaimId( const char* id )
{
	if( ! id ) {
		return false;
	}

		// delete[] on NULL is safe, so no guard is needed.
	delete [] claim_id;
	claim_id = strnewp( id );
	return true;
}
bool Env::SetEnvWithErrorMessage( const char *nameValueExpr, MyString *error_msg ) { char *expr, *delim; int retval; if( nameValueExpr == NULL || nameValueExpr[0] == '\0' ) { return false; } // make a copy of nameValueExpr for modifying expr = strnewp( nameValueExpr ); ASSERT( expr ); // find the delimiter delim = strchr( expr, '=' ); if(delim == NULL && strstr(expr,"$$")) { // This environment entry is an unexpanded $$() macro. // We just want to keep it in the environment verbatim. SetEnv(expr,NO_ENVIRONMENT_VALUE); delete[] expr; return true; } // fail if either name or delim is missing if( expr == delim || delim == NULL ) { if(error_msg) { MyString msg; if(delim == NULL) { msg.sprintf( "ERROR: Missing '=' after environment variable '%s'.", nameValueExpr); } else { msg.sprintf("ERROR: missing variable in '%s'.",expr); } AddErrorMessage(msg.Value(),error_msg); } delete[] expr; return false; } // overwrite delim with '\0' so we have two valid strings *delim = '\0'; // do the deed retval = SetEnv( expr, delim + 1 ); delete[] expr; return retval; }
// Write a ULOG_GRID_SUBMIT event for this job to the user log.
// Returns true on success (or if the user has no log).
bool
WriteGridSubmitEventToUserLog( ClassAd *job_ad )
{
	WriteUserLog *ulog = InitializeUserLog( job_ad );
	if ( ulog == NULL ) {
			// User doesn't want a log
		return true;
	}

	int cluster, proc;
	job_ad->LookupInteger( ATTR_CLUSTER_ID, cluster );
	job_ad->LookupInteger( ATTR_PROC_ID, proc );

	dprintf( D_FULLDEBUG,
			 "(%d.%d) Writing grid submit record to user logfile\n",
			 cluster, proc );

	GridSubmitEvent event;
	std::string contact;

	job_ad->LookupString( ATTR_GRID_RESOURCE, contact );
	event.resourceName = strnewp( contact.c_str() );

	job_ad->LookupString( ATTR_GRID_JOB_ID, contact );
	event.jobId = strnewp( contact.c_str() );

	int rc = ulog->writeEvent( &event,job_ad );
	delete ulog;

	if ( !rc ) {
		dprintf( D_ALWAYS,
				 "(%d.%d) Unable to log ULOG_GRID_SUBMIT event\n",
				 cluster, proc );
		return false;
	}
	return true;
}
//----------------------------------------------------------------------------- Script::Script( bool post, const char* cmd, int deferStatus, time_t deferTime, Job* node ) : _post (post), _retValScript (-1), _retValJob (-1), _pid (0), _done (FALSE), _deferStatus (deferStatus), _deferTime (deferTime), _nextRunTime (0), _node (node) { ASSERT( cmd != NULL ); _cmd = strnewp (cmd); return; }
bool writePreSkipEvent( CondorID& condorID, Job* job, const char* DAGNodeName, const char* directory, const char *logFile ) { TmpDir tmpDir; MyString errMsg; if ( !tmpDir.Cd2TmpDir( directory, errMsg ) ) { debug_printf( DEBUG_QUIET, "Could not change to node directory %s: %s\n", directory, errMsg.Value() ); return false; } // Special HTCondorID for NOOP jobs -- actually indexed by // otherwise-unused subprocID. condorID._cluster = 0; condorID._proc = Job::NOOP_NODE_PROCID; condorID._subproc = 1+get_fake_condorID(); // Increment this value set_fake_condorID(condorID._subproc); if( job ) { job->SetCondorID( condorID ); } WriteUserLog ulog; ulog.setEnableGlobalLog( false ); ulog.setUseXML( false ); ulog.initialize( std::vector<const char*>(1,logFile), condorID._cluster, condorID._proc, condorID._subproc, NULL ); PreSkipEvent pEvent; pEvent.cluster = condorID._cluster; pEvent.proc = condorID._proc; pEvent.subproc = condorID._subproc; MyString pEventNotes("DAG Node: " ); pEventNotes += DAGNodeName; // skipEventLogNotes gets deleted in PreSkipEvent destructor. pEvent.skipEventLogNotes = strnewp( pEventNotes.Value() ); if ( !ulog.writeEvent( &pEvent ) ) { EXCEPT( "Error: writing PRESKIP event failed!" ); return false; } return true; }
// Construct a Shadow from a binary path and (optionally) its ClassAd.
// Takes ownership semantics as the original: the ad pointer is stored
// as-is, the path is copied.  Daemon-core and version info are pulled
// from the ad when present.
Shadow::Shadow( const char* path_arg, ClassAd* ad )
{
	s_path = strnewp( path_arg );
	s_ad = ad;
	s_is_dc = false;
	m_version_info = NULL;

	if ( s_ad ) {
		s_ad->LookupBool( ATTR_IS_DAEMON_CORE, s_is_dc );

		char* version_string = NULL;
		if ( s_ad->LookupString(ATTR_VERSION, &version_string) ) {
			m_version_info =
				new CondorVersionInfo( version_string, "SHADOW", NULL );
			free( version_string );
		}
	}
}
// The GlobusResourceDownEvent is now deprecated and should be removed at // some point in the future (6.9?). bool WriteGlobusResourceDownEventToUserLog( ClassAd *job_ad ) { int cluster, proc; std::string contact; WriteUserLog *ulog = InitializeUserLog( job_ad ); if ( ulog == NULL ) { // User doesn't want a log return true; } job_ad->LookupInteger( ATTR_CLUSTER_ID, cluster ); job_ad->LookupInteger( ATTR_PROC_ID, proc ); dprintf( D_FULLDEBUG, "(%d.%d) Writing globus down record to user logfile\n", cluster, proc ); GlobusResourceDownEvent event; job_ad->LookupString( ATTR_GRID_RESOURCE, contact ); if ( contact.empty() ) { // Not a Globus job, don't log the event delete ulog; return true; } Tokenize( contact ); GetNextToken( " ", false ); event.rmContact = strnewp(GetNextToken( " ", false )); int rc = ulog->writeEvent(&event,job_ad); delete ulog; if (!rc) { dprintf( D_ALWAYS, "(%d.%d) Unable to log ULOG_GLOBUS_RESOURCE_DOWN event\n", cluster, proc ); return false; } return true; }
// Write a ULOG_GRID_RESOURCE_DOWN event for this job to the user log.
// A missing ATTR_GRID_RESOURCE is logged but the event is still written
// (with an empty resource name).  Returns true on success or if the
// user has no log.
bool
WriteGridResourceDownEventToUserLog( ClassAd *job_ad )
{
	WriteUserLog *ulog = InitializeUserLog( job_ad );
	if ( ulog == NULL ) {
			// User doesn't want a log
		return true;
	}

	int cluster, proc;
	job_ad->LookupInteger( ATTR_CLUSTER_ID, cluster );
	job_ad->LookupInteger( ATTR_PROC_ID, proc );

	dprintf( D_FULLDEBUG,
			 "(%d.%d) Writing grid source down record to user logfile\n",
			 cluster, proc );

	std::string contact;
	job_ad->LookupString( ATTR_GRID_RESOURCE, contact );
	if ( contact.empty() ) {
		dprintf( D_ALWAYS, "(%d.%d) %s attribute missing in job ad\n",
				 cluster, proc, ATTR_GRID_RESOURCE );
	}

	GridResourceDownEvent event;
	event.resourceName = strnewp( contact.c_str() );

	int rc = ulog->writeEvent(&event,job_ad);
	delete ulog;

	if (!rc) {
		dprintf( D_ALWAYS,
				 "(%d.%d) Unable to log ULOG_GRID_RESOURCE_DOWN event\n",
				 cluster, proc );
		return false;
	}
	return true;
}
// Write a ULOG_GLOBUS_SUBMIT_FAILED event for this job to the user log,
// recording the failure code and (possibly NULL) message as the reason.
// Returns true on success or if the user has no log.
bool
WriteGlobusSubmitFailedEventToUserLog( ClassAd *job_ad, int failure_code,
									   const char *failure_mesg )
{
	WriteUserLog *ulog = InitializeUserLog( job_ad );
	if ( ulog == NULL ) {
			// User doesn't want a log
		return true;
	}

	int cluster, proc;
	job_ad->LookupInteger( ATTR_CLUSTER_ID, cluster );
	job_ad->LookupInteger( ATTR_PROC_ID, proc );

	dprintf( D_FULLDEBUG,
			 "(%d.%d) Writing submit-failed record to user logfile\n",
			 cluster, proc );

	GlobusSubmitFailedEvent event;
		// Reason is "<code> <message>"; tolerate a NULL message.
	char buf[1024];
	snprintf( buf, 1024, "%d %s", failure_code,
			  failure_mesg ? failure_mesg : "" );
	event.reason = strnewp(buf);

	int rc = ulog->writeEvent(&event,job_ad);
	delete ulog;

	if (!rc) {
		dprintf( D_ALWAYS,
				 "(%d.%d) Unable to log ULOG_GLOBUS_SUBMIT_FAILED event\n",
				 cluster, proc);
		return false;
	}
	return true;
}
// Copy-construct a Shadow, deep-copying the path and ad and rebuilding
// the version info from the copied ad.
Shadow::Shadow( const Shadow& s )
{
	s_path = s.s_path ? strnewp( s.s_path ) : NULL;
	s_ad = s.s_ad ? new ClassAd( *(s.s_ad) ) : NULL;
	s_is_dc = s.s_is_dc;

	m_version_info = NULL;
	char* version_string = NULL;
	if (s_ad && s_ad->LookupString(ATTR_VERSION, &version_string)) {
		m_version_info = new CondorVersionInfo(version_string, "SHADOW", NULL);
		free(version_string);
	}
}
void DCCollector::initDestinationStrings( void ) { if( update_destination ) { delete [] update_destination; update_destination = NULL; } std::string dest; // Updates will always be sent to whatever info we've got // in the Daemon object. So, there's nothing hard to do for // this... just see what useful info we have and use it. if( _full_hostname ) { dest = _full_hostname; if ( _addr) { dest += ' '; dest += _addr; } } else { if (_addr) dest = _addr; } update_destination = strnewp( dest.c_str() ); }
//-----------------------------------------------------------------------------
// Spawn this node's PRE or POST script via DaemonCore, running it from
// the node's directory.  reaperId is the DaemonCore reaper that will
// collect the exit status; dagStatus and failedCount feed the
// $DAG_STATUS and $FAILED_COUNT command-line macros.  Returns 0 if a
// directory change fails, otherwise whatever Create_Process returned
// (stored in _pid).
int Script::BackgroundRun( int reaperId, int dagStatus, int failedCount )
{
	TmpDir		tmpDir;
	MyString	errMsg;
	if ( !tmpDir.Cd2TmpDir( _node->GetDirectory(), errMsg ) ) {
		debug_printf( DEBUG_QUIET,
				"Could not change to node directory %s: %s\n",
				_node->GetDirectory(), errMsg.Value() );
		return 0;
	}

	// Construct the command line, replacing some tokens with
	// information about the job.  All of these values would probably
	// be better inserted into the environment, rather than passed on
	// the command-line... some should be in the job's env as well...

	const char *delimiters = " \t";
	char * token;
	ArgList args;
		// strtok() writes into its argument, so work on a private copy
		// of _cmd; it's freed after Create_Process.
	char * cmd = strnewp(_cmd);
	for (token = strtok (cmd, delimiters) ; token != NULL ;
			token = strtok (NULL, delimiters)) {
		MyString arg;

		if ( !strcasecmp( token, "$JOB" ) ) {
			arg += _node->GetJobName();
		} else if ( !strcasecmp( token, "$RETRY" ) ) {
			arg += _node->GetRetries();
		} else if ( !strcasecmp( token, "$MAX_RETRIES" ) ) {
			arg += _node->GetRetryMax();
		} else if ( !strcasecmp( token, "$JOBID" ) ) {
				// $JOBID only makes sense for POST scripts (the job has
				// no id before it's submitted); warn and pass the token
				// through literally for PRE scripts.
			if ( !_post ) {
				debug_printf( DEBUG_QUIET, "Warning: $JOBID macro should "
							"not be used as a PRE script argument!\n" );
				check_warning_strictness( DAG_STRICT_1 );
				arg += token;
			} else {
				arg += _node->_CondorID._cluster;
				arg += '.';
				arg += _node->_CondorID._proc;
			}
		} else if (!strcasecmp(token, "$RETURN")) {
			if ( !_post ) {
				debug_printf( DEBUG_QUIET, "Warning: $RETURN macro should "
							"not be used as a PRE script argument!\n" );
				check_warning_strictness( DAG_STRICT_1 );
			}
			arg += _retValJob;
		} else if (!strcasecmp( token, "$PRE_SCRIPT_RETURN" ) ) {
			if ( !_post ) {
				debug_printf( DEBUG_QUIET, "Warning: $PRE_SCRIPT_RETURN macro should "
							"not be used as a PRE script argument!\n" );
				check_warning_strictness( DAG_STRICT_1 );
			}
			arg += _retValScript;
		} else if (!strcasecmp(token, "$DAG_STATUS")) {
			arg += dagStatus;
		} else if (!strcasecmp(token, "$FAILED_COUNT")) {
			arg += failedCount;
		} else if (token[0] == '$') {
			// This should probably be a fatal error when -strict is
			// implemented.
			debug_printf( DEBUG_QUIET, "Warning: unrecognized macro %s "
						"in node %s %s script arguments\n", token,
						_node->GetJobName(), _post ? "POST" : "PRE" );
			check_warning_strictness( DAG_STRICT_1 );
			arg += token;
		} else {
				// Ordinary token: passed through unchanged.
			arg += token;
		}

		args.AppendArg(arg.Value());
	}

	_pid = daemonCore->Create_Process( cmd, args,
									   PRIV_UNKNOWN, reaperId, FALSE,
									   NULL, NULL, NULL, NULL, NULL, 0 );
	delete [] cmd;

		// Return to the original working directory.
	if ( !tmpDir.Cd2MainDir( errMsg ) ) {
		debug_printf( DEBUG_QUIET,
				"Could not change to original directory: %s\n",
				errMsg.Value() );
		return 0;
	}

	return _pid;
}
//--------------------------------------------------------------------------- void Job::SetDagFile(const char *dagFile) { delete _dagFile; _dagFile = strnewp( dagFile ); }
//------------------------------------------------------------------------- bool fake_condor_submit( CondorID& condorID, Job* job, const char* DAGNodeName, const char* directory, const char *logFile ) { TmpDir tmpDir; MyString errMsg; if ( !tmpDir.Cd2TmpDir( directory, errMsg ) ) { debug_printf( DEBUG_QUIET, "Could not change to node directory %s: %s\n", directory, errMsg.Value() ); return false; } _subprocID++; // Special HTCondorID for NOOP jobs -- actually indexed by // otherwise-unused subprocID. condorID._cluster = 0; condorID._proc = Job::NOOP_NODE_PROCID; condorID._subproc = _subprocID; // Make sure that this job gets marked as a NOOP if( job ) { job->SetCondorID( condorID ); } WriteUserLog ulog; ulog.setEnableGlobalLog( false ); ulog.setUseXML( false ); ulog.initialize( logFile, condorID._cluster, condorID._proc, condorID._subproc, NULL ); SubmitEvent subEvent; subEvent.cluster = condorID._cluster; subEvent.proc = condorID._proc; subEvent.subproc = condorID._subproc; // We need some value for submitHost for the event to be read // correctly. subEvent.setSubmitHost( "<dummy-submit-for-noop-job>" ); MyString subEventNotes("DAG Node: " ); subEventNotes += DAGNodeName; // submitEventLogNotes get deleted in SubmitEvent destructor. subEvent.submitEventLogNotes = strnewp( subEventNotes.Value() ); if ( !ulog.writeEvent( &subEvent ) ) { EXCEPT( "Error: writing dummy submit event for NOOP node failed!" ); return false; } JobTerminatedEvent termEvent; termEvent.cluster = condorID._cluster; termEvent.proc = condorID._proc; termEvent.subproc = condorID._subproc; termEvent.normal = true; termEvent.returnValue = 0; termEvent.signalNumber = 0; if ( !ulog.writeEvent( &termEvent ) ) { EXCEPT( "Error: writing dummy terminated event for NOOP node failed!" ); return false; } return true; }