//---------------------------------------------------------------------------
int main(int argc, char *argv[])
{
	printf("\n");

		// Set up the dprintf stuff to write to stderr, so that Condor
		// libraries which use it will write to the right place...
	dprintf_set_tool_debug("TOOL", 0);
	set_debug_flags(NULL, D_ALWAYS | D_NOHEADER);
	config();

		// Initialize our Distribution object -- condor vs. hawkeye, etc.
	myDistro->Init( argc, argv );

		// Load command-line arguments into the deepOpts and shallowOpts
		// structures.
	SubmitDagDeepOptions deepOpts;
	SubmitDagShallowOptions shallowOpts;

		// We're setting strScheddDaemonAdFile and strScheddAddressFile
		// here so that the classad updating feature (see gittrac #1782)
		// works properly.  The problem is that the schedd daemon ad and
		// address files are normally defined relative to the $LOG value.
		// Because we specify a different log directory on the condor_dagman
		// command line, if we don't set the values here, condor_dagman
		// won't be able to find those files when it tries to communicate
		// with the schedd.  wenger 2013-03-11
	shallowOpts.strScheddDaemonAdFile = param( "SCHEDD_DAEMON_AD_FILE" );
	shallowOpts.strScheddAddressFile = param( "SCHEDD_ADDRESS_FILE" );

	parseCommandLine(deepOpts, shallowOpts, argc, argv);

	int tmpResult;

		// Recursively run ourself on nested DAGs.  We need to do this
		// depth-first so all of the lower-level .condor.sub files already
		// exist when we check for log files.
	if ( deepOpts.recurse ) {
		if ( param_boolean( "DAGMAN_USE_OLD_DAG_READER", false ) ) {
			fprintf( stderr, "Warning: DAGMAN_USE_OLD_DAG_READER "
						"is no longer supported\n" );
		}
		tmpResult = doRecursionNew( deepOpts, shallowOpts );
		if ( tmpResult != 0 ) {
			fprintf( stderr, "Recursive submit(s) failed; exiting without "
						"attempting top-level submit\n" );
			return tmpResult;
		}
	}

		// Further work to get the shallowOpts structure set up properly.
	tmpResult = setUpOptions( deepOpts, shallowOpts );
	if ( tmpResult != 0 ) return tmpResult;

		// Check whether the output files already exist; if so, we may
		// abort depending on the -f flag and whether we're running
		// a rescue DAG.
	ensureOutputFilesExist( deepOpts, shallowOpts );

		// Make sure that all node jobs have log files, the files
		// aren't on NFS, etc.
		// Note that this MUST come after recursion, otherwise we'd
		// pass down the "preserved" values from the current .condor.sub
		// file.
	if ( deepOpts.updateSubmit ) {
		tmpResult = getOldSubmitFlags( shallowOpts );
		if ( tmpResult != 0 ) return tmpResult;
	}

		// Write the actual submit file for DAGMan.
	writeSubmitFile( deepOpts, shallowOpts );

	return submitDag( shallowOpts );
}
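
// The depth-first requirement noted in the recursion comment above is
// easiest to see in isolation.  The sketch below is illustrative only and
// is NOT the actual doRecursionNew() implementation; the NestedDagSketch
// type and submitNestedSketch() name are hypothetical.  It is compiled out
// via #if 0 so it cannot conflict with the real code.
#if 0
#include <string>
#include <vector>

struct NestedDagSketch {
	std::string dagFile;                    // path to this DAG file
	std::vector<NestedDagSketch> children;  // DAGs referenced by SUBDAG nodes
};

// Process children before the parent, so that every lower-level
// .condor.sub file already exists by the time the parent level runs its
// own checks.  Returns 0 on success, or the first nonzero child result,
// mirroring the error propagation in main() above.
static int submitNestedSketch( const NestedDagSketch &dag )
{
	for ( const NestedDagSketch &child : dag.children ) {
		int result = submitNestedSketch( child );
		if ( result != 0 ) {
			return result;
		}
	}
	// ... generate <dagFile>.condor.sub for this level here ...
	return 0;
}
#endif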