/* Record a dry-run "putfile" as an equivalent "cp" command in the
 * queue's log script. Both paths are shell-escaped before logging.
 * Returns 0 on success, -1 if the log file cannot be opened. */
static int batch_fs_dryrun_putfile (struct batch_queue *q, const char *lpath, const char *rpath)
{
	FILE *log = fopen(q->logfile, "a");
	if (log == NULL)
		return -1;

	char *src = string_escape_shell(lpath);
	char *dst = string_escape_shell(rpath);
	fprintf(log, "cp %s %s\n", src, dst);
	free(src);
	free(dst);
	fclose(log);
	return 0;
}
static batch_job_id_t batch_job_dryrun_submit (struct batch_queue *q, const char *cmd, const char *extra_input_files, const char *extra_output_files, struct jx *envlist, const struct rmsummary *resources ) { FILE *log; char *escaped_cmd; char *env_assignment; char *escaped_env_assignment; struct batch_job_info *info; batch_job_id_t jobid = random(); fflush(NULL); debug(D_BATCH, "started dry run of job %" PRIbjid ": %s", jobid, cmd); if ((log = fopen(q->logfile, "a"))) { if (!(info = calloc(sizeof(*info), 1))) { fclose(log); return -1; } info->submitted = time(0); info->started = time(0); itable_insert(q->job_table, jobid, info); if(envlist && jx_istype(envlist, JX_OBJECT) && envlist->u.pairs) { struct jx_pair *p; fprintf(log, "env "); for(p=envlist->u.pairs;p;p=p->next) { if(p->key->type==JX_STRING && p->value->type==JX_STRING) { env_assignment = string_format("%s=%s", p->key->u.string_value,p->value->u.string_value); escaped_env_assignment = string_escape_shell(env_assignment); fprintf(log, "%s", escaped_env_assignment); fprintf(log, " "); free(env_assignment); free(escaped_env_assignment); } } } escaped_cmd = string_escape_shell(cmd); fprintf(log, "sh -c %s\n", escaped_cmd); free(escaped_cmd); fclose(log); return jobid; } else { return -1; } }
/* Log a dry-run file removal as "rm -r <path>" in the queue's script.
 * Returns 0 on success, -1 if the log file cannot be opened. */
static int batch_fs_dryrun_unlink (struct batch_queue *q, const char *path)
{
	FILE *log = fopen(q->logfile, "a");
	if (log == NULL)
		return -1;

	char *quoted = string_escape_shell(path);
	fprintf(log, "rm -r %s\n", quoted);
	free(quoted);
	fclose(log);
	return 0;
}
/* Log a dry-run directory change as "cd <path>" and remember the new
 * working directory in the queue's "cwd" option.
 * Returns 0 on success, -1 if the log file cannot be opened. */
static int batch_fs_dryrun_chdir (struct batch_queue *q, const char *path)
{
	FILE *log = fopen(q->logfile, "a");
	if (log == NULL)
		return -1;

	/* The queue takes ownership of the duplicated path string. */
	batch_queue_set_option(q, "cwd", xxstrdup(path));

	char *quoted = string_escape_shell(path);
	fprintf(log, "cd %s\n", quoted);
	fclose(log);
	free(quoted);
	return 0;
}
/*
 * Log a dry-run directory creation as "mkdir [-p] -m <mode> <path>".
 *
 * Bug fix: the mode was previously printed with %d (decimal), but the
 * shell's "mkdir -m" interprets a numeric mode argument as octal, so
 * e.g. mode 0755 was logged as "-m 493" and would produce the wrong
 * permissions when the script is replayed. Print the mode in octal,
 * casting because mode_t's width/signedness in varargs is
 * implementation-defined.
 *
 * Returns 0 on success, -1 if the log file cannot be opened.
 */
static int batch_fs_dryrun_mkdir (struct batch_queue *q, const char *path, mode_t mode, int recursive)
{
	FILE *log;
	if ((log = fopen(q->logfile, "a"))) {
		char *escaped_path = string_escape_shell(path);
		if (recursive) {
			fprintf(log, "mkdir -p -m %o %s\n", (unsigned int) mode, escaped_path);
		} else {
			fprintf(log, "mkdir -m %o %s\n", (unsigned int) mode, escaped_path);
		}
		fclose(log);
		free(escaped_path);
		return 0;
	} else {
		return -1;
	}
}
/*
 * Dry-run stat: log "test -e <path>" and report a fake, existing,
 * non-empty file via *buf.
 *
 * Bug fix: the previous code memcpy'd a mostly-uninitialized local
 * "struct stat" into *buf (only st_size was set), handing callers
 * indeterminate stack bytes for every other field. Zero the caller's
 * buffer instead, then set st_size.
 *
 * Returns 0 on success, -1 if the log file cannot be opened.
 */
static int batch_fs_dryrun_stat (struct batch_queue *q, const char *path, struct stat *buf)
{
	FILE *log;
	if ((log = fopen(q->logfile, "a"))) {
		char *escaped_path = string_escape_shell(path);
		// Since Makeflow only calls stat *after* a file has been created,
		// add a test here as a sanity check. If Makeflow e.g. tries to stat
		// files before running rules to create them, these tests will
		// cripple the shell script representation.
		fprintf(log, "test -e %s\n", escaped_path);
		free(escaped_path);
		fclose(log);
		memset(buf, 0, sizeof(*buf));
		buf->st_size = 1; /* pretend the file exists and is non-empty */
		return 0;
	} else {
		return -1;
	}
}
/* Makeflow hook: rewrite a node's task so its command runs inside a
 * vc3-builder environment. Builds "<builder> --home $PWD <opts> -- <cmd> > <log>",
 * wraps it via batch_wrapper, registers the builder executable as a global
 * input and the per-task log as an intermediate output, and replaces the
 * task's command with the generated wrapper script.
 * Returns MAKEFLOW_HOOK_SUCCESS, or MAKEFLOW_HOOK_FAILURE if the wrapper
 * script cannot be written. */
static int node_submit( void * instance_struct, struct dag_node *n, struct batch_task *t){
	struct vc3_definition *v = (struct vc3_definition*)instance_struct;
	struct batch_wrapper *wrapper = batch_wrapper_create();
	batch_wrapper_prefix(wrapper, "./vc3_builder_");
	char * executable = NULL;
	// If the queue supports remote_renaming add as remote rename.
	if (batch_queue_supports_feature(makeflow_get_queue(n), "remote_rename")) {
		executable = string_format("./%s", path_basename(v->exe));
	} else {
		// Else just use executable in path
		executable = string_format("%s", v->exe);
	}
	/* Assumes a /disk dir in the image. */
	// Per-task log name; task's command is shell-escaped before embedding.
	char *log = string_format("%s_%d", v->log, t->taskid);
	char *task_cmd = string_escape_shell(t->command);
	char *cmd = string_format("%s --home $PWD %s -- %s > %s", executable, v->opt, task_cmd, log);
	makeflow_hook_add_input_file(n->d, t, v->exe, executable, DAG_FILE_TYPE_GLOBAL);
	makeflow_hook_add_output_file(n->d, t, log, log, DAG_FILE_TYPE_INTERMEDIATE);
	// NOTE(review): presumably the add_input/output_file calls copy these
	// strings, since they are freed immediately below — confirm against
	// makeflow_hook_add_input_file's ownership contract.
	free(log);
	free(executable);
	free(task_cmd);
	batch_wrapper_cmd(wrapper, cmd);
	free(cmd);
	// batch_wrapper_write returns a newly allocated path to the generated
	// script, or NULL on failure (errno set).
	cmd = batch_wrapper_write(wrapper, t);
	if(cmd){
		batch_task_set_command(t, cmd);
		struct dag_file *df = makeflow_hook_add_input_file(n->d, t, cmd, cmd, DAG_FILE_TYPE_TEMP);
		debug(D_MAKEFLOW_HOOK, "Wrapper written to %s", df->filename);
		// The wrapper script already exists on disk; record that in the log.
		makeflow_log_file_state_change(n->d, df, DAG_FILE_STATE_EXISTS);
	} else {
		debug(D_MAKEFLOW_HOOK, "Failed to create wrapper: errno %d, %s", errno, strerror(errno));
		return MAKEFLOW_HOOK_FAILURE;
	}
	// NOTE(review): cmd is freed here although batch_task_set_command and
	// makeflow_hook_add_input_file received it above — this is safe only if
	// both take copies; verify.
	free(cmd);
	return MAKEFLOW_HOOK_SUCCESS;
}
/*
 * Wrap a command inside a wrapper command.
 *
 * Substitution rules:
 *   - "{}" in wrapper_command is replaced by the raw command text
 *     ("{}" takes precedence if both markers are present);
 *   - otherwise "[]" is replaced by the shell-escaped command;
 *   - with neither marker, " /bin/sh -c <escaped command>" is appended.
 *
 * @param command         Command to wrap (not modified).
 * @param wrapper_command Wrapper template, or NULL for no wrapping.
 * @return Newly allocated string the caller must free; a copy of command
 *         when wrapper_command is NULL; NULL on allocation failure
 *         (previously the malloc result was used unchecked).
 */
char * string_wrap_command( const char *command, const char *wrapper_command )
{
	if(!wrapper_command)
		return xxstrdup(command);

	char *braces = strstr(wrapper_command, "{}");
	char *square = strstr(wrapper_command, "[]");

	/* "{}" substitutes the command verbatim; all other forms get the
	 * shell-escaped text. */
	char *new_command = braces ? xxstrdup(command) : string_escape_shell(command);

	/* Both marker branches were previously duplicated; "{}" wins. */
	char *placeholder = braces ? braces : square;

	/* +16 covers the NUL and the fixed " /bin/sh -c " suffix (12 chars). */
	char *result = malloc(strlen(new_command) + strlen(wrapper_command) + 16);
	if (!result) {
		free(new_command);
		return NULL;
	}

	if (placeholder) {
		size_t prefix = (size_t)(placeholder - wrapper_command);
		memcpy(result, wrapper_command, prefix);
		result[prefix] = 0;
		strcat(result, new_command);
		strcat(result, placeholder + 2);
	} else {
		strcpy(result, wrapper_command);
		strcat(result, " /bin/sh -c ");
		strcat(result, new_command);
	}

	free(new_command);
	return result;
}