/*
 * Clean a single node.  If the node is a nested Makeflow job, re-invoke the
 * sub-makeflow command with the -c (clean) flag so it removes its own state.
 *
 * @param d      The dag; variables are exported from it for the sub-run.
 * @param queue  Batch queue (unused here; kept for the common clean signature).
 * @param n      The node to clean.
 * @param silent Unused in this variant; kept for the common clean signature.
 */
void makeflow_clean_node(struct dag *d, struct batch_queue *queue, struct dag_node *n, int silent)
{
	if(n->nested_job) {
		/* " -c" is 3 characters, plus 1 for the terminating NUL. */
		size_t len = strlen(n->command) + 4;
		char *command = xxmalloc(len);
		snprintf(command, len, "%s -c", n->command);
		/* XXX this should use the batch job interface for consistency */
		makeflow_node_export_variables(d, n);
		system(command);
		free(command);
	}
}
/*
 * Clean a single node: remove each of its target files, then, if the node is
 * a nested Makeflow job, re-invoke the sub-makeflow command with the -c
 * (clean) flag so it removes its own state.
 *
 * @param d      The dag the node belongs to.
 * @param queue  Batch queue passed through to makeflow_file_clean.
 * @param n      The node to clean.
 * @param silent Passed through to makeflow_file_clean to suppress messages.
 */
void makeflow_clean_node(struct dag *d, struct batch_queue *queue, struct dag_node *n, int silent)
{
	struct dag_file *f;

	list_first_item(n->target_files);
	while((f = list_next_item(n->target_files)))
		makeflow_file_clean(d, queue, f, silent);

	if(n->nested_job) {
		/* " -c" is 3 characters, plus 1 for the terminating NUL. */
		size_t len = strlen(n->command) + 4;
		char *command = xxmalloc(len);
		snprintf(command, len, "%s -c", n->command);
		/* XXX this should use the batch job interface for consistency */
		makeflow_node_export_variables(d, n);
		system(command);
		free(command);
	}
}
/*
 * Clean the files produced by a workflow, according to clean_depth:
 *   MAKEFLOW_CLEAN_ALL           - every non-source file.
 *   MAKEFLOW_CLEAN_OUTPUTS       - only files registered in d->outputs.
 *   MAKEFLOW_CLEAN_INTERMEDIATES - only non-source files NOT in d->outputs.
 * Afterwards, nested Makeflow jobs are recursively cleaned by re-running
 * their command with the -c flag.
 *
 * @param d           The dag describing the workflow.
 * @param queue       Batch queue passed through to makeflow_file_clean.
 * @param clean_depth Which category of files to remove (see above).
 */
void makeflow_clean(struct dag *d, struct batch_queue *queue, makeflow_clean_depth clean_depth)
{
	struct dag_file *f;
	char *name;

	hash_table_firstkey(d->files);
	while(hash_table_nextkey(d->files, &name, (void **) &f)) {
		/* Source files are inputs supplied by the user; never delete them. */
		if(dag_file_is_source(f))
			continue;

		/* Only announce removal of files that are expected to exist. */
		int silent = 1;
		if(dag_file_should_exist(f))
			silent = 0;

		if(clean_depth == MAKEFLOW_CLEAN_ALL) {
			makeflow_file_clean(d, queue, f, silent);
		} else if(set_lookup(d->outputs, f) && (clean_depth == MAKEFLOW_CLEAN_OUTPUTS)) {
			makeflow_file_clean(d, queue, f, silent);
		} else if(!set_lookup(d->outputs, f) && (clean_depth == MAKEFLOW_CLEAN_INTERMEDIATES)) {
			makeflow_file_clean(d, queue, f, silent);
		}
	}

	struct dag_node *n;
	for(n = d->nodes; n; n = n->next) {
		/* If the node is a Makeflow job, then we should recursively call the
		 * clean operation on it. */
		if(n->nested_job) {
			/* " -c" is 3 characters, plus 1 for the terminating NUL. */
			size_t len = strlen(n->command) + 4;
			char *command = xxmalloc(len);
			snprintf(command, len, "%s -c", n->command);
			/* XXX this should use the batch job interface for consistency */
			makeflow_node_export_variables(d, n);
			system(command);
			free(command);
		}
	}
}
/*
 * Clean the files produced by a workflow, according to clean_depth:
 *   MAKEFLOW_CLEAN_ALL           - every non-source file (and mount targets).
 *   MAKEFLOW_CLEAN_OUTPUTS       - only files registered in d->outputs.
 *   MAKEFLOW_CLEAN_INTERMEDIATES - only non-source files NOT in d->outputs.
 *   MAKEFLOW_CLEAN_CACHE         - mountfile targets and the mount cache dir.
 * Stale file records (no longer created or consumed by any rule) are always
 * removed.  Nested Makeflow jobs are recursively cleaned by re-running their
 * command with the -c flag.
 *
 * @param d           The dag describing the workflow.
 * @param queue       Batch queue passed through to makeflow_clean_file.
 * @param clean_depth Which category of files to remove (see above).
 * @return 0 on success, -1 if a mount target or the cache dir could not be
 *         removed (an error is printed to stderr).
 */
int makeflow_clean(struct dag *d, struct batch_queue *queue, makeflow_clean_depth clean_depth)
{
	struct dag_file *f;
	char *name;

	hash_table_firstkey(d->files);
	while(hash_table_nextkey(d->files, &name, (void **) &f)) {
		/* Only announce removal of files that are expected to exist. */
		int silent = 1;
		if(dag_file_should_exist(f))
			silent = 0;

		/* We have a record of the file, but it is no longer created or used so delete */
		if(dag_file_is_source(f) && dag_file_is_sink(f) && !set_lookup(d->inputs, f))
			makeflow_clean_file(d, queue, f, silent);

		if(dag_file_is_source(f)) {
			if(f->source && (clean_depth == MAKEFLOW_CLEAN_CACHE || clean_depth == MAKEFLOW_CLEAN_ALL)) {
				/* this file is specified in the mountfile */
				if(makeflow_clean_mount_target(f->filename)) {
					fprintf(stderr, "Failed to remove %s!\n", f->filename);
					return -1;
				}
			}
			continue;
		}

		if(clean_depth == MAKEFLOW_CLEAN_ALL) {
			makeflow_clean_file(d, queue, f, silent);
		} else if(set_lookup(d->outputs, f) && (clean_depth == MAKEFLOW_CLEAN_OUTPUTS)) {
			makeflow_clean_file(d, queue, f, silent);
		} else if(!set_lookup(d->outputs, f) && (clean_depth == MAKEFLOW_CLEAN_INTERMEDIATES)) {
			makeflow_clean_file(d, queue, f, silent);
		}
	}

	/* clean up the cache dir created due to the usage of mountfile */
	if(clean_depth == MAKEFLOW_CLEAN_CACHE || clean_depth == MAKEFLOW_CLEAN_ALL) {
		if(d->cache_dir && unlink_recursive(d->cache_dir)) {
			fprintf(stderr, "Failed to clean up the cache dir (%s) created due to the usage of the mountfile!\n", d->cache_dir);
			dag_mount_clean(d);
			return -1;
		}
		dag_mount_clean(d);
	}

	struct dag_node *n;
	for(n = d->nodes; n; n = n->next) {
		/* If the node is a Makeflow job, then we should recursively call the
		 * clean operation on it. */
		if(n->nested_job) {
			/* " -c" is 3 characters, plus 1 for the terminating NUL. */
			size_t len = strlen(n->command) + 4;
			char *command = xxmalloc(len);
			snprintf(command, len, "%s -c", n->command);
			/* XXX this should use the batch job interface for consistency */
			makeflow_node_export_variables(d, n);
			system(command);
			free(command);
		}
	}
	return 0;
}