/* Returns the AVD's content path, i.e. the directory that contains
 * the AVD's content files (e.g. data partition, cache, sd card, etc...).
 *
 * We extract this by parsing the root config .ini file, looking for
 * a "path" element.
 *
 * Returns 0 on success (i->contentPath set to a heap string owned by |i|),
 * or -1 if the root .ini lacks an absolute-path entry entirely.
 */
static int
_avdInfo_getContentPath( AvdInfo*  i )
{
    char temp[PATH_MAX], *p = temp, *end = p + sizeof(temp);

    i->contentPath = iniFile_getString(i->rootIni, ROOT_ABS_PATH_KEY, NULL);

    if (i->contentPath == NULL) {
        derror("bad config: %s",
               "virtual device file lacks a " ROOT_ABS_PATH_KEY " entry");
        return -1;
    }

    if (!path_is_dir(i->contentPath)) {
        /* If the absolute path doesn't match an actual directory, try
         * the relative path if present. */
        char* relPath = iniFile_getString(i->rootIni, ROOT_REL_PATH_KEY, NULL);
        if (relPath != NULL) {
            p = bufprint_config_path(temp, end);
            p = bufprint(p, end, PATH_SEP "%s", relPath);
            if (p < end && path_is_dir(temp)) {
                AFREE(i->contentPath);
                i->contentPath = ASTRDUP(temp);
            }
            /* BUGFIX: iniFile_getString() returns a heap copy; the old code
             * leaked it. */
            AFREE(relPath);
        }
    }

    D("virtual device content at %s", i->contentPath);
    return 0;
}
/* Tear down a CommandData: recursively delete its temporary directory
 * (when one exists), release every owned resource, and unlink it from
 * the global CommandList. A NULL argument is silently ignored. */
static void
command_done (CommandData *cdata)
{
	if (cdata == NULL)
		return;

	if ((cdata->temp_dir != NULL) && path_is_dir (cdata->temp_dir)) {
		/* Remove the temp dir with an external "rm -rf". */
		char *argv[4] = { "rm", "-rf", cdata->temp_dir, NULL };
		g_spawn_sync (g_get_tmp_dir (), argv, NULL,
			      G_SPAWN_SEARCH_PATH,
			      NULL, NULL, NULL, NULL, NULL, NULL);
	}

	g_free (cdata->command);
	if (cdata->app != NULL)
		g_object_unref (cdata->app);
	path_list_free (cdata->file_list);
	g_free (cdata->temp_dir);
	if (cdata->process != NULL)
		g_object_unref (cdata->process);

	CommandList = g_list_remove (CommandList, cdata);
	g_free (cdata);
}
/* Return the content based ID for a node. * This includes : * command * input files (content) * output files (name) : * important addition as changed expected outputs may not * be reflected in the command and not present in archive * LATER : environment variables (name:value) * returns a string the caller needs to free **/ char * batch_task_generate_id(struct batch_task *t) { if(t->hash) free(t->hash); unsigned char *hash = xxcalloc(1, sizeof(char *)*SHA1_DIGEST_LENGTH); struct batch_file *f; sha1_context_t context; sha1_init(&context); /* Add command to the archive id */ sha1_update(&context, "C", 1); sha1_update(&context, t->command, strlen(t->command)); sha1_update(&context, "\0", 1); /* Sort inputs for consistent hashing */ list_sort(t->input_files, batch_file_outer_compare); /* add checksum of the node's input files together */ struct list_cursor *cur = list_cursor_create(t->input_files); for(list_seek(cur, 0); list_get(cur, (void**)&f); list_next(cur)) { char * file_id; if(path_is_dir(f->inner_name) == 1){ f->hash = batch_file_generate_id_dir(f->outer_name); file_id = xxstrdup(f->hash); } else{ file_id = batch_file_generate_id(f); } sha1_update(&context, "I", 1); sha1_update(&context, f->outer_name, strlen(f->outer_name)); sha1_update(&context, "C", 1); sha1_update(&context, file_id, strlen(file_id)); sha1_update(&context, "\0", 1); free(file_id); } list_cursor_destroy(cur); /* Sort outputs for consistent hashing */ list_sort(t->output_files, batch_file_outer_compare); /* add checksum of the node's output file names together */ cur = list_cursor_create(t->output_files); for(list_seek(cur, 0); list_get(cur, (void**)&f); list_next(cur)) { sha1_update(&context, "O", 1); sha1_update(&context, f->outer_name, strlen(f->outer_name)); sha1_update(&context, "\0", 1); } list_cursor_destroy(cur); sha1_final(hash, &context); t->hash = xxstrdup(sha1_string(hash)); free(hash); return xxstrdup(t->hash); }
/* Test driver for path_is_dir(): builds a small fixture tree (a directory,
 * a file inside it, a symlink to each) and checks that path_is_dir()
 * follows symlinks and distinguishes files from directories. */
int main(void)
{
	char cwd[1024], *path, *ctx = tal_strdup(NULL, "ctx");
	int fd;

	plan_tests(6);

	if (!getcwd(cwd, sizeof(cwd)))
		abort();

	/* Clean up any leftovers from an earlier (possibly aborted) run. */
	unlink("run-is_dir-dir-link");
	unlink("run-is_dir-file-link");
	unlink("run-is_dir-dir/file");
	rmdir("run-is_dir-dir");

	if (mkdir("run-is_dir-dir", 0700) != 0)
		abort();
	if (symlink("run-is_dir-dir", "run-is_dir-dir-link") != 0)
		abort();
	if (symlink("run-is_dir-dir/file", "run-is_dir-file-link") != 0)
		abort();

	/* BUGFIX: the open() result used to be discarded; if file creation
	 * failed, later tests would fail for the wrong reason. */
	fd = open("run-is_dir-dir/file", O_WRONLY|O_CREAT, 0600);
	if (fd < 0)
		abort();
	close(fd);

	ok1(path_is_dir("run-is_dir-dir-link"));   /* symlink to dir counts */
	ok1(!path_is_dir("run-is_dir-file-link")); /* symlink to file doesn't */
	ok1(!path_is_dir("run-is_dir-dir/file"));
	ok1(path_is_dir("run-is_dir-dir"));

	path = path_join(ctx, cwd, "run-is_dir-dir/file");
	ok1(!path_is_dir(path));
	ok1(path_is_dir(cwd));

	tal_free(ctx);
	return exit_status();
}
static char* _getAvdContentPath(const char* avdName) { char temp[PATH_MAX], *p=temp, *end=p+sizeof(temp); IniFile* ini = NULL; char* iniPath = path_getRootIniPath(avdName); char* avdPath = NULL; if (iniPath != NULL) { ini = iniFile_newFromFile(iniPath); AFREE(iniPath); } if (ini == NULL) { APANIC("Could not open: %s\n", iniPath == NULL ? avdName : iniPath); } avdPath = iniFile_getString(ini, ROOT_ABS_PATH_KEY, NULL); if (!path_is_dir(avdPath)) { // If the absolute path doesn't match an actual directory, try // the relative path if present. const char* relPath = iniFile_getString(ini, ROOT_REL_PATH_KEY, NULL); if (relPath != NULL) { p = bufprint_config_path(temp, end); p = bufprint(p, end, PATH_SEP "%s", relPath); if (p < end && path_is_dir(temp)) { AFREE(avdPath); avdPath = ASTRDUP(temp); } } } iniFile_free(ini); return avdPath; }
/* Copy a file to the s3 bucket*/ static int makeflow_archive_s3_file(struct archive_instance *a, char *batchID, char *file_path){ // Copy to s3 archive struct timeval start_time; struct timeval end_time; char *fileCopy; FILE *fp; //Tar directories before submitting them to s3 bucket if(path_is_dir(file_path) != 1){ fp = fopen(file_path,"rb"); } else{ char *tarDir = string_format("tar -czvf %s.tar.gz -C %s .",file_path,file_path); if(system(tarDir) != 0){ free(tarDir); return 0; } free(tarDir); fileCopy = string_format("%s.tar.gz",file_path); fp = fopen(fileCopy,"rb"); free(fileCopy); } gettimeofday(&start_time, NULL); if(s3_put(fp,batchID) != 0){ gettimeofday(&end_time,NULL); float run_time = ((end_time.tv_sec*1000000 + end_time.tv_usec) - (start_time.tv_sec*1000000 + start_time.tv_usec)) / 1000000.0; total_up_time += run_time; debug(D_MAKEFLOW_HOOK," It took %f seconds for %s to fail uploading to %s",run_time, batchID, a->s3_dir); debug(D_MAKEFLOW_HOOK," The total upload time is %f second(s)",total_up_time); return 0; } gettimeofday(&end_time,NULL); float run_time = ((end_time.tv_sec*1000000 + end_time.tv_usec) - (start_time.tv_sec*1000000 + start_time.tv_usec)) / 1000000.0; total_up_time += run_time; hash_table_insert(s3_files_in_archive, batchID, batchID); fclose(fp); printf("Upload %s to %s/%s\n",file_path, a->s3_dir, batchID); debug(D_MAKEFLOW_HOOK," It took %f second(s) for %s to upload to %s\n",run_time, batchID, a->s3_dir); debug(D_MAKEFLOW_HOOK," The total upload time is %f second(s)",total_up_time); return 1; }
/** * Remove a hide-file in all branches up to maxbranch * If maxbranch == -1, try to delete it in all branches. */ int remove_hidden(const char *path, int maxbranch) { DBG("%s\n", path); if (!uopt.cow_enabled) RETURN(0); if (maxbranch == -1) maxbranch = uopt.nbranches; int i; for (i = 0; i <= maxbranch; i++) { char p[PATHLEN_MAX]; if (BUILD_PATH(p, uopt.branches[i].path, METADIR, path)) RETURN(-ENAMETOOLONG); if (strlen(p) + strlen(HIDETAG) > PATHLEN_MAX) RETURN(-ENAMETOOLONG); strcat(p, HIDETAG); // TODO check length switch (path_is_dir(p)) { case IS_FILE: unlink(p); break; case IS_DIR: rmdir(p); break; case NOT_EXISTING: continue; } } RETURN(0); }
/* Scan /proc/<tid>/maps and record every file-backed mapping (other than the
 * traced binary itself) as a FILE_READ open in the database.
 * Returns 0 on success, -1 on allocation/open/database failure. */
int trace_add_files_from_proc(unsigned int process, pid_t tid, const char *binary)
{
    FILE *fp;
    char dummy;
    char *line = NULL;
    size_t length = 0;
    char previous_path[4096] = "";

    const char *const fmt = "/proc/%d/maps";
    /* Measure, then format, the procfile path. */
    int len = snprintf(&dummy, 1, fmt, tid);
    char *procfile = malloc(len + 1);
    if(procfile == NULL)          /* BUGFIX: malloc was unchecked */
        return -1;
    snprintf(procfile, len + 1, fmt, tid);

    /* Loops on lines
     * Format:
     * 08134000-0813a000 rw-p 000eb000 fe:00 868355     /bin/bash
     * 0813a000-0813f000 rw-p 00000000 00:00 0
     * b7721000-b7740000 r-xp 00000000 fe:00 901950     /lib/ld-2.18.so
     * bfe44000-bfe65000 rw-p 00000000 00:00 0          [stack]
     */
#ifdef DEBUG_PROC_PARSER
    log_info(tid, "parsing %s", procfile);
#endif
    fp = fopen(procfile, "r");
    free(procfile);
    if(fp == NULL)                /* BUGFIX: fopen was unchecked */
        return -1;

    while((line = read_line(line, &length, fp)) != NULL)
    {
        unsigned long int addr_start, addr_end;
        char perms[5];
        unsigned long int offset;
        unsigned int dev_major, dev_minor;
        unsigned long int inode;
        char pathname[4096];
        /* BUGFIX: lines without a pathname leave the field untouched, so
         * clear it instead of reading stale stack contents; also bound the
         * %s conversion to the buffer size. */
        pathname[0] = '\0';
        sscanf(line,
               "%lx-%lx %4s %lx %x:%x %lu %4095s",
               &addr_start, &addr_end,
               perms,
               &offset,
               &dev_major, &dev_minor,
               &inode,
               pathname);
#ifdef DEBUG_PROC_PARSER
        log_info(tid,
                 "proc line:\n"
                 "    addr_start: %lx\n"
                 "    addr_end: %lx\n"
                 "    perms: %s\n"
                 "    offset: %lx\n"
                 "    dev_major: %x\n"
                 "    dev_minor: %x\n"
                 "    inode: %lu\n"
                 "    pathname: %s",
                 addr_start, addr_end, perms, offset,
                 dev_major, dev_minor, inode, pathname);
#endif
        /* Anonymous mappings have inode 0; skip them. */
        if(inode > 0)
        {
            if(strncmp(pathname, binary, 4096) != 0
             && strncmp(previous_path, pathname, 4096) != 0)
            {
#ifdef DEBUG_PROC_PARSER
                log_info(tid, "    adding to database");
#endif
                if(db_add_file_open(process, pathname,
                                    FILE_READ, path_is_dir(pathname)) != 0)
                {
                    fclose(fp);   /* BUGFIX: fp leaked on this error path */
                    return -1;
                }
                strncpy(previous_path, pathname, 4096);
            }
        }
    }
    fclose(fp);
    return 0;
}
/** * unionfs rename function * TODO: If we rename a directory on a read-only branch, we need to copy over * all files to the renamed directory on the read-write branch. */ static int unionfs_rename(const char *from, const char *to) { DBG("from %s to %s\n", from, to); bool is_dir = false; // is 'from' a file or directory int j = find_rw_branch_cutlast(to); if (j == -1) RETURN(-errno); int i = find_rorw_branch(from); if (i == -1) RETURN(-errno); if (!uopt.branches[i].rw) { i = find_rw_branch_cow_common(from, true); if (i == -1) RETURN(-errno); } if (i != j) { USYSLOG(LOG_ERR, "%s: from and to are on different writable branches %d vs %d, which" "is not supported yet.\n", __func__, i, j); RETURN(-EXDEV); } char f[PATHLEN_MAX], t[PATHLEN_MAX]; if (BUILD_PATH(f, uopt.branches[i].path, from)) RETURN(-ENAMETOOLONG); if (BUILD_PATH(t, uopt.branches[i].path, to)) RETURN(-ENAMETOOLONG); filetype_t ftype = path_is_dir(f); if (ftype == NOT_EXISTING) RETURN(-ENOENT); else if (ftype == IS_DIR) is_dir = true; int res; if (!uopt.branches[i].rw) { // since original file is on a read-only branch, we copied the from file to a writable branch, // but since we will rename from, we also need to hide the from file on the read-only branch if (is_dir) res = hide_dir(from, i); else res = hide_file(from, i); if (res) RETURN(-errno); } res = rename(f, t); if (res == -1) { int err = errno; // unlink() might overwrite errno // if from was on a read-only branch we copied it, but now rename failed so we need to delete it if (!uopt.branches[i].rw) { if (unlink(f)) USYSLOG(LOG_ERR, "%s: cow of %s succeeded, but rename() failed and now " "also unlink() failed\n", __func__, from); if (remove_hidden(from, i)) USYSLOG(LOG_ERR, "%s: cow of %s succeeded, but rename() failed and now " "also removing the whiteout failed\n", __func__, from); } RETURN(-err); } if (uopt.branches[i].rw) { // A lower branch still *might* have a file called 'from', we need to delete this. 
// We only need to do this if we have been on a rw-branch, since we created // a whiteout for read-only branches anyway. if (is_dir) maybe_whiteout(from, i, WHITEOUT_DIR); else maybe_whiteout(from, i, WHITEOUT_FILE); } remove_hidden(to, i); // remove hide file (if any) RETURN(0); }
/* Initialize application data.
 *
 * Sets up comment/category state, gconf monitoring, the icon theme, the
 * default window icon and session handling, then parses the remaining
 * command-line arguments into two global lists: dir_urls (directories to
 * browse) and file_urls (images to view). When more than one file is
 * given, the files are collected into a temporary "command line" catalog.
 * Returns early if a saved session is being restored. */
static void
initialize_data (void)
{
	char *current_dir;
	char *path, *filename;
	int   i = 0;

	convert_to_new_comment_system ();
	create_default_categories_if_needed ();

	eel_gconf_monitor_add ("/apps/gthumb");

	gth_monitor = gth_monitor_new ();

	/* Icon theme */

	icon_theme = gtk_icon_theme_get_default ();
	g_signal_connect (icon_theme, "changed", G_CALLBACK (theme_changed_cb), NULL);

	/* Default windows icon */

	init_icon_pixbufs ();
	g_set_application_name (_("gThumb"));
	gtk_window_set_default_icon_name ("gthumb");

	/**/

	init_session ("gthumb");
	/* A restored session re-creates its own windows; skip argument parsing. */
	if (session_is_restored ())
		return;

	/* Parse command line arguments. */

	if (remaining_args == NULL) { /* No arguments specified. */
		reset_command_line_catalog ();
		return;
	}

	current_dir = g_get_current_dir ();
	while ((filename = remaining_args[i++]) != NULL) {
		char     *tmp1 = NULL;
		gboolean  is_dir;

		/* Turn each argument into an absolute URI/path. */
		if (uri_has_scheme (filename) || g_path_is_absolute (filename))
			tmp1 = gnome_vfs_make_uri_from_shell_arg (filename);
		else
			tmp1 = g_strconcat (current_dir, "/", filename, NULL);
		path = remove_special_dirs_from_path (tmp1);
		g_free (tmp1);

		if (path_is_dir (path))
			is_dir = TRUE;
		else if (path_is_file (path))
			is_dir = FALSE;
		else {
			/* Neither a directory nor a regular file: skip it. */
			g_free (path);
			continue;
		}

		if (is_dir) {
			/* add_scheme_if_absent() presumably returns a new string,
			 * so the original path is freed here — TODO confirm. */
			dir_urls = g_list_prepend (dir_urls, add_scheme_if_absent (path));
			g_free (path);
		}
		else
			/* The list takes ownership of path. */
			file_urls = g_list_prepend (file_urls, path);
	}

	n_file_urls = g_list_length (file_urls);
	n_dir_urls = g_list_length (dir_urls);

	if (n_file_urls == 1)
		view_single_image = TRUE;

	if (n_file_urls > 1) {
		/* Create a catalog with the command line list. */
		Catalog *catalog;
		char    *catalog_path;
		GList   *scan;

		catalog = catalog_new ();
		catalog_path = get_command_line_catalog_path ();
		catalog_set_path (catalog, catalog_path);
		g_free (catalog_path);

		for (scan = file_urls; scan; scan = scan->next)
			catalog_add_item (catalog, scan->data);

		/* Preserve the order the files were given on the command line. */
		catalog->sort_method = GTH_SORT_METHOD_MANUAL;
		catalog_write_to_disk (catalog, NULL);
		catalog_free (catalog);

		view_comline_catalog = TRUE;
	}
	else
		reset_command_line_catalog ();

	g_free (current_dir);
}
/* Translate a GnomeVFS monitor event into a MonitorEventType and append the
 * path to the matching queue in monitor_events (an array of GList*, one list
 * per event type). Before queuing, coalesce with pending events so the
 * queues stay minimal, e.g. DELETE followed by CREATE collapses to CHANGED.
 * Does nothing while monitoring is disabled. */
static void
add_monitor_event (GthMonitor              *monitor,
		   GnomeVFSMonitorEventType event_type,
		   const char              *path,
		   GList                  **monitor_events)
{
	MonitorEventType type;
	char *op;

	if (!monitor->priv->monitor_enabled)
		return;

	/* op is only used for the debug trace below. */
	if (event_type == GNOME_VFS_MONITOR_EVENT_CREATED)
		op = "CREATED";
	else if (event_type == GNOME_VFS_MONITOR_EVENT_DELETED)
		op = "DELETED";
	else
		op = "CHANGED";
	debug (DEBUG_INFO, "[%s] %s", op, path);

	if (event_type == GNOME_VFS_MONITOR_EVENT_CREATED) {
		if (path_is_file (path))
			type = MONITOR_EVENT_FILE_CREATED;
		else if (path_is_dir (path))
			type = MONITOR_EVENT_DIR_CREATED;
		else
			return;
	}
	else if (event_type == GNOME_VFS_MONITOR_EVENT_DELETED) {
		/* The path no longer exists, so we cannot stat it; classify by
		 * file extension instead — presumably anything that is not a
		 * media file is treated as a directory. */
		if (file_is_image_video_or_audio (path, TRUE))
			type = MONITOR_EVENT_FILE_DELETED;
		else
			type = MONITOR_EVENT_DIR_DELETED;
	}
	else {
		if (path_is_file (path))
			type = MONITOR_EVENT_FILE_CHANGED;
		else
			return;
	}

	/* Coalesce with events already queued for the same path. */
	if (type == MONITOR_EVENT_FILE_CREATED) {
		/* DELETE then CREATE == the file changed. */
		if (remove_if_present (monitor_events,
				       MONITOR_EVENT_FILE_DELETED,
				       path))
			type = MONITOR_EVENT_FILE_CHANGED;
	}
	else if (type == MONITOR_EVENT_FILE_DELETED) {
		/* A deletion supersedes any pending create/change. */
		remove_if_present (monitor_events,
				   MONITOR_EVENT_FILE_CREATED,
				   path);
		remove_if_present (monitor_events,
				   MONITOR_EVENT_FILE_CHANGED,
				   path);
	}
	else if (type == MONITOR_EVENT_FILE_CHANGED) {
		/* Keep at most one pending CHANGED per path. */
		remove_if_present (monitor_events,
				   MONITOR_EVENT_FILE_CHANGED,
				   path);
	}
	else if (type == MONITOR_EVENT_DIR_CREATED) {
		remove_if_present (monitor_events,
				   MONITOR_EVENT_DIR_DELETED,
				   path);
	}
	else if (type == MONITOR_EVENT_DIR_DELETED)
		remove_if_present (monitor_events,
				   MONITOR_EVENT_DIR_CREATED,
				   path);

	/* The queue owns the duplicated path string. */
	monitor_events[type] = g_list_append (monitor_events[type], g_strdup (path));
}
int makeflow_archive_copy_preserved_files(struct archive_instance *a, struct batch_task *t, char *task_path ) { struct batch_file *f; struct stat buf; struct list_cursor *cur = list_cursor_create(t->output_files); // Iterate through output files for(list_seek(cur, 0); list_get(cur, (void**)&f); list_next(cur)) { char *file_name = xxstrdup(f->outer_name); debug(D_MAKEFLOW_HOOK,"Trying to copy file to %s",file_name); char *file_to_check = xxstrdup(file_name); //Check to see if the directory was copied as an empty file/incorrectly stat(dirname(file_to_check),&buf); if(S_ISREG(buf.st_mode)){ debug(D_MAKEFLOW,"Removing empty file in the place of directory name %s",file_to_check); char *dirEmpty = string_format("rm -rf %s",file_to_check); system(dirEmpty); free(dirEmpty); } free(file_to_check); // Gets path of output file char *output_file_path = string_format("%s/output_files/%s",task_path,basename(file_name)); char *directory_name = xxstrdup(file_name); debug(D_MAKEFLOW_HOOK,"Creating directory %s",dirname(directory_name)); if(strcmp(directory_name,file_name) != 0){ //Create the upper level directory to copy the output files into if necessary if (!create_dir(directory_name, 0777) && errno != EEXIST){ debug(D_ERROR|D_MAKEFLOW_HOOK,"Failed to create directory %s",directory_name); free(directory_name); free(output_file_path); free(file_name); return 1; } } free(directory_name); // Copy output file or directory over to specified location if(path_is_dir(output_file_path) != 1){ int success = copy_file_to_file(output_file_path, file_name); free(output_file_path); free(file_name); if (!success) { list_cursor_destroy(cur); debug(D_ERROR|D_MAKEFLOW_HOOK,"Failed to copy output file %s to %s\n", output_file_path, file_name); return 1; } } else{ if(copy_dir(output_file_path, file_name) != 0){ list_cursor_destroy(cur); debug(D_ERROR|D_MAKEFLOW_HOOK,"Failed to copy output file %s to %s\n", output_file_path, file_name); free(output_file_path); free(file_name); return 1; } 
free(output_file_path); free(file_name); } } list_cursor_destroy(cur); return 0; }
/* Archive the specified file. * This includes several steps: * 1. Generate the id * 2. Copy file to id if non-existent * 3. Link back to creating task * @return 0 if successfully archived, 1 if failed at any point. */ static int makeflow_archive_file(struct archive_instance *a, struct batch_file *f, char *job_file_archive_path) { /* Generate the file archive id (content based) if does not exist. */ char * id; if(path_is_dir(f->inner_name) == 1){ f->hash = batch_file_generate_id_dir(f->inner_name); id = xxstrdup(f->hash); } else{ id = batch_file_generate_id(f); } struct stat buf; int rv = 0; char * file_archive_dir = string_format("%s/files/%.2s", a->dir, id); char * file_archive_path = string_format("%s/%s", file_archive_dir, id); char * job_file_archive_dir = NULL; /* Create the archive path with 2 character prefix. */ if (!create_dir(file_archive_dir, 0777) && errno != EEXIST){ debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create file archiving directory %s: %d %s\n", file_archive_dir, errno, strerror(errno)); rv = 1; goto FAIL; } /* Check if file is already archived */ if(stat(file_archive_path, &buf) >= 0) { debug(D_MAKEFLOW_HOOK, "file %s already archived at %s", f->outer_name, file_archive_path); /* File did not already exist, store in general file area */ } else { if(path_is_dir(f->outer_name) != 1){ if (!copy_file_to_file(f->outer_name, file_archive_path)){ debug(D_ERROR|D_MAKEFLOW_HOOK, "could not archive output file %s at %s: %d %s\n", f->outer_name, file_archive_path, errno, strerror(errno)); rv = 1; goto FAIL; } } else{ debug(D_MAKEFLOW,"COPYING %s to the archive",f->outer_name); if(copy_dir(f->outer_name,file_archive_path) != 0){ debug(D_ERROR|D_MAKEFLOW_HOOK, "could not archive output file %s at %s: %d %s\n", f->outer_name, file_archive_path, errno, strerror(errno)); rv = 1; goto FAIL; } } } /* Create the directory structure for job_file_archive. 
*/ job_file_archive_dir = xxstrdup(job_file_archive_path); path_dirname(job_file_archive_path, job_file_archive_dir); if (!create_dir(job_file_archive_dir, 0777) && errno != EEXIST){ debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create job file directory %s: %d %s\n", file_archive_dir, errno, strerror(errno)); rv = 1; goto FAIL; } if(a->s3){ int result = 1; // Check to see if file already exists in the s3 bucket if(a->s3_check){ if(!in_s3_archive(a,id)){ result = makeflow_archive_s3_file(a,id,file_archive_path); } } else result = makeflow_archive_s3_file(a,id,file_archive_path); /* Copy file to the s3 bucket*/ if(!result){ debug(D_ERROR|D_MAKEFLOW_HOOK, "could not copy file %s to s3 bucket: %d %s\n", id, errno, strerror(errno)); rv = 1; goto FAIL; } } free(file_archive_path); file_archive_path = string_format("../../../../files/%.2s/%s", id, id); /* Create a symlink to task that used/created this file. */ int symlink_failure = symlink(file_archive_path, job_file_archive_path); if (symlink_failure && errno != EEXIST) { debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create symlink %s pointing to %s: %d %s\n", job_file_archive_path, file_archive_path, errno, strerror(errno)); rv = 1; goto FAIL; } FAIL: free(id); free(file_archive_dir); free(file_archive_path); free(job_file_archive_dir); return rv; }
/* Write the task and run info to the task directory * These files are hardcoded to task_info and run_info */ static int makeflow_archive_write_task_info(struct archive_instance *a, struct dag_node *n, struct batch_task *t, char *archive_path) { struct batch_file *f; /* task_info : * COMMAND: Tasks command that was run * SRC_COMMAND: Origin node's command for reference * SRC_LINE: Line of origin node in SRC_MAKEFLOW * SRC_MAKEFLOW: ID of file for the original Makeflow stored in archive * INPUT_FILES: Alphabetic list of input files checksum IDs * OUTPUT_FILES: Alphabetic list of output file inner_names */ struct jx *task_jx = jx_object(NULL); jx_insert(task_jx, jx_string("COMMAND"), jx_string(t->command)); jx_insert(task_jx, jx_string("SRC_COMMAND"), jx_string(n->command)); jx_insert(task_jx, jx_string("SRC_LINE"), jx_integer(n->linenum)); jx_insert(task_jx, jx_string("SRC_MAKEFLOW"), jx_string(a->source_makeflow)); struct jx * input_files = jx_object(NULL); struct list_cursor *cur = list_cursor_create(t->input_files); for(list_seek(cur, 0); list_get(cur, (void**)&f); list_next(cur)) { /* Generate the file archive id (content based) if does not exist. */ char * id; if(path_is_dir(f->inner_name) == 1){ f->hash = batch_file_generate_id_dir(f->inner_name); id = xxstrdup(f->hash); } else{ id = batch_file_generate_id(f); } jx_insert(input_files, jx_string(f->inner_name), jx_string(id)); free(id); } list_cursor_destroy(cur); jx_insert(task_jx, jx_string("INPUT_FILES"), input_files); struct jx * output_files = jx_object(NULL); cur = list_cursor_create(t->output_files); for(list_seek(cur, 0); list_get(cur, (void**)&f); list_next(cur)) { /* Generate the file archive id (content based) if does not exist. 
*/ char * id; if(path_is_dir(f->inner_name) == 1){ f->hash = batch_file_generate_id_dir(f->inner_name); id = xxstrdup(f->hash); } else{ id = batch_file_generate_id(f); } jx_insert(output_files, jx_string(f->inner_name), jx_string(id)); free(id); } list_cursor_destroy(cur); jx_insert(task_jx, jx_string("OUTPUT_FILES"), output_files); char *task_info = string_format("%s/task_info", archive_path); FILE *fp = fopen(task_info, "w"); if (fp == NULL) { free(task_info); debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create task_info for node %d archive", n->nodeid); return 0; } else { jx_pretty_print_stream(task_jx, fp); } fclose(fp); free(task_info); jx_delete(task_jx); /* run_info : * SUBMITTED : Time task was submitted * STARTED : Time task was started * FINISHED : Time task was completed * EXIT_NORMALLY : 0 if abnormal exit, 1 is normal * EXIT_CODE : Task's exit code * EXIT_SIGNAL : Int value of signal if occurred */ struct jx * run_jx = jx_object(NULL); jx_insert(run_jx, jx_string("SUBMITTED"), jx_integer(t->info->submitted)); jx_insert(run_jx, jx_string("STARTED"), jx_integer(t->info->started)); jx_insert(run_jx, jx_string("FINISHED"), jx_integer(t->info->finished)); jx_insert(run_jx, jx_string("EXIT_NORMAL"), jx_integer(t->info->exited_normally)); jx_insert(run_jx, jx_string("EXIT_CODE"), jx_integer(t->info->exit_code)); jx_insert(run_jx, jx_string("EXIT_SIGNAL"), jx_integer(t->info->exit_signal)); task_info = string_format("%s/run_info", archive_path); fp = fopen(task_info, "w"); if (fp == NULL) { free(task_info); debug(D_ERROR|D_MAKEFLOW_HOOK, "could not create run_info for node %d archive", n->nodeid); return 0; } else { jx_pretty_print_stream(run_jx, fp); } fclose(fp); free(task_info); jx_delete(run_jx); return 1; }