// Set "key = value" in the configuration file at path.
//
// The file is rewritten via a temporary file: lines whose key matches are
// replaced, all other lines are copied through, and the key is appended if
// it was not present. On success the temp file is renamed over path.
//
// Returns true on success; on failure returns false and sets *errmsg to a
// malloc'd message the caller must free.
bool conf_set_value_in_file(const char *path, const char *key,
                            const char *value, char **errmsg)
{
	FILE *infile, *outfile;
	char *outpath;
	char buf[10000];
	bool found;
	const struct conf_item *item;

	item = find_conf(key);
	if (!item) {
		*errmsg = format("unknown configuration option \"%s\"", key);
		return false;
	}

	infile = fopen(path, "r");
	if (!infile) {
		*errmsg = format("%s: %s", path, strerror(errno));
		return false;
	}

	outpath = format("%s.tmp", path);
	outfile = create_tmp_file(&outpath, "w");
	if (!outfile) {
		*errmsg = format("%s: %s", outpath, strerror(errno));
		free(outpath);
		fclose(infile);
		return false;
	}

	found = false;
	while (fgets(buf, sizeof(buf), infile)) {
		char *errmsg2 = NULL; // NULL so free() below is safe if unset
		char *key2, *value2;
		bool ok = parse_line(buf, &key2, &value2, &errmsg2);
		if (ok && key2 && str_eq(key2, key)) {
			found = true;
			fprintf(outfile, "%s = %s\n", key, value);
		} else {
			// Copy through unrelated (or unparsable) lines verbatim.
			fputs(buf, outfile);
		}
		free(errmsg2); // BUG FIX: parse error message was leaked
		free(key2);
		free(value2);
	}
	if (!found) {
		fprintf(outfile, "%s = %s\n", key, value);
	}

	fclose(infile);
	fclose(outfile);
	if (x_rename(outpath, path) != 0) {
		*errmsg = format("rename %s to %s: %s", outpath, path,
		                 strerror(errno));
		free(outpath); // BUG FIX: outpath was leaked on this error path
		return false;
	}
	free(outpath);

	return true;
}
// Run "fsck" on each partition of dev_file named in part_list and report
// whether any output line contains both "files" and "clusters" (the summary
// line fsck prints for a FAT filesystem check).
//
// part_list is terminated by a non-positive entry.
// Returns 1 if the marker line was seen for any partition, 0 otherwise.
int fsck_test(char *dev_file, int *part_list)
{
	int result = 0;
	int i;
	char *cmd = NULL;
	FILE *tmp_file = NULL;
	char *tmp_file_name = NULL;
	char r_buffer[101];

	tmp_file_name = create_tmp_file(&tmp_file);
	if (tmp_file_name) {
		// BUG FIX: the old code computed sizeof(part_list)/sizeof(int),
		// but part_list is a pointer, so only sizeof(int*)/sizeof(int)
		// entries were ever inspected. The list is terminated by a
		// non-positive entry, so iterate on that sentinel alone.
		for (i = 0; part_list[i] > 0; i++) {
			cmd = (char *) calloc(strlen(dev_file) + 100, sizeof(char));
			if (!cmd) {
				break;
			}
			sprintf(cmd, "fsck %s%d", dev_file, part_list[i]);
			execute_command(cmd, tmp_file_name);
			// Read each command's output from the start of the file.
			// NOTE(review): assumes execute_command rewrites the file on
			// every invocation — confirm against its implementation.
			rewind(tmp_file);
			// BUG FIX: the old loop tested "!fgets(...)", i.e. it only
			// examined r_buffer when the read FAILED, scanning stale or
			// uninitialized data instead of the command output.
			while (fgets(r_buffer, 100, tmp_file)) {
				if (strstr(r_buffer, "files") != NULL
				    && strstr(r_buffer, "clusters") != NULL) {
					result = 1;
				}
			}
			free(cmd);
		}
	}
	return result;
}
// Run cmd, capturing its output in a freshly created temporary file.
// On return *tmp_file is the open stream for that file.
void execute_command_create_output (char * cmd, FILE ** tmp_file)
{
	char *out_name = create_tmp_file(tmp_file);
	execute_command(cmd, out_name);
}
// Fetch the file described by stats (at revision stats->rev) from repo into
// a temporary file tracked by a node in open_files, under the per-file lock.
// Returns 0 on success (node->count tracks concurrent users), -1 on failure.
// NOTE(review): every exit goes through retrieve_common_finish so the lock
// is always released and the local allocations freed.
int retrieve_common(struct file_system_info *fsinfo, struct stats *stats, int repo){
#define retrieve_common_finish(value){ \
	unlock(file_mutex[repo][stats->rev]); \
	gstrdel(file); \
	gstrdel(revision); \
	free(temp); \
	return value; \
}
	char *file = NULL;
	// Holds the rdiff revision spec, e.g. "5B"; 20 bytes fits any int.
	char *revision = calloc(20, sizeof(char));
	struct stat *temp = single(struct stat);
	debug(2, "Received file %s from repo %d;\n", stats->path, repo);
	lock(file_mutex[repo][stats->rev]);
	node_t *node = add_file(open_files, stats->path, stats->rev);
	// Another user already holds this file open: just bump the refcount.
	if (node->count > 0){
		node->count++;
		retrieve_common_finish(0);
	};
	if (create_tmp_file(stats, node) == -1)
		retrieve_common_finish(-1);
	// Build the repository-internal path to the file.
	if (gmstrcpy(&file, fsinfo->repos[repo], "/", stats->internal, 0) == -1)
		retrieve_common_finish(-1);
	sprintf(revision, "%dB", stats->rev);
	if (retrieve_rdiff(revision, file, node->tmp_path) != 0)
		retrieve_common_finish(-1);
	// Sanity check: the retrieved file must have the expected size.
	if (stat(node->tmp_path, temp) != 0 || temp->st_size != stats->size)
		retrieve_common_finish(-1);
	node->count = 1;
	retrieve_common_finish(0);
};
// Run dmidecode, leaving its output in a newly created temporary file
// whose open stream is returned through *tmp_file.
void execute_dmidecode (FILE ** tmp_file)
{
	char *out_name = create_tmp_file(tmp_file);
	// TODO: refine this invocation so the result contains fewer lines
	execute_command("/usr/sbin/dmidecode", out_name);
}
// Collect partition number, size and filesystem type for devFile by parsing
// the machine-readable output of "parted -m <dev> print", appending each
// value to the info list at tail.
//
// parted -m prints colon-separated fields; a line whose first field is a
// digit describes one partition (field 3 = size, field 4 = fs type).
void partitions_info (char *devFile, ldc_info_t * tail)
{
	int numpart = 0;
	char *cmd = NULL;
	FILE *tmp_file = NULL;
	char *tmp_file_name = NULL;
	char r_buffer[101];
	char *tokens = NULL;
	char name[100];

	tmp_file_name = create_tmp_file(&tmp_file);
	if (tmp_file_name) {
		cmd = (char *) calloc(strlen(devFile) + 50, sizeof(char));
		if (cmd) { // BUG FIX: calloc result was used unchecked
			strcpy(cmd, "parted -m ");
			strcat(cmd, devFile);
			strcat(cmd, " print");
			execute_command(cmd, tmp_file_name);
			// BUG FIX: loop on fgets() directly instead of !feof();
			// the old form made one wasted pass after EOF.
			while (fgets(r_buffer, 100, tmp_file)) {
				tokens = strtok(r_buffer, ":");
				int num_token = 0;
				int end = 0;
				while (!end && tokens != NULL) {
					if (num_token == 0) {
						// BUG FIX: cast for isdigit — plain char may be
						// negative, which is UB for <ctype.h> functions.
						if (isdigit((unsigned char) tokens[0])) {
							numpart++;
						} else {
							end = 1; // header/prefix line: skip it
						}
					}
					if (num_token == 3) {
						sprintf(name, "partition_%d_size", numpart);
						add_info_tuple(tail, name, tokens, numpart, "Partição e Tamanho");
					}
					if (num_token == 4) {
						sprintf(name, "partition_%d_fileType", numpart);
						add_info_tuple(tail, name, tokens, numpart, "Partição e Sistema de arquivos");
						end = 1; // last field we care about on this line
					}
					tokens = strtok(NULL, ":");
					num_token++;
				}
			}
			free(cmd);
		}
		fclose(tmp_file); // BUG FIX: the stream was leaked
	}
}
// Write out a stats file. void stats_write(const char *path, struct counters *counters) { char *tmp_file = format("%s.tmp", path); FILE *f = create_tmp_file(&tmp_file, "wb"); for (size_t i = 0; i < counters->size; i++) { if (fprintf(f, "%u\n", counters->data[i]) < 0) { fatal("Failed to write to %s", tmp_file); } } fclose(f); x_rename(tmp_file, path); free(tmp_file); }
//------------------------------------------------------------------------- // contract_db() // Wed Jan 30 10:44:01 EST 2002 //------------------------------------------------------------------------- // Contract the CTS or CRATE db files // // input: db type, // current number of entries in CTS data file // output: status //------------------------------------------------------------------------- int contract_db( int dbType, int numOfEntries ) { char *FileName; int FileIncr, newCount; int status = SUCCESS; // optimistic, aren't we ... :> char tmpfile[1024]; strcpy(tmpfile,get_file_name("mdscts_temp_file_XXXXXX")); if( MSGLVL(FUNCTION_NAME) ) printf( "contract_db()\n" ); // assimilate db specific information ... switch( dbType ) { case CTS_DB: FileName = CTS_DB_FILE; FileIncr = CTS_DB_INCREMENT; break; case CRATE_DB: FileName = CRATE_DB_FILE; FileIncr = CRATE_DB_INCREMENT; break; } // make db file smaller .... [2002.01.30] // // // calculate new db file size newCount = (((int)numOfEntries / FileIncr) + 1) * FileIncr; // create a TMP file if( (status = create_tmp_file( dbType, newCount, tmpfile )) != SUCCESS ) { if( MSGLVL(ALWAYS) ) fprintf( stderr, "error creating TMP file\n" ); goto ContractDB_Exit; } // only need to copy old data if there is any if( numOfEntries ) { // copy current db file to TMP file if( (status = copy( dbType, FileName, tmpfile, numOfEntries )) != SUCCESS ) { if( MSGLVL(ALWAYS) ) fprintf( stderr, "error copying db to TMP file\n" ); goto ContractDB_Exit; } // remove old db file if( Remove( FileName ) ) { // non-zero is an error if( MSGLVL(ALWAYS) ) { fprintf( stderr, "error removing old db file\n" ); perror("remove()"); } status = CONTRACT_ERROR; goto ContractDB_Exit; } } if( rename(tmpfile, get_file_name( FileName )) ) { // non-zero is an error if( MSGLVL(ALWAYS) ) { fprintf( stderr, "error renaming temp db file\n" ); perror("rename()"); } status = CONTRACT_ERROR; goto ContractDB_Exit; } chmod(get_file_name( FileName ), O666); // re-map file 
if( map_data_file( dbType ) != SUCCESS ) { if( MSGLVL(ALWAYS) ) fprintf( stderr, "unable to map contracted file\n" ); status = MAP_ERROR; goto ContractDB_Exit; } ContractDB_Exit: if( MSGLVL(DETAILS) ) { printf( "contract_db('%s'): ", get_file_name( FileName )); ShowStatus( status ); } return status; }
int thrashing_test(int test_runs) { int fds[4] = {-1, -1, -1, -1}; char tmpnames[4][17] = { "thrashing1XXXXXX", "thrashing2XXXXXX", "thrashing3XXXXXX", "thrashing4XXXXXX" }; volatile char *bufs[4] = {0}; unsigned i, j; long long k; int ret = -1; struct timeval begin_time, end_time, elapsed_time, total_time; unsigned long long filesize; long num_pages; long pagesize; timerclear(&total_time); num_pages = sysconf(_SC_PHYS_PAGES); pagesize = sysconf(_SC_PAGE_SIZE); if (num_pages < 0) { fprintf(stderr, "failed to get the number of pages\n"); return -1; } filesize = num_pages * pagesize / (ARRAY_SIZE(fds) - 1); for (i = 0; i < ARRAY_SIZE(fds); i++) { fds[i] = create_tmp_file(tmpnames[i], filesize); if (fds[i] < 0) { goto err_fd; } } for (i = 0; i < ARRAY_SIZE(fds); i++) { bufs[i] = mmap(NULL, filesize, PROT_READ, MAP_PRIVATE, fds[i], 0); if (bufs[i] == ((void *)-1)) { fprintf(stderr, "Failed to mmap file: %s\n", strerror(errno)); goto err; } } for (i = 0; i < test_runs; i++) { for (j = 0; j < ARRAY_SIZE(fds); j++) { gettimeofday(&begin_time, NULL); //Unfortunately when under memory pressure, fadvise and madvise stop working... //Read backwards to prevent mmap prefetching for (k = ((filesize - 1) & ~(pagesize - 1)); k >= 0; k -= pagesize) { bufs[j][k]; } gettimeofday(&end_time, NULL); timersub(&end_time, &begin_time, &elapsed_time); timeradd(&total_time, &elapsed_time, &total_time); } } printf("thrashing: %llu MB/s\n", (filesize * ARRAY_SIZE(fds) * test_runs * USEC_PER_SEC) / (1024 * 1024 * (total_time.tv_sec * USEC_PER_SEC + total_time.tv_usec))); ret = 0; err: for (i = 0; i < ARRAY_SIZE(bufs) && bufs[i] != NULL; i++) { munmap((void *)bufs[i], filesize); } err_fd: for (i = 0; i < ARRAY_SIZE(fds) && fds[i] >= 0; i++) { close(fds[i]); } return ret; }
/* Forked service worker: validate the pid file, initialize the JVM, drop
 * privileges, start the Java service, then loop waiting for signals until
 * asked to stop or reload.
 *
 * args: parsed command-line options; data: discovered JVM home info;
 * uid/gid: identity to run the service as.
 * Returns 0 on clean shutdown, 123 to request a reload by the parent, or a
 * small positive/negative code identifying the stage that failed. */
static int child(arg_data *args, home_data *data, uid_t uid, gid_t gid)
{
    int ret = 0;

    /* check the pid file */
    ret = check_pid(args);
    if (args->vers != true && args->chck != true) {
        /* 122 and negative statuses abort the run unless we were only
         * asked to print the version or do a check-only pass */
        if (ret == 122)
            return ret;
        if (ret < 0)
            return ret;
    }

#ifdef OS_LINUX
    /* setuid()/setgid() only apply the current thread so we must do it now */
    if (linuxset_user_group(args->user, uid, gid) != 0)
        return 4;
#endif
    /* Initialize the Java VM */
    if (java_init(args, data) != true) {
        log_debug("java_init failed");
        return 1;
    }
    else
        log_debug("java_init done");

    /* Check whether we need to dump the VM version */
    if (args->vers == true) {
        log_error("jsvc (Apache Commons Daemon) " JSVC_VERSION_STRING);
        log_error("Copyright (c) 1999-2011 Apache Software Foundation.");
        if (java_version() != true) {
            return -1;
        }
        else
            return 0;
    }
    /* Check whether we need to dump the VM version */
    else if (args->vershow == true) {
        if (java_version() != true) {
            return 7;
        }
    }

    /* Do we have to do a "check-only" initialization? */
    if (args->chck == true) {
        if (java_check(args) != true)
            return 2;
        printf("Service \"%s\" checked successfully\n", args->clas);
        return 0;
    }

    /* Load the service */
    if (java_load(args) != true) {
        log_debug("java_load failed");
        return 3;
    }
    else
        log_debug("java_load done");

    /* Downgrade user */
#ifdef OS_LINUX
    if (args->user && set_caps(0) != 0) {
        log_debug("set_caps (0) failed");
        return 4;
    }
#else
    if (set_user_group(args->user, uid, gid) != 0)
        return 4;
#endif

    /* Start the service */
    umask(envmask);
    if (java_start() != true) {
        log_debug("java_start failed");
        return 5;
    }
    else
        log_debug("java_start done");

    /* Install signal handlers */
    handler_hup = signal_set(SIGHUP, handler);
    handler_usr1 = signal_set(SIGUSR1, handler);
    handler_usr2 = signal_set(SIGUSR2, handler);
    handler_trm = signal_set(SIGTERM, handler);
    handler_int = signal_set(SIGINT, handler);
    controlled = getpid();

    log_debug("Waiting for a signal to be delivered");
    /* NOTE(review): appears to record this process in a /tmp state file,
     * removed again below — confirm against create_tmp_file() */
    create_tmp_file(args);
    /* Sleep in one-minute slices until a handler sets the stopping flag */
    while (!stopping) {
#if defined(OSD_POSIX)
        java_sleep(60);
        /* pause(); */
#else
        /* pause() is not threadsafe */
        sleep(60);
#endif
        /* flag set by the signal handler: reopen the output files */
        if(doreopen) {
            doreopen = false;
            set_output(args->outfile, args->errfile, args->redirectstdin,
                       args->procname);
        }
        /* flag set by the signal handler: forward a signal to the JVM */
        if(dosignal) {
            dosignal = false;
            java_signal();
        }
    }
    remove_tmp_file(args);
    log_debug("Shutdown or reload requested: exiting");

    /* Stop the service */
    if (java_stop() != true)
        return 6;

    /* 123 asks the parent process to restart the service */
    if (doreload == true)
        ret = 123;
    else
        ret = 0;

    /* Destroy the service */
    java_destroy();

    /* Destroy the Java VM */
    if (JVM_destroy(ret) != true)
        return 7;

    return ret;
}
static int hotplug_add_device(void) { int ret; char serv[256]; char *busid = NULL; logmsg("bind-driver.c:: adding device" ); ret = read_config(); if(ret) { if(ret == 1) { logmsg("usbip turned off in config file"); return 0; }else { logerr("error reading config file"); return -1; } } ret = get_export_server(serv, 256);/*XXX*/ if(ret) { logerr("could not get export server"); return -1; } logmsg("export server is '%s'", serv); ret = export_device_q(); switch (ret) { case EXPORT: logmsg(" -- exporting device --" ); break; case NO_EXPORT: logmsg(" -- not exporting device --"); return 0; break; default: logwarn("export_device_q returned with error: %d",ret); return -1; } busid = get_busid_from_env(); if( busid == NULL ) { logerr("error retrieving busid"); return -1; } logmsg("busid: '%s'", busid); logmsg("exporting device '%s' to server '%s'", busid, serv); ret = export_to(serv, busid); if(ret) { logerr("exporting of device failed"); logerr("see syslog for details"); return -1; } logmsg("device exported"); ret = create_tmp_file(serv, busid); if(ret) { logwarn("failed to create /tmp file"); } return 0; }
// Inspect, list, plot, histogram and/or clean a ctp link file.
//
// Reads links from <ctp_path> one kmer at a time into a LinkTree, then,
// depending on the options: writes a cleaned ctp file (--out / --clean),
// a CSV listing (--list), a DOT plot of one kmer's tree (--plot), and/or
// coverage histograms plus a suggested cleaning threshold (-H / -T).
int ctx_links(int argc, char **argv)
{
  size_t limit = 0;
  const char *link_out_path = NULL, *csv_out_path = NULL, *plot_out_path = NULL;
  const char *thresh_path = NULL, *hist_path = NULL;
  size_t hist_distsize = 0, hist_covgsize = 0;
  size_t cutoff = 0;
  bool clean = false;

  // Arg parsing
  char cmd[100];
  char shortopts[300];
  cmd_long_opts_to_short(longopts, shortopts, sizeof(shortopts));
  int c;

  while((c = getopt_long_only(argc, argv, shortopts, longopts, NULL)) != -1) {
    cmd_get_longopt_str(longopts, c, cmd, sizeof(cmd));
    switch(c) {
      case 0: /* flag set */ break;
      case 'h': cmd_print_usage(NULL); break;
      case 'o': cmd_check(!link_out_path, cmd); link_out_path = optarg; break;
      case 'f': cmd_check(!futil_get_force(), cmd); futil_set_force(true); break;
      case 'l': cmd_check(!csv_out_path, cmd); csv_out_path = optarg; break;
      case 'c': cmd_check(!cutoff, cmd); cutoff = cmd_size(cmd, optarg); clean = true; break;
      case 'L': cmd_check(!limit, cmd); limit = cmd_size(cmd, optarg); break;
      case 'P': cmd_check(!plot_out_path, cmd); plot_out_path = optarg; break;
      case 'T': cmd_check(!thresh_path, cmd); thresh_path = optarg; break;
      case 'H': cmd_check(!hist_path, cmd); hist_path = optarg; break;
      case 'C': cmd_check(!hist_covgsize, cmd); hist_covgsize = cmd_size(cmd, optarg); break;
      case 'D': cmd_check(!hist_distsize, cmd); hist_distsize = cmd_size(cmd, optarg); break;
      case ':': /* BADARG */
      case '?': /* BADCH getopt_long has already printed error */
        // cmd_print_usage(NULL);
        die("`"CMD" links -h` for help. Bad option: %s", argv[optind-1]);
      default: ctx_assert2(0, "shouldn't reach here: %c", c);
    }
  }

  // Histogram dimensions only make sense together with --covg-hist
  if(hist_distsize && !hist_path) cmd_print_usage("--max-dist without --covg-hist");
  if(hist_covgsize && !hist_path) cmd_print_usage("--max-covg without --covg-hist");

  // Defaults
  if(!hist_distsize) hist_distsize = DEFAULT_MAX_DIST;
  if(!hist_covgsize) hist_covgsize = DEFAULT_MAX_COVG;

  if(optind + 1 != argc) cmd_print_usage("Wrong number of arguments");
  const char *ctp_path = argv[optind];

  // Which outputs were requested
  bool list = (csv_out_path != NULL);
  bool plot = (plot_out_path != NULL);
  bool save = (link_out_path != NULL);
  bool hist_covg = (thresh_path != NULL || hist_path != NULL);

  // --limit N means the plotted kmer is the last one read (index N-1)
  size_t plot_kmer_idx = (limit == 0 ? 0 : limit - 1);

  if(clean && !save)
    cmd_print_usage("Need to give --out <out.ctp.gz> with --clean");

  if(!save && !list && !plot && !hist_covg)
    cmd_print_usage("Please specify one of --plot, --list or --clean");

  if(link_out_path && hist_covg && strcmp(link_out_path,"-") == 0)
    cmd_print_usage("Outputing both cleaning threshold (-T) and links (-o) to STDOUT!");

  // Open input file
  FILE *list_fh = NULL, *plot_fh = NULL, *link_tmp_fh = NULL;
  FILE *thresh_fh = NULL, *hist_fh = NULL;
  gzFile link_gz = NULL;

  // Check file don't exist or that we can overwrite
  // Will ignore if path is null
  bool err = false;
  err |= futil_check_outfile(csv_out_path);
  err |= futil_check_outfile(plot_out_path);
  err |= futil_check_outfile(link_out_path);
  err |= futil_check_outfile(thresh_path);
  err |= futil_check_outfile(hist_path);
  if(err) die("Use -f,--force to overwrite files");

  StrBuf link_tmp_path;
  strbuf_alloc(&link_tmp_path, 1024);

  GPathReader ctpin;
  memset(&ctpin, 0, sizeof(ctpin));
  gpath_reader_open(&ctpin, ctp_path);

  size_t ncols = file_filter_into_ncols(&ctpin.fltr);
  size_t kmer_size = gpath_reader_get_kmer_size(&ctpin);
  cJSON *newhdr = cJSON_Duplicate(ctpin.json, 1);

  if(ncols != 1) die("Can only clean a single colour at a time. Sorry.");

  // hists[dist][covg]: link counts bucketed by junction distance & coverage
  uint64_t (*hists)[hist_covgsize] = NULL;
  if(hist_covg) {
    hists = ctx_calloc(hist_distsize, sizeof(hists[0]));
  }

  if(hist_path && (hist_fh = futil_fopen_create(hist_path, "w")) == NULL)
    die("Cannot open file: %s", hist_path);

  if(thresh_path && (thresh_fh = futil_fopen_create(thresh_path, "w")) == NULL)
    die("Cannot open file: %s", thresh_path);

  if(limit) status("Limiting to the first %zu kmers", limit);

  if(clean) {
    timestamp();
    message(" Cleaning coverage below %zu", cutoff);
    message("\n");
  }

  if(save) {
    // Check we can find the fields we need
    cJSON *links_json = json_hdr_get(newhdr, "paths", cJSON_Object, link_out_path);
    cJSON *nkmers_json = json_hdr_get(links_json, "num_kmers_with_paths", cJSON_Number, link_out_path);
    cJSON *nlinks_json = json_hdr_get(links_json, "num_paths", cJSON_Number, link_out_path);
    cJSON *nbytes_json = json_hdr_get(links_json, "path_bytes", cJSON_Number, link_out_path);
    if(!nkmers_json || !nlinks_json || !nbytes_json)
      die("Cannot find required header entries");

    // Create a random temporary file
    link_tmp_fh = create_tmp_file(&link_tmp_path, link_out_path);
    status("Saving output to: %s", link_out_path);
    status("Temporary output: %s", link_tmp_path.b);

    // Open output file
    if((link_gz = futil_gzopen_create(link_out_path, "w")) == NULL)
      die("Cannot open output link file: %s", link_out_path);

    // Need to open output file first so we can get absolute path
    // Update the header to include this command
    json_hdr_add_curr_cmd(newhdr, link_out_path);
  }

  if(list) {
    status("Listing to %s", csv_out_path);
    if((list_fh = futil_fopen_create(csv_out_path, "w")) == NULL)
      die("Cannot open output CSV file %s", csv_out_path);

    // Print csv header
    fprintf(list_fh, "SeqLen,Covg\n");
  }

  if(plot) {
    status("Plotting kmer %zu to %s", plot_kmer_idx, plot_out_path);
    if((plot_fh = futil_fopen_create(plot_out_path, "w")) == NULL)
      die("Cannot open output .dot file %s", plot_out_path);
  }

  // Reusable buffers for reading one kmer's worth of links at a time
  SizeBuffer countbuf, jposbuf;
  size_buf_alloc(&countbuf, 16);
  size_buf_alloc(&jposbuf, 1024);

  StrBuf kmerbuf, juncsbuf, seqbuf, outbuf;
  strbuf_alloc(&kmerbuf, 1024);
  strbuf_alloc(&juncsbuf, 1024);
  strbuf_alloc(&seqbuf, 1024);
  strbuf_alloc(&outbuf, 1024);

  bool link_fw;
  size_t njuncs;
  size_t knum, nlinks, num_links_exp = 0;

  LinkTree ltree;
  ltree_alloc(&ltree, kmer_size);

  LinkTreeStats tree_stats;
  memset(&tree_stats, 0, sizeof(tree_stats));
  size_t init_num_links = 0, num_links = 0;

  // Main loop: one LinkTree per kmer
  for(knum = 0; !limit || knum < limit; knum++)
  {
    ltree_reset(&ltree);
    if(!gpath_reader_read_kmer(&ctpin, &kmerbuf, &num_links_exp)) break;
    ctx_assert2(kmerbuf.end == kmer_size, "Kmer incorrect length %zu != %zu",
                kmerbuf.end, kmer_size);

    // status("kmer: %s", kmerbuf.b);

    for(nlinks = 0;
        gpath_reader_read_link(&ctpin, &link_fw, &njuncs,
                               &countbuf, &juncsbuf, &seqbuf, &jposbuf);
        nlinks++)
    {
      ltree_add(&ltree, link_fw, countbuf.b[0], jposbuf.b,
                juncsbuf.b, seqbuf.b);
    }

    if(nlinks != num_links_exp)
      warn("Links count mismatch %zu != %zu", nlinks, num_links_exp);

    if(hist_covg) {
      ltree_update_covg_hists(&ltree, (uint64_t*)hists,
                              hist_distsize, hist_covgsize);
    }

    if(clean) {
      ltree_clean(&ltree, cutoff);
    }

    // Accumulate statistics; num_links is this kmer's contribution
    ltree_get_stats(&ltree, &tree_stats);
    num_links = tree_stats.num_links - init_num_links;
    init_num_links = tree_stats.num_links;

    if(list) {
      ltree_write_list(&ltree, &outbuf);
      if(fwrite(outbuf.b, 1, outbuf.end, list_fh) != outbuf.end)
        die("Cannot write CSV file to: %s", csv_out_path);
      strbuf_reset(&outbuf);
    }
    if(save && num_links) {
      ltree_write_ctp(&ltree, kmerbuf.b, num_links, &outbuf);
      if(fwrite(outbuf.b, 1, outbuf.end, link_tmp_fh) != outbuf.end)
        die("Cannot write ctp file to: %s", link_tmp_path.b);
      strbuf_reset(&outbuf);
    }
    if(plot && knum == plot_kmer_idx) {
      status("Plotting tree...");
      ltree_write_dot(&ltree, &outbuf);
      if(fwrite(outbuf.b, 1, outbuf.end, plot_fh) != outbuf.end)
        die("Cannot write plot DOT file to: %s", plot_out_path);
      strbuf_reset(&outbuf);
    }
  }

  gpath_reader_close(&ctpin);

  cJSON *links_json = json_hdr_get(newhdr, "paths", cJSON_Object, link_out_path);
  cJSON *nkmers_json = json_hdr_get(links_json, "num_kmers_with_paths", cJSON_Number, link_out_path);
  cJSON *nlinks_json = json_hdr_get(links_json, "num_paths", cJSON_Number, link_out_path);
  cJSON *nbytes_json = json_hdr_get(links_json, "path_bytes", cJSON_Number, link_out_path);

  status("Number of kmers with links %li -> %zu", nkmers_json->valueint, tree_stats.num_trees_with_links);
  status("Number of links %li -> %zu", nlinks_json->valueint, tree_stats.num_links);
  status("Number of bytes %li -> %zu", nbytes_json->valueint, tree_stats.num_link_bytes);

  if(save)
  {
    // Update JSON
    nkmers_json->valuedouble = nkmers_json->valueint = tree_stats.num_trees_with_links;
    nlinks_json->valuedouble = nlinks_json->valueint = tree_stats.num_links;
    nbytes_json->valuedouble = nbytes_json->valueint = tree_stats.num_link_bytes;

    char *json_str = cJSON_Print(newhdr);
    if(gzputs(link_gz, json_str) != (int)strlen(json_str))
      die("Cannot write ctp file to: %s", link_out_path);
    free(json_str);

    gzputs(link_gz, "\n\n");
    gzputs(link_gz, ctp_explanation_comment);
    gzputs(link_gz, "\n");

    // Append the body written to the temp file after the updated header
    fseek(link_tmp_fh, 0, SEEK_SET);
    char *tmp = ctx_malloc(4*ONE_MEGABYTE);
    size_t s;
    while((s = fread(tmp, 1, 4*ONE_MEGABYTE, link_tmp_fh)) > 0) {
      if(gzwrite(link_gz, tmp, s) != (int)s)
        die("Cannot write to output: %s", link_out_path);
    }
    ctx_free(tmp);

    gzclose(link_gz);
    fclose(link_tmp_fh);
  }

  // Write histogram to file
  if(hist_fh) {
    size_t i, j;
    fprintf(hist_fh, " ");
    for(j = 1; j < hist_covgsize; j++) fprintf(hist_fh, ",covg.%02zu", j);
    fprintf(hist_fh, "\n");
    for(i = 1; i < hist_distsize; i++) {
      fprintf(hist_fh, "dist.%02zu", i);
      for(j = 1; j < hist_covgsize; j++) {
        fprintf(hist_fh, ",%"PRIu64, hists[i][j]);
      }
      fprintf(hist_fh, "\n");
    }
  }

  if(thresh_fh) {
    // Use median of first five cutoffs
    print_suggest_cutoff(6, hist_covgsize, hists, thresh_fh);
  }

  // NOTE(review): thresh_fh is never fclose()d here — confirm intended
  if(hist_fh && hist_fh != stdout) fclose(hist_fh);

  if(list) { fclose(list_fh); }
  if(plot) { fclose(plot_fh); }

  ctx_free(hists);
  cJSON_Delete(newhdr);
  strbuf_dealloc(&link_tmp_path);
  ltree_dealloc(&ltree);
  size_buf_dealloc(&countbuf);
  size_buf_dealloc(&jposbuf);
  strbuf_dealloc(&kmerbuf);
  strbuf_dealloc(&juncsbuf);
  strbuf_dealloc(&seqbuf);
  strbuf_dealloc(&outbuf);

  return EXIT_SUCCESS;
}
// Benchmark page-in (major-fault reads of an mmapped file) and clean
// page-out (dropping clean page-cache pages via madvise/fadvise DONTNEED).
//
// test_runs: number of measurement iterations
// file_size: size in bytes of the temporary backing file
// Returns 0 on success (and prints MB/s for both directions), -1 on error.
int pageinout_test(int test_runs, unsigned long long file_size)
{
    int fd;
    char tmpname[] = "pageinoutXXXXXX";
    unsigned char *vec;            // page-residency vector (see check_caching)
    int i;
    long long j;
    volatile char *buf;            // volatile: keep per-page reads in the loop
    int ret = -1;
    int rc;
    struct timeval begin_time, end_time, elapsed_time, total_time_in, total_time_out;
    long pagesize = sysconf(_SC_PAGE_SIZE);

    timerclear(&total_time_in);
    timerclear(&total_time_out);

    fd = create_tmp_file(tmpname, file_size);
    if (fd < 0) {
        return -1;
    }

    vec = alloc_mincore_vec(file_size);
    if (vec == NULL) {
        goto err_alloc;
    }

    buf = mmap(NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (buf == ((void *)-1)) {     // i.e. MAP_FAILED
        fprintf(stderr, "Failed to mmap file: %s\n", strerror(errno));
        goto err_mmap;
    }

    // Pages must start out non-resident, or the page-in numbers lie
    if (!check_caching((void *)buf, vec, file_size, false)) {
        goto err;
    }

    for (i = 0; i < test_runs; i++) {
        // Time faulting every page in
        gettimeofday(&begin_time, NULL);
        //Read backwards to prevent mmap prefetching
        for (j = ((file_size - 1) & ~(pagesize - 1)); j >= 0; j -= pagesize) {
            buf[j];                // one read per page => one fault per page
        }
        gettimeofday(&end_time, NULL);
        timersub(&end_time, &begin_time, &elapsed_time);
        timeradd(&total_time_in, &elapsed_time, &total_time_in);

        // Every page should now be resident
        if (!check_caching((void *)buf, vec, file_size, true)) {
            goto err;
        }

        // Time asking the kernel to drop the (clean) pages again
        gettimeofday(&begin_time, NULL);
        rc = madvise((void *)buf, file_size, MADV_DONTNEED) ||
             posix_fadvise(fd, 0, file_size, POSIX_FADV_DONTNEED);
        gettimeofday(&end_time, NULL);
        if (rc) {
            fprintf(stderr, "posix_fadvise/madvise DONTNEED failed\n");
            goto err;
        }
        timersub(&end_time, &begin_time, &elapsed_time);
        timeradd(&total_time_out, &elapsed_time, &total_time_out);

        // And every page should be gone again
        if (!check_caching((void *)buf, vec, file_size, false)) {
            goto err;
        }
    }

    printf("page-in: %llu MB/s\n",
           (file_size * test_runs * USEC_PER_SEC) /
           (1024 * 1024 * (total_time_in.tv_sec * USEC_PER_SEC + total_time_in.tv_usec)));
    printf("page-out (clean): %llu MB/s\n",
           (file_size * test_runs * USEC_PER_SEC) /
           (1024 * 1024 * (total_time_out.tv_sec * USEC_PER_SEC + total_time_out.tv_usec)));
    ret = 0;

err:
    munmap((void *)buf, file_size);
err_mmap:
    free(vec);
err_alloc:
    close(fd);
    return ret;
}