/*
 * UpdateRec: intersect the caller's records [rec_b[i], rec_e[i]) with the
 * records stored in file `name`, writing each overlap to the file `name`+"2".
 * If `name` cannot be read, it is (re)created from the caller's records and
 * those records are used as the baseline, so the output equals the input.
 * A warning is appended to this thread's report buffer in that case.
 */
void UpdateRec(char *name, int *rec_b, int *rec_e, int nrec, int ithread) {
   int rec_b1[1000], rec_e1[1000], nrec1;
   FILE *frec;
   int irec, irec1;

   if( ! read_rec(1, name, 0, rec_b1, rec_e1, &nrec1) ) {
      reports[ithread].tail += sprintf(reports[ithread].tail, "*** Warning: cannot open record file %s ***", name);
      frec = fopen(name, "w");
      if( frec != NULL ) {  /* bug fix: fopen() result was not checked */
         for(irec=0; irec<nrec; irec++) fprintf(frec, "%d %d\n", rec_b[irec], rec_e[irec]);
         fclose(frec);
      }
      /* bug fix: rec_b1/rec_e1/nrec1 were left uninitialized when read_rec()
         failed, yet the intersection loop below read them (undefined
         behavior).  Fall back to the caller's own records so the
         intersection reproduces them unchanged. */
      for(irec=0; irec<nrec; irec++) { rec_b1[irec] = rec_b[irec]; rec_e1[irec] = rec_e[irec]; }
      nrec1 = nrec;
   }

   int recB, recE;
   char name2[100];
   /* bug fix: snprintf instead of sprintf -- `name` is caller-supplied and
      could overflow the 100-byte buffer */
   snprintf(name2, sizeof(name2), "%s2", name);
   frec = fopen(name2, "w");
   if( frec == NULL ) return;  /* bug fix: fprintf through a NULL FILE* */

   /* Emit the overlap of every (input, baseline) record pair.  The early
      `break` assumes both lists are sorted by begin position -- TODO
      confirm against read_rec()'s on-disk ordering. */
   for(irec=0; irec<nrec; irec++)
      for(irec1=0; irec1<nrec1; irec1++) {
         if(rec_b[irec] >= rec_e1[irec1]) continue;  /* baseline ends before input starts */
         if(rec_e[irec] <= rec_b1[irec1]) break;     /* baseline starts after input ends  */
         recB = max(rec_b[irec], rec_b1[irec1]);
         recE = min(rec_e[irec], rec_e1[irec1]);
         fprintf(frec, "%d %d\n", recB, recE);
      }
   fclose(frec);
}
/*
 * restore_tdb: create a fresh tdb database at `fname` (refusing to clobber
 * an existing one via O_EXCL) and populate it with records read from stdin.
 * Returns 0 on success, 1 on any failure.
 */
static int restore_tdb(const char *fname)
{
	TDB_CONTEXT *tdb = tdb_open(fname, 0, 0, O_RDWR|O_CREAT|O_EXCL, 0666);

	if (tdb == NULL) {
		perror("tdb_open");
		fprintf(stderr, "Failed to open %s\n", fname);
		return 1;
	}

	/* Pull records until read_rec signals end-of-input. */
	for (;;) {
		int eof = 0;

		if (read_rec(stdin, tdb, &eof) != -1)
			continue;
		if (eof)
			break;		/* clean end of stream */
		return 1;		/* hard read error */
	}

	if (tdb_close(tdb) != 0) {
		fprintf(stderr, "Error closing tdb\n");
		return 1;
	}
	return 0;
}
// Sequentially process the files in [start, end): each file is first cleaned
// by an external Python script into a per-thread temp TSV, then read line by
// line into the leveldb database via read_rec.  Returns true if any line (or,
// after this fix, any cleaning step) reported an error.
bool serial_read_files(It start, It end, leveldb::DB* db) {
  const auto tid = std::this_thread::get_id();
  bool had_errors = false;
  int ctr = 0;
  for (auto it = start; it != end; ++it) {
    // Per-thread, per-file temp name so parallel workers don't collide.
    std::stringstream name;
    name << "/tmp/tmp-cleaned-" << tid << "-" << std::distance(start, it) << ".tsv";
    const std::string filename = name.str();

    std::stringstream cmd;
    cmd << "python /n/fs/gcf/COS513-Finance/clean_single_csv.py ";
    cmd << *it << " " << filename;
    const auto cmdstr = cmd.str();
    std::cout << "tid " << tid << ": running " << cmdstr << std::endl;
    // Bug fix: the return value of system() was ignored, so a failed
    // cleaning step silently fed a missing/stale temp file to the reader.
    if (system(cmdstr.c_str()) != 0) {
      std::cerr << "tid " << tid << ": command failed: " << cmdstr << std::endl;
      had_errors = true;
      continue;
    }

    std::ifstream in(filename);
    // Bug fix: the stream open was never checked before iterating.
    if (!in) {
      std::cerr << "tid " << tid << ": cannot open " << filename << std::endl;
      had_errors = true;
      continue;
    }
    std::cout << "tid " << tid << ": started " << filename << std::endl;
    iterate_lines([&had_errors, db, &ctr](std::string str) {
      had_errors |= read_rec(db, std::move(str), ctr);
    }, in);
  }
  return had_errors;
}
// Reset all collected state, remember where `type` came from, recursively
// gather its specifiers, then pick up a type-level alignment attribute
// unless read_rec already recorded one.
void ansi_c_convert_typet::read(const typet &type)
{
  clear();
  source_location=type.source_location();
  read_rec(type);

  if(type.find(ID_C_alignment).is_not_nil() && !aligned)
  {
    aligned=true;
    alignment=static_cast<const exprt &>(type.find(ID_C_alignment));
  }
}
void ansi_c_convert_typet::read_rec(const typet &type) { if(type.id() == "merged_type") { forall_subtypes(it, type) read_rec(*it); } else if(type.id() == "signed") signed_cnt++; else if(type.id() == "unsigned") unsigned_cnt++; else if(type.id() == "volatile") c_qualifiers.is_volatile = true; else if(type.id() == "const") c_qualifiers.is_constant = true; else if(type.id() == "restricted") c_qualifiers.is_restricted = true; else if(type.id() == "char") char_cnt++; else if(type.id() == "int") int_cnt++; else if(type.id() == "int8") int8_cnt++; else if(type.id() == "int16") int16_cnt++; else if(type.id() == "int32") int32_cnt++; else if(type.id() == "int64") int64_cnt++; else if(type.id() == "ptr32") ptr32_cnt++; else if(type.id() == "ptr64") ptr64_cnt++; else if(type.id() == "short") short_cnt++; else if(type.id() == "long") long_cnt++; else if(type.id() == "double") double_cnt++; else if(type.id() == "float") float_cnt++; else if(type.is_bool()) bool_cnt++; else if(type.id() == "static") c_storage_spec.is_static = true; else if(type.id() == "inline") c_storage_spec.is_inline = true; else if(type.id() == "extern") c_storage_spec.is_extern = true; else if(type.id() == "typedef") c_storage_spec.is_typedef = true; else if(type.id() == "register") c_storage_spec.is_register = true; else if(type.id() == "auto") { // ignore } else if(type == get_nil_irep()) { // ignore } else other.push_back(type); }
// Initialise this converter from `type`: wipe any previously collected
// state, record the source location, then recursively scan the specifiers.
void ansi_c_convert_typet::read(const typet &type)
{
  clear();
  location = type.location();
  read_rec(type);
}
// Recursively walk a (possibly merged) type-specifier node and tally each
// element -- qualifiers, storage classes, width/sign counters, attributes --
// into this converter's members.
// NOTE(review): this excerpt is truncated; the trailing `else` at the end of
// this block continues past the visible chunk.
void ansi_c_convert_typet::read_rec(const typet &type)
{
  if(type.id()==ID_merged_type)
  {
    // a flat list of specifiers, e.g. "unsigned long int"
    forall_subtypes(it, type)
      read_rec(*it);
  }
  else if(type.id()==ID_signed)
    signed_cnt++;
  else if(type.id()==ID_unsigned)
    unsigned_cnt++;
  else if(type.id()==ID_ptr32)
    c_qualifiers.is_ptr32=true;
  else if(type.id()==ID_ptr64)
    c_qualifiers.is_ptr64=true;
  else if(type.id()==ID_volatile)
    c_qualifiers.is_volatile=true;
  else if(type.id()==ID_asm)
  {
    // These are called 'asm labels' by GCC.
    // ignore for now
  }
  else if(type.id()==ID_const)
    c_qualifiers.is_constant=true;
  else if(type.id()==ID_restrict)
    c_qualifiers.is_restricted=true;
  else if(type.id()==ID_atomic)
    c_qualifiers.is_atomic=true;
  else if(type.id()==ID_atomic_type_specifier)
  {
    // this gets turned into the qualifier, uh
    c_qualifiers.is_atomic=true;
    read_rec(type.subtype());
  }
  else if(type.id()==ID_char)
    char_cnt++;
  else if(type.id()==ID_int)
    int_cnt++;
  else if(type.id()==ID_int8)
    int8_cnt++;
  else if(type.id()==ID_int16)
    int16_cnt++;
  else if(type.id()==ID_int32)
    int32_cnt++;
  else if(type.id()==ID_int64)
    int64_cnt++;
  else if(type.id()==ID_gcc_float128)
    gcc_float128_cnt++;
  else if(type.id()==ID_gcc_int128)
    gcc_int128_cnt++;
  else if(type.id()==ID_gcc_attribute_mode)
  {
    // remember the whole attribute node; interpreted later
    gcc_attribute_mode=type;
  }
  else if(type.id()==ID_gcc_attribute)
  {
    // other GCC attributes are deliberately ignored here
  }
  else if(type.id()==ID_msc_based)
  {
    // MSVC __based(expr): keep the single operand
    const exprt &as_expr=static_cast<const exprt &>(static_cast<const irept &>(type));
    assert(as_expr.operands().size()==1);
    msc_based=as_expr.op0();
  }
  else if(type.id()==ID_custom_bv)
  {
    // custom-width bit-vector: width expression stored for later evaluation
    bv_cnt++;
    const exprt &size_expr=
      static_cast<const exprt &>(type.find(ID_size));
    bv_width=size_expr;
  }
  else if(type.id()==ID_custom_floatbv)
  {
    // custom float: total width plus fraction width
    floatbv_cnt++;
    const exprt &size_expr=
      static_cast<const exprt &>(type.find(ID_size));
    const exprt &fsize_expr=
      static_cast<const exprt &>(type.find(ID_f));
    bv_width=size_expr;
    fraction_width=fsize_expr;
  }
  else if(type.id()==ID_custom_fixedbv)
  {
    // custom fixed-point: total width plus fraction width
    fixedbv_cnt++;
    const exprt &size_expr=
      static_cast<const exprt &>(type.find(ID_size));
    const exprt &fsize_expr=
      static_cast<const exprt &>(type.find(ID_f));
    bv_width=size_expr;
    fraction_width=fsize_expr;
  }
  else if(type.id()==ID_short)
    short_cnt++;
  else if(type.id()==ID_long)
    long_cnt++;
  else if(type.id()==ID_double)
    double_cnt++;
  else if(type.id()==ID_float)
    float_cnt++;
  else if(type.id()==ID_c_bool)
    c_bool_cnt++;
  else if(type.id()==ID_proper_bool)
    proper_bool_cnt++;
  else if(type.id()==ID_complex)
    complex_cnt++;
  else if(type.id()==ID_static)
    c_storage_spec.is_static=true;
  else if(type.id()==ID_thread_local)
    c_storage_spec.is_thread_local=true;
  else if(type.id()==ID_inline)
    c_storage_spec.is_inline=true;
  else if(type.id()==ID_extern)
    c_storage_spec.is_extern=true;
  else if(type.id()==ID_typedef)
    c_storage_spec.is_typedef=true;
  else if(type.id()==ID_register)
    c_storage_spec.is_register=true;
  else if(type.id()==ID_auto)
  {
    // ignore
  }
  else if(type.id()==ID_packed)
    packed=true;
  else if(type.id()==ID_aligned)
  {
    aligned=true;

    // may come with size or not
    if(type.find(ID_size).is_nil())
      alignment=exprt(ID_default);
    else
      alignment=static_cast<const exprt &>(type.find(ID_size));
  }
  else if(type.id()==ID_transparent_union)
  {
    c_qualifiers.is_transparent_union=true;
  }
  else if(type.id()==ID_vector)
    vector_size=to_vector_type(type).size();
  else if(type.id()==ID_void)
  {
    // we store 'void' as 'empty'
    typet tmp=type;
    tmp.id(ID_empty);
    other.push_back(tmp);
  }
  else if(type.id()==ID_msc_declspec)
  {
    // MSVC __declspec(...): operands are symbols naming the options
    const exprt &as_expr=
      static_cast<const exprt &>(static_cast<const irept &>(type));

    forall_operands(it, as_expr)
    {
      // these are symbols
      const irep_idt &id=it->get(ID_identifier);

      if(id=="thread")
        c_storage_spec.is_thread_local=true;
      else if(id=="align")
      {
        assert(it->operands().size()==1);
        aligned=true;
        alignment=it->op0();
      }
    }
  }
  else
/**
 * ccache_read(path):
 * Read the chunkification cache (if present) from the directory ${path};
 * return a Patricia tree mapping absolute paths to cache entries.
 * On any failure all partially-constructed state is released via the
 * err0..err6 unwind chain and NULL is returned; a missing cache file is
 * NOT a failure and yields an empty cache.
 */
CCACHE *
ccache_read(const char * path)
{
	struct ccache_internal * C;
	struct ccache_read_internal R;
	struct ccache_record * ccr;
#ifdef HAVE_MMAP
	struct stat sb;
	off_t fpos;
	long int pagesize;
#endif
	size_t i;
	uint8_t N[4];	/* little-endian record count read from the file */

	/* The caller must pass a file name to be read. */
	assert(path != NULL);

	/* Allocate memory for the cache. */
	if ((C = malloc(sizeof(struct ccache_internal))) == NULL)
		goto err0;
	memset(C, 0, sizeof(struct ccache_internal));

	/* Create a Patricia tree to store cache entries. */
	if ((C->tree = patricia_init()) == NULL)
		goto err1;

	/* Construct the name of cache file. */
	if (asprintf(&R.s, "%s/cache", path) == -1) {
		warnp("asprintf");
		goto err2;
	}

	/* Open the cache file. */
	if ((R.f = fopen(R.s, "r")) == NULL) {
		/* ENOENT isn't an error. */
		if (errno != ENOENT) {
			warnp("fopen(%s)", R.s);
			goto err3;
		}

		/* No cache exists on disk; return an empty cache. */
		goto emptycache;
	}

	/**
	 * We read the cache file in three steps:
	 * 1. Read a little-endian uint32_t which indicates the number of
	 *    records in the cache file.
	 * 2. Read N (record, path suffix) pairs and insert them into a
	 *    Patricia tree.
	 * 3. Iterate through the tree and read chunk headers and compressed
	 *    entry trailers.
	 */

	/* Read the number of cache entries. */
	if (fread(N, 4, 1, R.f) != 1) {
		if (ferror(R.f))
			warnp("Error reading cache: %s", R.s);
		else
			warn0("Error reading cache: %s", R.s);
		goto err4;
	}
	R.N = le32dec(N);

	/* Read N (record, path suffix) pairs. */
	R.sbuf = NULL;
	R.sbuflen = R.slen = R.datalen = 0;
	for (i = 0; i < R.N; i++) {
		if ((ccr = read_rec(&R)) == NULL)
			goto err5;
		if (patricia_insert(C->tree, R.sbuf, R.slen, ccr))
			goto err5;
		/* Running totals of in-memory usage for chunk headers and
		   compressed trailers. */
		C->chunksusage += ccr->nch * sizeof(struct chunkheader);
		C->trailerusage += ccr->tzlen;
	}

#ifdef HAVE_MMAP
	/* Obtain page size, since mmapped regions must be page-aligned. */
	if ((pagesize = sysconf(_SC_PAGESIZE)) == -1) {
		warnp("sysconf(_SC_PAGESIZE)");
		goto err5;
	}

	/* Map the remainder of the cache into memory. */
	fpos = ftello(R.f);
	if (fpos == -1) {
		warnp("ftello(%s)", R.s);
		goto err5;
	}
	if (fstat(fileno(R.f), &sb)) {
		warnp("fstat(%s)", R.s);
		goto err5;
	}
	/* Sanity check: the data section must run exactly to end-of-file. */
	if (sb.st_size != (off_t)(fpos + R.datalen)) {
		warn0("Cache has incorrect size (%jd, expected %jd)\n",
		    (intmax_t)(sb.st_size), (intmax_t)(fpos + R.datalen));
		goto err5;
	}
	/* Map from the page boundary at or below fpos; C->datalen includes
	   the (fpos % pagesize) bytes of slack before the real data. */
	C->datalen = R.datalen + (fpos % pagesize);
	if ((C->data = mmap(NULL, C->datalen, PROT_READ,
#ifdef MAP_NOCORE
	    MAP_PRIVATE | MAP_NOCORE,
#else
	    MAP_PRIVATE,
#endif
	    fileno(R.f), fpos - (fpos % pagesize))) == MAP_FAILED) {
		warnp("mmap(%s)", R.s);
		goto err5;
	}
	R.data = (uint8_t *)C->data + (fpos % pagesize);
#else
	/* Allocate space. */
	C->datalen = R.datalen;
	if (((C->data = malloc(C->datalen)) == NULL) && (C->datalen > 0))
		goto err5;
	/* NOTE(review): if C->datalen == 0, fread() returns 0 here and this
	   reports a spurious error -- confirm datalen > 0 is guaranteed
	   whenever records exist. */
	if (fread(C->data, C->datalen, 1, R.f) != 1) {
		warnp("fread(%s)", R.s);
		goto err6;
	}
	R.data = (uint8_t *)C->data;
#endif

	/* Iterate through the tree reading chunk headers and trailers. */
	if (patricia_foreach(C->tree, callback_read_data, &R)) {
		warnp("Error reading cache: %s", R.s);
		goto err6;
	}

	/* Free buffer used for storing paths. */
	free(R.sbuf);

	/* Close the cache file. */
	fclose(R.f);

	/* Free string allocated by asprintf. */
	free(R.s);

	/* Success! */
	return (C);

emptycache:
	/* Nothing went wrong, but there's nothing on disk. */
	free(R.s);
	return (C);

err6:
#ifdef HAVE_MMAP
	if (C->datalen > 0)
		munmap(C->data, C->datalen);
#else
	free(C->data);
#endif
err5:
	free(R.sbuf);
	patricia_foreach(C->tree, callback_free, NULL);
err4:
	fclose(R.f);
err3:
	free(R.s);
err2:
	patricia_free(C->tree);
err1:
	free(C);
err0:
	/* Failure! */
	return (NULL);
}
// Recursively walk a (possibly merged) type-specifier node and tally each
// element -- qualifiers, storage classes, sign/width counters, GCC mode and
// bit-vector attributes -- into this converter's members.  Unrecognised
// nodes are saved verbatim in `other`.  Throws 0 (after reporting via
// err_location/error) on an invalid bit-vector width.
void ansi_c_convert_typet::read_rec(const typet &type)
{
  if(type.id()==ID_merged_type)
  {
    // a flat list of specifiers, e.g. "unsigned long int"
    forall_subtypes(it, type)
      read_rec(*it);
  }
  else if(type.id()==ID_signed)
    signed_cnt++;
  else if(type.id()==ID_unsigned)
    unsigned_cnt++;
  else if(type.id()==ID_ptr32)
    c_qualifiers.is_ptr32=true;
  else if(type.id()==ID_ptr64)
    c_qualifiers.is_ptr64=true;
  else if(type.id()==ID_volatile)
    c_qualifiers.is_volatile=true;
  else if(type.id()==ID_asm)
  {
    // ignore for now
  }
  else if(type.id()==ID_const)
    c_qualifiers.is_constant=true;
  else if(type.id()==ID_restricted)
    c_qualifiers.is_restricted=true;
  else if(type.id()==ID_char)
    char_cnt++;
  else if(type.id()==ID_int)
    int_cnt++;
  else if(type.id()==ID_int8)
    int8_cnt++;
  else if(type.id()==ID_int16)
    int16_cnt++;
  else if(type.id()==ID_int32)
    int32_cnt++;
  else if(type.id()==ID_int64)
    int64_cnt++;
  else if(type.id()==ID_gcc_float128)
    gcc_float128_cnt++;
  else if(type.id()==ID_gcc_int128)
    gcc_int128_cnt++;
  else if(type.id()==ID_gcc_attribute_mode)
  {
    // GCC __attribute__((mode(...))): map the recognised modes to flags
    const exprt &size_expr=
      static_cast<const exprt &>(type.find(ID_size));

    if(size_expr.id()=="__QI__")
      gcc_mode_QI=true;
    else if(size_expr.id()=="__HI__")
      gcc_mode_HI=true;
    else if(size_expr.id()=="__SI__")
      gcc_mode_SI=true;
    else if(size_expr.id()=="__DI__")
      gcc_mode_DI=true;
    else
    {
      // we ignore without whining
    }
  }
  else if(type.id()==ID_bv)
  {
    // explicit-width bit-vector: the width must be a constant in [1, 1024]
    bv_cnt++;
    const exprt &size_expr=
      static_cast<const exprt &>(type.find(ID_size));

    mp_integer size_int;
    if(to_integer(size_expr, size_int))
    {
      err_location(location);
      error("bit vector width has to be constant");
      throw 0;
    }

    if(size_int<1 || size_int>1024)
    {
      err_location(location);
      error("bit vector width invalid");
      throw 0;
    }

    bv_width=integer2long(size_int);
  }
  else if(type.id()==ID_short)
    short_cnt++;
  else if(type.id()==ID_long)
    long_cnt++;
  else if(type.id()==ID_double)
    double_cnt++;
  else if(type.id()==ID_float)
    float_cnt++;
  else if(type.id()==ID_bool)
    c_bool_cnt++;
  else if(type.id()==ID_proper_bool)
    proper_bool_cnt++;
  else if(type.id()==ID_complex)
    complex_cnt++;
  else if(type.id()==ID_static)
    c_storage_spec.is_static=true;
  else if(type.id()==ID_thread_local)
    c_storage_spec.is_thread_local=true;
  else if(type.id()==ID_inline)
    c_storage_spec.is_inline=true;
  else if(type.id()==ID_extern)
    c_storage_spec.is_extern=true;
  else if(type.id()==ID_typedef)
    c_storage_spec.is_typedef=true;
  else if(type.id()==ID_register)
    c_storage_spec.is_register=true;
  else if(type.id()==ID_auto)
  {
    // ignore
  }
  else if(type.id()==ID_packed)
    packed=true;
  else if(type.id()==ID_aligned)
  {
    aligned=true;

    // may come with size or not
    if(type.find(ID_size).is_nil())
      alignment=exprt(ID_default);
    else
      alignment=static_cast<const exprt &>(type.find(ID_size));
  }
  else if(type.id()==ID_transparent_union)
  {
    c_qualifiers.is_transparent_union=true;
  }
  else if(type.id()==ID_vector)
    vector_size=to_vector_type(type).size();
  else if(type.id()==ID_void)
  {
    // we store 'void' as 'empty'
    typet tmp=type;
    tmp.id(ID_empty);
    other.push_back(tmp);
  }
  else
    other.push_back(type);
}