/* Exercise the garbage collector: build the hash then sweep it, and
 * repeat once more to confirm a second build/sweep cycle also works.
 * Always returns 0 (failures would surface inside the called helpers). */
int test_gc()
{
    for (int pass = 0; pass < 2; pass++) {
        build_hash();
        gc_sweep(gc);
    }
    return 0;
}
/* Test to see if we can retrieve data written to the hash */ int test_read() { build_hash(); for(int i=0; i <10; i++) { /* an ineffcient means of doing this */ gc_alloc(gc, 0, 40, (void **)&key1); gc_alloc(gc, 0, 40, (void **)&expected); snprintf(key1, 40, "k%i", i); snprintf(expected, 40, "v%i", i); /* look up a key, we've previously set */ if(hash_get(hash, (void*)key1, (void **)&value)) { /* is the returned value what we expected? */ if(strcmp(expected, value) !=0) { printf("'%s': '%s' != '%s'\n",key1, expected, value); return 1; } } else { /* test case failed */ return 1; } } /* everything passed */ return 0; }
int main(){ printf("******* INTIATING COMPILER ***********\n"); //printf("Choose Relevant stage of compiler:\n"); //printf("1. Lexical Analysis\n"); //printf("2. Parsing\n"); //printf("3. Semantic Analysis\n"); //printf("4. Code Generation\n"); int choice,i; char c; if(1){ hashTable* T = build_hash(); // build the hash table to maintain state of tokens printStoreHash(T); // store hashTable in a file Dnode* DFA = buildDFA(); memset(LexicalUnits,'\0',sizeof(LexicalUnits)); lexerCode(DFA,T,LexicalUnits,tokenValue,line); printf("\nLexer tokens have been generated in Lexer Tokens.txt\n"); pnode* root=NULL; root=parse(LexicalUnits,tokenValue,line); printf("\nParse tree generated in parseTree.txt\n"); pnode* ast=getAST(root); printf("\nAbstarct Syntax Tree Generated in AbstractSyntaxTree.txt\n"); Master *sym = (Master*)malloc(sizeof (Master)); getSymbolTable(sym,ast); printf("\nSymbol Table Generated in symbolTable.txt\n"); GenerateCode(sym,ast); printf("\nCode Generation Successfull\n\n"); } return 0; }
/* Test for a bad read: a key that was never inserted must not be found.
 * Returns 1 if the lookup unexpectedly succeeds, 0 otherwise. */
int test_bad_read()
{
    build_hash();

    /* Unset values must actually fail to resolve. */
    return hash_get(hash, (void *)"bad", (void **)&value) ? 1 : 0;
}
/* Insert (key, val) into the hash table.
 * If the key is already present the call is a no-op. For a single-slot
 * table (n == 1) the key is hashed and stored directly. Otherwise the key
 * is parked in the first free cell, the table is rebuilt at the enlarged
 * size m = C * (n + 1) via repeated graph peeling, and n is updated to m.
 * NOTE(review): when n > 1 the linear scan below silently does nothing if
 * no free cell exists -- confirm callers guarantee spare capacity. */
inline void insert( const uint8_t key, T* val ) {
    // If it is already present it will not be added.
    if(lookup( key )) return;
    if(n == 1){
        // Fast path: single-slot table, hash directly, no rebuild needed.
        murmur_hash_type h1 = murmur_hash::murmurHash3( &key, 1, seed1 ) % n;
        hash_table[h1].key = key;
        hash_table[h1].value = val;
        hash_table[h1].sum = h1;
        return;
    }
    // Insert the key in the first free cell.
    for(uint16_t i = 0; i < n; i++){
        if(hash_table[i].value == nullptr){
            hash_table[i].key = key;
            hash_table[i].value = val;
            break;
        }
    }
    // New (larger) table size; C is the expansion factor.
    // NOTE(review): the uint16_t cast can truncate for large n -- verify
    // the intended maximum table size fits in 16 bits.
    uint16_t m = (uint16_t) (C * (n + 1));
    // Create the list of graph nodes used by the peeling algorithm.
    node_t* nodes = new node_t[m];
    // Retry until the graph can be fully peeled (acyclic construction).
    while(true){
        compute_degree( nodes, m );
        if(peel_graph( nodes, m )) break;
    }
    build_hash( nodes, m );
    // Update the size.
    n = m;
    delete[] nodes;
}
/* Construct a GLPK-backed hasher: create a fresh minimisation problem,
 * add the trace variables and the per-string constraints, build the hash,
 * then drop duplicate motifs from the motif set.
 * NOTE(review): _lp is created here; presumably the destructor releases it
 * with glp_delete_prob -- confirm.
 * Fix vs. original: removed the stray ';' after the function body (an
 * empty declaration, ill-formed before C++11 and a -pedantic warning). */
B2GlpkHasher::B2GlpkHasher(const B2TraceCoeffs &trace_coeffs,
                           const B2StrSet &str_set)
    : B2HasherBase(str_set)
{
    _lp = glp_create_prob();
    glp_set_prob_name(_lp, "Bouma2-GLPK");
    glp_set_obj_dir(_lp, GLP_MIN);
    add_trace_vars(trace_coeffs);
    add_str_constraints();
#ifdef B2_GLPK_DEBUG
    /* Dump the LP problem to a file for offline inspection. */
    glp_write_prob(_lp, 0, b2_preproc_config(B2_CFG_GLPK_DEBUG_FILE).c_str());
#endif //B2_GLPK_DEBUG
    build_hash();
    _motif_set.remove_duplicates(trace_coeffs);
}
/* Driver: load names from the file given as argv[1], build a hash table
 * over them, report collisions, and verify every name can be found.
 * Returns 0 on success, 1 on usage error.
 * Fixes vs. original: argv[1] was dereferenced without checking argc
 * (NULL deref when run with no arguments), and main had no return. */
int main(int argc, char **argv)
{
    char **table;
    char *names;
    unsigned long size;
    int hash_size;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <names-file>\n", argv[0]);
        return 1;
    }

    printf("get_names\n");
    get_names(argv[1], &names, &size);

    printf("build_hash\n");
    build_hash(names, size, &table, &hash_size);
    report_collisions(table, hash_size);

    printf("validate_names\n");
    validate_names(names, size, table, hash_size);

    free(table);
    /* NOTE(review): fd is a file-scope global, presumably opened inside
     * get_names -- confirm it is valid (and still open) here. */
    close(fd);
    return 0;
}
// Return the longest palindromic substring of s.
// Builds the '#'-interleaved global string gs (e.g. "abc" -> "a#b#c"),
// hashes it with build_hash(), then probes every centre with bcheck()
// and keeps the largest radius, preferring odd-index centres on ties.
// The answer is rebuilt by copying even-index (non-'#') characters.
string longestPalindrome(string s) {
    gs = "";
    for (int k = 0; k < (int)s.length(); k++) {
        if (k > 0) gs += "#";
        gs += s[k];
    }
    build_hash();

    int bestPos = -1, bestLen = -1;
    for (int c = 0; c < (int)gs.length(); c++) {
        int radius = bcheck(c);
        bool better = (radius > bestLen) || (radius == bestLen && c % 2 != 0);
        if (better) {
            bestPos = c;
            bestLen = radius;
        }
    }

    // Reconstruct the palindrome, skipping the '#' separators.
    string result = "";
    for (int j = bestPos - bestLen; j <= bestPos + bestLen; j++) {
        if (j % 2 == 0) result += gs[j];
    }
    return result;
}
/* Test that hash_erase() actually removes a key from the table.
 * Returns 0 on success, 1 if the key was missing up front or is still
 * findable after the erase. */
int test_erase()
{
    build_hash();

    /* The key must exist before we try to erase it. */
    if (!hash_get(hash, (void *)"k1", (void **)&value)) {
        printf("Key k1 missing from initial hash.\n");
        return 1;
    }

    /* Erase it, then clear the output slot so a stale pointer cannot
     * mask a failed lookup. */
    hash_erase(hash, (void *)"k1");
    value = "";

    /* The key should no longer be found. */
    if (hash_get(hash, (void *)"k1", (void **)&value)) {
        printf("Found: '%s'\n", value);
        return 1;
    }
    return 0;
}
// Entry function to pack data.
// Selects the price/emit callbacks for the configured format (MegaLZ,
// Hrum or Hrust), builds the two-byte hash over the input, constructs an
// optimal-parse chain (update_optch per position), then emits the packed
// stream. Returns zero on any error, nonzero on success.
// Cleanup is via fallthrough/goto into the ERROR block at the end.
ULONG pack(void)
{
    ULONG (*get_lz_price)(OFFSET position, struct lzcode * lzcode) = NULL; // generates correct bitlen (price) of code
    ULONG (*emit)(struct optchain * optch, ULONG actual_len) = NULL;       // emits lzcode to the output bit/byte stream

    ULONG success=1;
    ULONG actual_len; // actual length of packing (to account for ZX headers containing last unpacked bytes)

    UBYTE * hash;

    struct optchain * optch=NULL;

    static struct lzcode codes[MAX_CODES_SIZE]; // generate codes here; static to ensure it's not on the stack

    UBYTE curr_byte, last_byte;
    UWORD index;
    OFFSET position;

    // Some preparations: pick the price/emit callbacks for the format.
    //
    if( wrk.packtype==PK_MLZ )
    {
        get_lz_price = &get_lz_price_megalz;
        emit         = &emit_megalz;
    }
    else if( wrk.packtype==PK_HRM )
    {
        get_lz_price = &get_lz_price_hrum;
        emit         = &emit_hrum;
    }
    else if( wrk.packtype==PK_HST )
    {
        get_lz_price = &get_lz_price_hrust;
        emit         = &emit_hrust;
    }
    else
    {
        printf("mhmt-pack.c:pack() - format unsupported!\n");
        return 0;
    }

    // ZX headers keep the last few bytes unpacked, so shorten the
    // packed region accordingly (5 bytes for Hrum, 6 for Hrust).
    actual_len = wrk.inlen;
    if( wrk.zxheader )
    {
        if( wrk.packtype==PK_HRM )
        {
            actual_len -= 5;
        }
        else if( wrk.packtype==PK_HST )
        {
            actual_len -= 6;
        }
        else
        {
            printf("mhmt-pack.c:pack() - there must be no zxheader for anything except hrust or hrum!\n");
            return 0;
        }
    }

    // Initializations and preparations.
    init_tb();

    hash = build_hash(wrk.indata, actual_len, wrk.prelen);
    if( !hash )
    {
        printf("mhmt-pack.c:pack() - build_hash() failed!\n");
        success = 0;
    }

    if( success )
    {
        optch = make_optch(actual_len);
        if( !optch )
        {
            printf("mhmt-pack.c:pack() - can't make optchain array!\n");
            success = 0;
        }
    }

    // Go packing!
    if( success )
    {
        // Fill TBs (two-byter chains) with prebinary data: the window of
        // already-known bytes BEFORE the input proper, indexed with
        // negative positions (indata[-prelen .. 0]).
        if( wrk.prebin )
        {
            curr_byte=wrk.indata[0LL-wrk.prelen];
            //
            for(position=(1LL-wrk.prelen);position<=0;position++)
            {
                last_byte = curr_byte;
                curr_byte = wrk.indata[position];

                index = (last_byte<<8) + curr_byte;

                if( !add_tb(index,position) )
                {
                    printf("mhmt-pack.c:pack() - add_tb() failed!\n");
                    success = 0;
                    goto ERROR;
                }
            }
        }

        if( !wrk.greedy ) // default optimal coding
        {
            // Go generating lzcodes byte-by-byte.
            //
            curr_byte = wrk.indata[0];
            //
            for(position=1;position<actual_len;position++)
            {
                last_byte = curr_byte;
                curr_byte = wrk.indata[position];

                // Add current two-byter to the chains.
                index = (last_byte<<8) + curr_byte;

                if( !add_tb(index,position) )
                {
                    printf("mhmt-pack.c:pack() - add_tb() failed!\n");
                    success = 0;
                    goto ERROR;
                }

                // Search lzcodes for given position.
                make_lz_codes(position, actual_len, hash, codes);

                // Update optimal chain with lzcodes.
                update_optch(position, codes, get_lz_price, optch);
            }

            // All input bytes scanned, chain built, so now reverse it
            // (prepare for scanning in output generation part).
            reverse_optch(optch, actual_len);
        }
        else // greedy coding
        {
            printf("mhmt-pack.c:pack() - greedy coding not supported!\n");
            success = 0;
        }

        // Data built, now emit packed file.
        success = success && (*emit)(optch, actual_len);
    }

ERROR:
    // Shared cleanup path; both helpers are reached on every exit from
    // the packing section above (goto or fallthrough).
    free_optch(optch);
    destroy_hash(hash, wrk.prelen);

    return success;
}