/*
** Parse a 'for' statement: forstat -> FOR (fornum | forlist) END.
** After the first variable name, '=' selects the numeric form and
** ','/'in' select the generic (list) form.
*/
static void forstat (LexState *ls, int line) {
  /* forstat -> FOR (fornum | forlist) END */
  FuncState *fs = ls->fs;
  TString *varname;
  BlockCnt bl;
  enterblock(fs, &bl, 1);  /* scope for loop and control variables */
  luaX_next(ls);  /* skip `for' */
  varname = str_checkname(ls);  /* first variable name */
  switch (ls->t.token) {
    case '=': fornum(ls, varname, line); break;
    case ',': case TK_IN: forlist(ls, varname); break;
    default: luaX_syntaxerror(ls, LUA_QL("=") " or " LUA_QL("in") " expected");
  }
  check_match(ls, TK_END, TK_FOR, line);  /* 'line' improves the error message */
  leaveblock(fs);  /* loop scope (`break' jumps to this point) */
}
/*
** Parse a 'repeat' statement: repeatstat -> REPEAT block UNTIL cond.
** Uses two nested blocks: bl1 is the breakable loop, bl2 is the variable
** scope (the condition can see locals declared inside the loop body).
** When the body creates upvalues, the back-jump must close them, so the
** loop is compiled as "if cond then break else close-and-repeat".
*/
static void repeatstat (LexState *ls, int line) {
  /* repeatstat -> REPEAT block UNTIL cond */
  int condexit;
  FuncState *fs = ls->fs;
  int repeat_init = luaK_getlabel(fs);  /* target of the back-jump */
  BlockCnt bl1, bl2;
  enterblock(fs, &bl1, 1);  /* loop block */
  enterblock(fs, &bl2, 0);  /* scope block */
  luaX_next(ls);  /* skip REPEAT */
  chunk(ls);
  check_match(ls, TK_UNTIL, TK_REPEAT, line);
  condexit = cond(ls);  /* read condition (inside scope block) */
  if (!bl2.upval) {  /* no upvalues? */
    leaveblock(fs);  /* finish scope */
    luaK_patchlist(ls->fs, condexit, repeat_init);  /* close the loop */
  }
  else {  /* complete semantics when there are upvalues */
    breakstat(ls);  /* if condition then break */
    luaK_patchtohere(ls->fs, condexit);  /* else... */
    leaveblock(fs);  /* finish scope... */
    luaK_patchlist(ls->fs, luaK_jump(fs), repeat_init);  /* and repeat */
  }
  leaveblock(fs);  /* finish loop */
}
/*
** Parse a table constructor '{ ... }' into expression *t.
** Emits OP_NEWTABLE first with placeholder sizes, then patches the array
** (B) and hash (C) size hints once the field counts are known.
** With LUA_OPTIONAL_COMMA the separator between fields may be omitted.
*/
static void constructor (LexState *ls, expdesc *t) {
  /* constructor -> ?? */
  FuncState *fs = ls->fs;
  int line = ls->linenumber;
  int pc = luaK_codeABC(fs, OP_NEWTABLE, 0, 0, 0);
  struct ConsControl cc;
  cc.na = cc.nh = cc.tostore = 0;
  cc.t = t;
  init_exp(t, VRELOCABLE, pc);
  init_exp(&cc.v, VVOID, 0);  /* no value (yet) */
  luaK_exp2nextreg(ls->fs, t);  /* fix it at stack top (for gc) */
  checknext(ls, '{');
#if LUA_OPTIONAL_COMMA
  for (;;) {
#else
  do {
#endif /* LUA_OPTIONAL_COMMA */
    lua_assert(cc.v.k == VVOID || cc.tostore > 0);
    if (ls->t.token == '}') break;
    closelistfield(fs, &cc);
    switch(ls->t.token) {
      case TK_NAME: {  /* may be listfields or recfields */
        luaX_lookahead(ls);
        if (ls->lookahead.token != '=')  /* expression? */
          listfield(ls, &cc);
        else
          recfield(ls, &cc);
        break;
      }
      case '[': {  /* constructor_item -> recfield */
        recfield(ls, &cc);
        break;
      }
      default: {  /* constructor_part -> listfield */
        listfield(ls, &cc);
        break;
      }
    }
#if LUA_OPTIONAL_COMMA
    /* separators are optional: consume one if present, stop on '}' */
    if (ls->t.token == ',' || ls->t.token == ';')
      next(ls);
    else if (ls->t.token == '}')
      break;
  }
#else
  } while (testnext(ls, ',') || testnext(ls, ';'));
#endif /* LUA_OPTIONAL_COMMA */
  check_match(ls, '}', '{', line);
  lastlistfield(fs, &cc);
  SETARG_B(fs->f->code[pc], luaO_int2fb(cc.na)); /* set initial array size */
  SETARG_C(fs->f->code[pc], luaO_int2fb(cc.nh)); /* set initial table size */
}

/* }====================================================================== */

/*
** Parse a function's parameter list: parlist -> [ param { `,' param } ].
** Plain NAME params become locals; '...' marks the function vararg and
** (with LUA_COMPAT_VARARG) also declares the legacy 'arg' parameter.
*/
static void parlist (LexState *ls) {
  /* parlist -> [ param { `,' param } ] */
  FuncState *fs = ls->fs;
  Proto *f = fs->f;
  int nparams = 0;
  f->is_vararg = 0;
  if (ls->t.token != ')') {  /* is `parlist' not empty? */
    do {
      switch (ls->t.token) {
        case TK_NAME: {  /* param -> NAME */
          new_localvar(ls, str_checkname(ls), nparams++);
          break;
        }
        case TK_DOTS: {  /* param -> `...' */
          luaX_next(ls);
#if defined(LUA_COMPAT_VARARG)
          /* use `arg' as default name */
          new_localvarliteral(ls, "arg", nparams++);
          f->is_vararg = VARARG_HASARG | VARARG_NEEDSARG;
#endif
          f->is_vararg |= VARARG_ISVARARG;
          break;
        }
        default: luaX_syntaxerror(ls, "<name> or " LUA_QL("...") " expected");
      }
    } while (!f->is_vararg && testnext(ls, ','));  /* '...' must be last */
  }
  adjustlocalvars(ls, nparams);
  /* 'arg' (if present) is not counted as a real parameter */
  f->numparams = cast_byte(fs->nactvar - (f->is_vararg & VARARG_HASARG));
  luaK_reserveregs(fs, fs->nactvar);  /* reserve register for parameters */
}
int tar_remove(const int fd, struct tar_t ** archive, const size_t filecount, const char * files[], const char verbosity){ if (fd < 0){ return -1; } // archive has to exist if (!archive || !*archive){ return -1; } if (filecount && !files){ return -1; } if (!filecount){ return 0; } // get file permissions struct stat st; if (fstat(fd, &st)){ RC_ERROR(stderr, "Error: Unable to stat archive: %s\n", strerror(rc)); } // reset offset of original file if (lseek(fd, 0, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Unable to seek file: %s\n", strerror(rc)); } // find first file to be removed that does not exist int ret = 0; char * bad = calloc(filecount, sizeof(char)); for(int i = 0; i < filecount; i++){ if (!exists(*archive, files[i], 0)){ V_PRINT(stderr, "Error: %s not found in archive\n", files[i]); bad[i] = 1; ret = -1; } } unsigned int read_offset = 0; unsigned int write_offset = 0; struct tar_t * prev = NULL; struct tar_t * curr = *archive; while(curr){ // get original size int total = 512; if ((curr -> type == REGULAR) || (curr -> type == NORMAL) || (curr -> type == CONTIGUOUS)){ total += oct2uint(curr -> size, 11); if (total % 512){ total += 512 - (total % 512); } } const int match = check_match(curr, filecount, bad, files); if (match < 0){ V_PRINT(stderr, "Error: Match failed\n"); return -1; } else if (!match){ // if the old data is not in the right place, move it if (write_offset < read_offset){ int got = 0; while (got < total){ // go to old data if (lseek(fd, read_offset, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Cannot seek: %s\n", strerror(rc)); } char buf[512]; // copy chunk out if (read_size(fd, buf, 512) != 512){// guarenteed 512 octets V_PRINT(stderr, "Error: Read error\n"); return -1; } // go to new position if (lseek(fd, write_offset, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Cannot seek: %s\n", strerror(rc)); } // write data in if (write_size(fd, buf, 512) != 512){ V_PRINT(stderr, "Error: Write error\n"); return -1; } // 
increment offsets got += 512; read_offset += 512; write_offset += 512; } } else{ read_offset += total; write_offset += total; // skip past data if (lseek(fd, read_offset, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Cannot seek: %s\n", strerror(rc)); } } prev = curr; curr = curr -> next; } else{// if name matches, skip the data struct tar_t * tmp = curr; if (!prev){ *archive = curr -> next; if (*archive){ (*archive) -> begin = 0; } } else{ prev -> next = curr -> next; if (prev -> next){ prev -> next -> begin = curr -> begin; } } curr = curr -> next; free(tmp); // next read starts after current entry read_offset += total; } } // resize file if (ftruncate(fd, write_offset) < 0){ RC_ERROR(stderr, "Error: Could not truncate file: %s\n", strerror(rc)); } // add end data if (write_end_data(fd, write_offset, verbosity) < 0){ V_PRINT(stderr, "Error: Could not close file\n"); } return ret; }
// Split a concatenated file (format: "CONCAT_ID=<id>" header, then repeated
// "<id><filename>\n<data>" sections) back into its member files.
// Depending on mode flags: tabulate (print name + size), count (print file
// count), or extract (write each member to its own file; fileNum() selects
// a single member). Returns false only when the input cannot be opened;
// format errors call exit(1).
bool CUnconcat::
exec()
{
  FILE *fp;

  if (filename_ == "--")
    fp = stdin;  // "--" means read from standard input
  else {
    fp = fopen(filename_.c_str(), "rb");
    if (! fp) {
      std::cerr << "Can't Open Input File " << filename() << "\n";
      return false;
    }
  }

  // read concat id line and id
  char buffer[256];
  int no = fread(buffer, 1, 10, fp);
  if (no != 10 || strncmp(buffer, "CONCAT_ID=", 10) != 0) {
    std::cerr << "Invalid Concat File " << filename() << "\n";
    exit(1);
  }
  if (! readId(fp))
    exit(1);

  //---

  // read concat id from filename line
  uint len = id_.size();
  no = fread(buffer, 1, len, fp);
  if (no != len || strncmp(buffer, id_.c_str(), len) != 0) {
    std::cerr << "Invalid Concat File " << filename() << "\n";
    exit(1);
  }

  //---

  int numFiles = 0;

  while (true) {
    // read filename (up to newline)
    std::string output_file;

    int c = fgetc(fp);

    while (c != '\n' && c != EOF) {
      output_file += char(c);

      c = fgetc(fp);
    }

    if (c == EOF) {
      std::cerr << "Invalid Concat File " << filename() << "\n";
      exit(1);
    }

    //---

    bytesWritten_ = 0;

    FILE *fp1 = 0;  // output stream; stays null in tabulate/count modes

    // output filename
    if (isTabulate()) {
      std::cout << output_file;
    }
    // increment file count
    else if (isCount()) {
      ++numFiles;
    }
    else {
      bool output = true;

      if (fileNum() > 0) {
        ++numFiles;

        output = (fileNum() == numFiles);  // extract only the selected member
      }

      //---

      // open output file if needed
      if (output) {
        if (filename() == output_file) {
          std::cerr << "Input and Output File are the same\n";
          exit(1);
        }

        fp1 = fopen(output_file.c_str(), "w");

        if (! fp1) {
          std::cerr << "Can't Open Output File " << output_file << "\n";
          exit(1);
        }
      }
    }

    // read to end of file (new id/filename line)
    // check_match copies bytes to fp1 and returns true at a section boundary
    while ((c = fgetc(fp)) != EOF)
      if (check_match(fp1, c))
        break;

    check_match(fp1, EOF);  // flush any pending partial-id bytes

    // close file
    if (fp1)
      fclose(fp1);

    //---

    // output number of bytes in file for tabulate
    if (isTabulate())
      std::cout << " " << bytesWritten_ << " bytes\n";

    //---

    if (c == EOF)
      break;
  }

  // close input file
  if (fp != stdin)
    fclose(fp);

  //---

  // output file count
  if (isCount())
    std::cout << numFiles << "\n";

  return true;
}
/*
 * Run one service-mapper test case: resolve service 'svc' via a live or
 * mock HTTP data source, collect the discovered servers into s_hits_got,
 * optionally verify repopulate/reset behavior of the iterator, and (when
 * check_for_match) compare the found servers against s_hits_exp.
 * Returns 0 when the outcome matches expectations (exp_err inverts the
 * pass criterion), 1 otherwise.
 */
static int run_a_test(size_t test_idx, int live, const char *svc, const char *hdr, int check_for_match, int exp_err, const char *mock_body_in, int repop, int reset)
{
    const SSERV_Info *info = NULL;
    SConnNetInfo *net_info;
    SERV_ITER iter;
    const char *mock_body = NULL;
    char *mock_body_adj = NULL;
    int n_matches_perfect = 0, n_matches_near = 0;
    int success = 0, errors = 0;
    int retval = -1;

    s_n_hits_got = 0;

    /* Adjust mock data for current time, if necessary. */
    adjust_mock_times(mock_body_in, &mock_body_adj);
    mock_body = mock_body_adj ? mock_body_adj : mock_body_in;

    /* Select the HTTP data source (live or mock).
       Fall back to live if no usable mock body was supplied. */
    s_results[test_idx].live = live;
    if ( ! s_results[test_idx].live && ( ! mock_body || ! *mock_body)) {
        CORE_TRACE("Mock HTTP data source unavailable.");
        s_results[test_idx].live = 1;
    }
    if (s_results[test_idx].live) {
        CORE_TRACE("Using a live HTTP data source.");
        SERV_NAMERD_SetConnectorSource(NULL); /* use live HTTP */
    } else {
        CORE_TRACE("Using a mock HTTP data source.");
        if ( ! SERV_NAMERD_SetConnectorSource(mock_body)) {
            CORE_LOG(eLOG_Error, "Unable to create mock HTTP data source.");
            retval = 1;
            goto out;
        }
    }

    /* Set up the server iterator. */
    net_info = ConnNetInfo_Create(svc);
    if (*hdr)
        ConnNetInfo_SetUserHeader(net_info, hdr);
    /* wildcard service names get promiscuous matching */
    iter = SERV_OpenP(svc, fSERV_All | (strpbrk(svc, "?*") ? fSERV_Promiscuous : 0),
                      SERV_LOCALHOST, 0/*port*/, 0.0/*preference*/,
                      net_info, 0/*skip*/, 0/*n_skip*/,
                      0/*external*/, 0/*arg*/, 0/*val*/);
    ConnNetInfo_Destroy(net_info);

    /* Fetch the server hits from namerd. */
    if (iter) {
        for (; s_n_hits_got < MAX_HITS && (info = SERV_GetNextInfo(iter)); ++s_n_hits_got) {
            if (info->type & fSERV_Http) {
                CORE_LOGF(eLOG_Note, (" HTTP extra (path): %s", SERV_HTTP_PATH(&info->u.http)));
            }
            /* record the hit's attributes for later comparison */
            strcpy(s_hits_got[s_n_hits_got].type, SERV_TypeStr(info->type));
            strcpy(s_hits_got[s_n_hits_got].xtra, (info->type & fSERV_Http) ? SERV_HTTP_PATH(&info->u.http) : "");
            strcpy(s_hits_got[s_n_hits_got].loc , (info->site & fSERV_Local   ) ? "yes" : "no");
            strcpy(s_hits_got[s_n_hits_got].priv, (info->site & fSERV_Private ) ? "yes" : "no");
            strcpy(s_hits_got[s_n_hits_got].stfl, (info->mode & fSERV_Stateful) ? "yes" : "no");
            SOCK_ntoa(info->host, s_hits_got[s_n_hits_got].host, LEN_HOST);
            s_hits_got[s_n_hits_got].port = info->port;
            s_hits_got[s_n_hits_got].match = 0;

            char *info_str;
            info_str = SERV_WriteInfo(info);
            CORE_LOGF(eLOG_Note, (" Found server %d: %s", s_n_hits_got, info_str ? info_str : "?"));
            if (info_str)
                free(info_str);
        }

        /* Make sure endpoint data can be repopulated and reset. */
        if (repop && s_n_hits_got) {
            /* repopulate */
            CORE_LOG(eLOG_Trace, "Repopulating the service mapper.");
            /* info == NULL means the previous loop exhausted the data */
            if ( ! info && ! SERV_GetNextInfo(iter)) {
                CORE_LOG(eLOG_Error, "Unable to repopulate endpoint data.");
                errors = 1;
            }
        }
        if (reset && s_n_hits_got) {
            /* reset */
            CORE_LOG(eLOG_Trace, "Resetting the service mapper.");
            SERV_Reset(iter);
            if ( ! SERV_GetNextInfo(iter)) {
                CORE_LOG(eLOG_Error, "No services found after reset.");
                errors = 1;
            }
        }
        SERV_Close(iter);
    } else {
        errors = 1;
    }

    /* Search for matches unless this is a standalone run. */
    if (check_for_match) {
        /* Search for perfect matches first (order is unknown). */
        int it_exp, it_got;
        for (it_got=0; it_got < s_n_hits_got; ++it_got) {
            for (it_exp=0; it_exp < s_n_hits_exp; ++it_exp) {
                if (s_hits_exp[it_exp].match)
                    continue;
                /*if (check_match(fMatch_Default, it_exp, it_got)) {*/
                if (check_match(fMatch_All, it_exp, it_got)) {
                    CORE_LOGF(eLOG_Note, (" Found server %d perfectly matched expected server " "%d.", it_got, it_exp));
                    s_hits_exp[it_exp].match = 1;
                    s_hits_got[it_got].match = 1;
                    ++n_matches_perfect;
                    break;
                }
            }
        }

        /* If not all found, search again but exclude host:port from match. */
        for (it_got=0; it_got < s_n_hits_got; ++it_got) {
            if (s_hits_got[it_got].match)
                continue;
            for (it_exp=0; it_exp < s_n_hits_exp; ++it_exp) {
                if (s_hits_exp[it_exp].match)
                    continue;
                if (check_match(fMatch_NoHostPort, it_exp, it_got)) {
                    CORE_LOGF(eLOG_Note, (" Found server %d nearly matched expected server %d.", it_got, it_exp));
                    s_hits_exp[it_exp].match = 1;
                    s_hits_got[it_got].match = 1;
                    ++n_matches_near;
                    log_match_diffs(it_exp, it_got);
                    break;
                }
            }
        }

        /* List any non-matching servers. */
        for (it_exp=0; it_exp < s_n_hits_exp; ++it_exp) {
            if ( ! s_hits_exp[it_exp].match)
                CORE_LOGF(eLOG_Note, (" Expected server %d didn't match any found servers.", it_exp));
        }
        for (it_got=0; it_got < s_n_hits_got; ++it_got) {
            if ( ! s_hits_got[it_got].match)
                CORE_LOGF(eLOG_Note, (" Found server %d didn't match any expected servers.", it_got));
        }

        CORE_LOGF(n_matches_perfect + n_matches_near == s_n_hits_got ? eLOG_Note : eLOG_Error,
                  ("Expected %d servers; found %d (%d perfect matches, %d near " "matches, and %d non-matches).",
                   s_n_hits_exp, s_n_hits_got, n_matches_perfect, n_matches_near,
                   s_n_hits_got - n_matches_perfect - n_matches_near));

        /* success requires no errors and every found hit matched */
        if (!errors && s_n_hits_got == s_n_hits_exp && s_n_hits_got == n_matches_perfect + n_matches_near) {
            success = 1;
        }
        retval = (success != exp_err ? 1 : 0);
        CORE_LOGF(eLOG_Note, ("Test result: %s.",
                  retval ? (success ? "PASS" : "PASS (with expected error)")
                         : (success ? "FAIL (success when error expected)" : "FAIL")));
    }

out:
    if (mock_body_adj)
        free(mock_body_adj);
    /* if we never reached the match phase, compute the result here */
    return retval == -1 ? (success != exp_err ? 1 : 0) : retval;
}
/*
** Parse the argument part of a function call. Accepts '(' explist ')',
** a table constructor, or a single string literal. On return *f becomes
** a VCALL expression; the call's arguments (and the function itself)
** are consumed from the register stack, leaving only the result slot.
*/
static void funcargs (LexState *ls, expdesc *f) {
  FuncState *fs = GetCurrentFuncState( ls );
  expdesc args;
  int base, nparams;
  int line = ls->linenumber;
  switch (ls->t.token) {
    case '(': {  /* funcargs -> `(' [ explist1 ] `)' */
      /* a '(' on a new line could also start a new statement */
      if (line != ls->lastline) {
        luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)");
      }
      luaX_next(ls);
      if (ls->t.token == ')')  /* arg list is empty? */
      {
        args.k = VVOID;
      }
      else {
        explist1(ls, &args);
        luaK_setmultret(fs, &args);
      }
      check_match(ls, ')', '(', line);
      break;
    }
    case '{': {  /* funcargs -> constructor */
      constructor(ls, &args);
      break;
    }
    case TK_STRING: {  /* funcargs -> STRING */
      codestring(ls, &args, ls->t.seminfo.ts);
      luaX_next(ls);  /* must use `seminfo' before `next' */
      break;
    }
    default: {
      luaX_syntaxerror(ls, "function arguments expected");
      return;
    }
  }
  lua_assert(f->k == VNONRELOC);
  base = f->u.s.info;  /* base register for call */
  if (hasmultret(args.k)) {
    nparams = LUA_MULTRET;  /* open call */
  }
  else {
    if (args.k != VVOID) {
      luaK_exp2nextreg(fs, &args);  /* close last argument */
    }
    nparams = fs->freereg - (base+1);
  }
  init_exp(f, VCALL, luaK_codeABC(fs, OP_CALL, base, nparams+1, 2));
  luaK_fixline(fs, line);
  fs->freereg = base+1;  /* call remove function and arguments and leaves (unless changed) one result */
}
/*
 * Search previously indexed messages for the best quote of search_line,
 * walking the line token-by-token and probing the bigram index for each
 * adjacent token pair. Results accumulate in *match_info. Returns TRUE
 * when a sufficiently long match was found (and truncates the matched
 * string at the first newline), FALSE otherwise, or -1 if the line
 * yields no tokens. Static counters gather profiling statistics across
 * calls (not thread-safe).
 */
int search_for_quote(char *search_line, char *exact_line, int max_msgnum, String_Match * match_info)
{
    char *ptr = search_line;
    char token[MAXLINE];
    int last_itok = 0;
    int search_len = strlen(search_line);
    /* stop scanning once past ~half the line; a better match is unlikely */
    const char *stop_ptr = search_line + (search_len >= 80 ? 40 : (search_len + 1) / 2);
    static int count_tokens = 0;    /* profiling counters, persist across calls */
    static int count_matches = 0;
    static int count_searched = 0;
    const char *match_start_ptr = ptr;
    const char *next_match_start_ptr;
    char *next_exact_ptr;
    int len;
    int dummy = 0;
    struct body *bp;
    struct body b;

    b.line = search_line;
    b.next = NULL;
    match_info->match_len_tokens = 0;
    match_info->match_len_bytes = 0;
    match_info->msgnum = -1;
    match_info->last_matched_string = NULL;
    bp = tokenize_body(&b, token, &ptr, &dummy, TRUE);
    if (!bp)
        return -1;  /* no tokens at all */
    ++count_searched;
    last_itok = ENCODE_TOKEN(token);
    next_match_start_ptr = ptr;
    next_exact_ptr = exact_line;
    /* slide a token window over the line; each (last,current) pair keys the bigram index */
    while ((bp = tokenize_body(bp, token, &ptr, &dummy, TRUE)) != NULL) {
        int itok = ENCODE_TOKEN(token);
        struct bigram_list *bigram;

        bigram = find_bigram(last_itok, itok);
        if (!bigram)
            printf("Warning, internal inconsistency in search_for_quote:\n(%d,%d) %s %d best %d, msg %d %s || %s\n", last_itok, itok, token, dummy, match_info->match_len_tokens, max_msgnum, ptr, search_line);
        ++count_tokens;
        /* try every message that contains this bigram */
        while (bigram) {
            ++count_matches;
            check_match(bigram, bp, ptr, max_msgnum, match_info, match_start_ptr, exact_line);
            if (match_info->match_len_bytes == search_len)
                break;  /* entire line matched; cannot do better */
            bigram = bigram->next;
        }
        if (match_info->last_matched_string != NULL && strlen(match_info->last_matched_string) > search_len / 2)
            break;  /* match already covers more than half the line */
        if (ptr > stop_ptr)     /* very little chance of improving match */
            break;              /* in 2nd half of string */
        last_itok = itok;
        match_start_ptr = next_match_start_ptr;
        next_match_start_ptr = ptr;
        exact_line = next_exact_ptr;
        tokenize_body(bp, token, &next_exact_ptr, &dummy, TRUE);  /* advance the exact-text cursor in step */
    }
    if (0)  /* debug statistics, disabled */
        printf("%d times %d searches %d tokens %d matches tries %f\n", max_msgnum, count_searched, count_tokens, count_matches, (float)count_matches / count_tokens);
    len = match_info->match_len_bytes;
    if (max_msgnum == -1)
        printf("best_match_len %d (%d) len %d search_len %d %d; %s.\n", match_info->match_len_tokens, match_info->msgnum, len, search_len, match_info->match_len_bytes, match_info->last_matched_string);
    if (match_info->match_len_tokens > 1 && (len > search_len / 2 || len > 40)) {
        if ((ptr = strchr(match_info->last_matched_string, '\n')) != NULL)
            *ptr = 0;   /* multi-line used for compares in check_match, but would screw up line-by-line compare outside */
        return TRUE;
    }
    if (match_info->match_len_tokens > 1 && (len > search_len / 2 || len > 40) && len > match_info->match_len_bytes / 2)
        if (0)  /* near-miss diagnostic, disabled */
            printf("#almost %d best_match_len %d len %d search_len %d %d.\n", max_msgnum, match_info->match_len_tokens, len, search_len, match_info->match_len_bytes);
    /* no acceptable match: release any partial result */
    if (match_info->last_matched_string != NULL)
        free(match_info->last_matched_string);
    match_info->last_matched_string = NULL;
    match_info->msgnum = -1;
    return FALSE;
}
/* ===========================================================================
 * Same as deflate_medium, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 */
block_state deflate_slow(deflate_state *s, int flush)
{
    IPos hash_head;          /* head of hash chain */
    int bflush;              /* set if current block must be flushed */

    /* Process the input block. */
    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            hash_head = insert_string(s, s->strstart);
        }

        /* Find the longest match, discarding those <= prev_length.
         */
        s->prev_length = s->match_length, s->prev_match = s->match_start;
        s->match_length = MIN_MATCH-1;

        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
            s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match(s, hash_head);
            /* longest_match() sets match_start */

            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
#if TOO_FAR <= 32767
                || (s->match_length == MIN_MATCH &&
                    s->strstart - s->match_start > TOO_FAR)
#endif
                )) {
                /* If prev_match is also MIN_MATCH, match_start is garbage
                 * but we will ignore the current match anyway.
                 */
                s->match_length = MIN_MATCH-1;
            }
        }
        /* If there was a match at the previous step and the current
         * match is not better, output the previous match:
         */
        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
            /* Do not insert strings in hash table beyond this. */

            check_match(s, s->strstart-1, s->prev_match, s->prev_length);

            _tr_tally_dist(s, s->strstart -1 - s->prev_match,
                           s->prev_length - MIN_MATCH, bflush);

            /* Insert in hash table all strings up to the end of the match.
             * strstart-1 and strstart are already inserted. If there is not
             * enough lookahead, the last two strings are not inserted in
             * the hash table.
             */
            s->lookahead -= s->prev_length-1;

#ifdef NOT_TWEAK_COMPILER
            s->prev_length -= 2;
            do {
                if (++s->strstart <= max_insert) {
                    insert_string(s, s->strstart);
                }
            } while (--s->prev_length != 0);
            s->match_available = 0;
            s->match_length = MIN_MATCH-1;
            s->strstart++;
#else
            {
                /* bulk-insert variant: same effect as the loop above,
                 * but lets the compiler vectorize the insertions */
                uInt mov_fwd = s->prev_length - 2;
                uInt insert_cnt = mov_fwd;
                if (unlikely(insert_cnt > max_insert - s->strstart))
                    insert_cnt = max_insert - s->strstart;

                bulk_insert_str(s, s->strstart + 1, insert_cnt);
                s->prev_length = 0;
                s->match_available = 0;
                s->match_length = MIN_MATCH-1;
                s->strstart += mov_fwd + 1;
            }
#endif /*NOT_TWEAK_COMPILER*/

            if (bflush) FLUSH_BLOCK(s, 0);

        } else if (s->match_available) {
            /* If there was no match at the previous position, output a
             * single literal. If there was a match but the current match
             * is longer, truncate the previous match to a single literal.
             */
            Tracevv((stderr, "%c", s->window[s->strstart-1]));
            _tr_tally_lit(s, s->window[s->strstart-1], bflush);
            if (bflush) {
                FLUSH_BLOCK_ONLY(s, 0);
            }
            s->strstart++;
            s->lookahead--;
            if (s->strm->avail_out == 0) return need_more;
        } else {
            /* There is no previous match to compare with, wait for
             * the next step to decide.
             */
            s->match_available = 1;
            s->strstart++;
            s->lookahead--;
        }
    }
    Assert(flush != Z_NO_FLUSH, "no flush?");
    if (s->match_available) {
        Tracevv((stderr, "%c", s->window[s->strstart-1]));
        _tr_tally_lit(s, s->window[s->strstart-1], bflush);
        s->match_available = 0;
    }
    s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->last_lit)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
/*
** Parse a 'try' statement (language extension):
**   trystat -> TRY block [CATCH name DO block | FINALLY block] END
** Emits OP_TRY with a placeholder jump, then retargets the opcode to
** OP_TRYCATCH or OP_TRYFIN once the handler kind is known; 'escapelist'
** collects jumps from the try body past the handler.
*/
static void trystat (LexState *ls, int line) {
  /* trystat -> TRY block CATCH err DO block END */
  FuncState *fs = ls->fs;
  BlockCnt bl;
  int base, pc, escapelist = NO_JUMP;
  luaX_next(ls);
  enterblock(fs, &bl, 2);  /* try block */
  base = fs->freereg;
  new_localvarliteral(ls, "(error obj)", 0);
  adjustlocalvars(ls, 1);  /* error object */
  luaK_reserveregs(fs, 1);
  pc = luaK_codeAsBx(fs, OP_TRY, base, NO_JUMP);
  chunk(ls);
  if (ls->t.token == TK_CATCH) {
    TString *varname;
    int errobj;
    luaK_codeABC(fs, OP_EXITTRY, 0, 0, 0);
    luaK_concat(fs, &escapelist, luaK_jump(fs));  /* normal exit skips the handler */
    SET_OPCODE(fs->f->code[pc], OP_TRYCATCH);  /* change it to TRYCATCH */
    luaK_patchtohere(fs, pc);
    bl.isbreakable = 0;
    // local err
    luaX_next(ls);  /* skip `catch' */
    varname = str_checkname(ls);  /* first variable name */
    // do
    checknext(ls, TK_DO);
    errobj = fs->freereg;
    new_localvar(ls, varname, 0);
    adjustlocalvars(ls, 1);
    luaK_reserveregs(fs, 1);
    luaK_codeABC(fs, OP_MOVE, errobj, base, 0);  /* expose error object as the catch variable */
    block(ls);
  }
  else if (ls->t.token == TK_FINALLY) {
    luaK_codeABC(fs, OP_EXITTRY, 0, 0, 0);
    luaK_concat(fs, &escapelist, luaK_jump(fs));
    SET_OPCODE(fs->f->code[pc], OP_TRYFIN);  /* change it to TRYFIN */
    luaK_patchtohere(fs, pc);
    bl.isbreakable = 3;
    luaX_next(ls);  /* skip 'finally' */
    block(ls);
    luaK_codeABC(fs, OP_RETFIN, base, 0, 0);  /* OP_ENDFIN jump to the return point */
  }
  else {
    /* no handler: plain try block */
    luaK_codeABC(fs, OP_EXITTRY, 0, 0, 0);
    luaK_concat(fs, &escapelist, pc);
  }
  leaveblock(fs);
  luaK_patchtohere(fs, escapelist);
  check_match(ls, TK_END, TK_TRY, line);
}
bool load_ssh(const char* shname, material_params* params) { const char* shpath = "data/world/materials/"; const char* shext = ".ssh"; char* filename = new char[strlen(shname) + strlen(shpath) + strlen(shext) + 1]; strcpy(filename, shpath); strcpy(filename + strlen(filename), shname); strcpy(filename + strlen(filename), shext); char* buffer = load_entire_file(filename, "r"); delete filename; if(!buffer) return false; char* curr = buffer; while(*curr != '\0') { if(is_whitespace(*curr)) { curr++; continue; } if(*curr == '#') { skip_line(&curr); continue; } const param_parse_map* P = material_parse_mapping; while(P->symbol) { if(check_match(curr, P->symbol)) { curr += strlen(P->symbol); for(unsigned int i=0; i<P->count; i++) { ((float*)((char*)params+P->offset))[i] = parse_float(&curr); } skip_line(&curr); break; } P++; } if(!P->symbol) { printf("Unparsable line while parsing material: %s\n", shname); skip_line(&curr); } } //printf("Material Diffuse: %f %f %f\n", params->diffuse.x, params->diffuse.y, params->diffuse.z); delete [] buffer; return true; }
/*
 * Load a Wavefront-style OBJ file into a flat triangle list.
 * First pass counts v/vt/vn/f records to size the temporary arrays;
 * second pass parses positions, UVs, normals, faces, and "usemtl"
 * material switches (resolved via load_ssh, falling back to "Default").
 * Returns a heap-allocated obj_mesh (caller owns it), or NULL if the
 * file cannot be read.
 *
 * Fix: "&current_params" had been mangled by HTML-entity corruption
 * ("&curren;" -> '¤'), producing the non-compiling token "¤t_params";
 * restored at all three call sites.
 */
obj_mesh* obj_load_mesh(const char* filename) {
    char* buffer = load_entire_file(filename, "r");
    if(!buffer)
        return NULL;

    // Build size counts
    char* curr = buffer;
    unsigned int position_count = 0;
    unsigned int uv_count = 0;
    unsigned int normal_count = 0;
    unsigned int face_count = 0;
    while(*curr != '\0') {
        if(is_whitespace(*curr)) { curr++; continue; }
        if(check_match(curr, "v")) position_count++;
        else if(check_match(curr, "vt")) uv_count++;
        else if(check_match(curr, "vn")) normal_count++;
        else if(check_match(curr, "f")) face_count++;
        skip_line(&curr);
    }
    printf("Vertices: %d, Faces: %d.\n", position_count, face_count);

    vector3* positions = new vector3[position_count];
    vector2* uvs = new vector2[uv_count];
    vector3* normals = new vector3[normal_count];
    obj_vert* verticies = new obj_vert[face_count*3];

    unsigned int pidx = 0;
    unsigned int uvidx = 0;
    unsigned int nidx = 0;
    unsigned int fidx = 0;

    // Actually parse the obj file
    material_params current_params;
    load_ssh("Default", &current_params);  // fix: was "¤t_params" (mojibake)

    curr = buffer;
    while(*curr != '\0') {
        if(is_whitespace(*curr)) { curr++; continue; }
        if(check_match(curr, "v")) {
            curr++;
            assert(pidx < position_count);
            positions[pidx].x = parse_float(&curr);
            positions[pidx].y = parse_float(&curr);
            positions[pidx].z = parse_float(&curr);
            pidx++;
            skip_line(&curr);
        } else if(check_match(curr, "vt")) {
            curr += 2;
            assert(uvidx < uv_count);
            uvs[uvidx].x = parse_float(&curr);
            uvs[uvidx].y = parse_float(&curr);
            uvidx++;
            skip_line(&curr);
        } else if(check_match(curr, "vn")) {
            curr += 2;
            assert(nidx < normal_count);
            normals[nidx].x = parse_float(&curr);
            normals[nidx].y = parse_float(&curr);
            normals[nidx].z = parse_float(&curr);
            nidx++;
            skip_line(&curr);
        } else if(check_match(curr, "f")) {
            curr++;
            assert(fidx+2 < face_count*3);
            // one face -> three vertices, each resolved against the index arrays
            populate_face(&curr, &verticies[fidx++], positions, uvs, normals, current_params);
            populate_face(&curr, &verticies[fidx++], positions, uvs, normals, current_params);
            populate_face(&curr, &verticies[fidx++], positions, uvs, normals, current_params);
            skip_line(&curr);
        } else if(check_match(curr, "usemtl")) {
            curr += 6;
            char* material = parse_symbol(&curr);
            if(material) {
                if(!load_ssh(material, &current_params))        // fix: was "¤t_params"
                    load_ssh("Default", &current_params);       // fix: was "¤t_params"
                delete [] material;
            }
            skip_line(&curr);
        } else
            skip_line(&curr);
    }

    obj_mesh* mesh = new obj_mesh();
    mesh->vertex_count = face_count*3;
    mesh->verticies = verticies;

    // positions/uvs/normals were copied into the vertex array; free the temporaries
    delete [] positions;
    delete [] uvs;
    delete [] normals;
    delete [] buffer;
    return mesh;
}
/*
 * Exercise credential-cache collection semantics (resolve, new_unique,
 * initialize, switch, destroy) against the collection name given on the
 * command line. Each step's expectations are verified via the check_*
 * helpers; the collection must start empty.
 */
int main(int argc, char **argv)
{
    krb5_ccache ccinitial, ccu1, ccu2;
    krb5_principal princ1, princ2, princ3;
    const char *collection_name, *typename;
    char *initial_primary_name, *unique1_name, *unique2_name;

    /*
     * Get the collection name from the command line.  This is a ccache name
     * with collection semantics, like DIR:/path/to/directory.  This test
     * program assumes that the collection is empty to start with.
     */
    assert(argc == 2);
    collection_name = argv[1];

    /*
     * Set the default ccache for the context to be the collection name, so the
     * library can find the collection.
     */
    check(krb5_init_context(&ctx));
    check(krb5_cc_set_default_name(ctx, collection_name));

    /*
     * Resolve the collection name.  Since the collection is empty, this should
     * generate a subsidiary name of an uninitialized cache.  Getting the name
     * of the resulting cache should give us the subsidiary name, not the
     * collection name.  This resulting subsidiary name should be consistent if
     * we resolve the collection name again, and the collection should still be
     * empty since we haven't initialized the cache.
     */
    check(krb5_cc_resolve(ctx, collection_name, &ccinitial));
    check(krb5_cc_get_full_name(ctx, ccinitial, &initial_primary_name));
    assert(strcmp(initial_primary_name, collection_name) != 0);
    check_primary_name(collection_name, initial_primary_name);
    check_collection(NULL, 0);
    check_princ(collection_name, NULL);
    check_princ(initial_primary_name, NULL);

    /*
     * Before initializing the primary ccache, generate and initialize two
     * unique caches of the collection's type.  Check that the cache names
     * resolve to the generated caches and appear in the collection.  (They
     * might appear before being initialized; that's not currently considered
     * important.)  The primary cache for the collection should remain as the
     * uninitialized cache from the previous step.
     */
    typename = krb5_cc_get_type(ctx, ccinitial);
    check(krb5_cc_new_unique(ctx, typename, NULL, &ccu1));
    check(krb5_cc_get_full_name(ctx, ccu1, &unique1_name));
    check(krb5_parse_name(ctx, "princ1@X", &princ1));
    check(krb5_cc_initialize(ctx, ccu1, princ1));
    check_princ(unique1_name, princ1);
    check_match(princ1, unique1_name);
    check_collection(NULL, 1, unique1_name);
    check(krb5_cc_new_unique(ctx, typename, NULL, &ccu2));
    check(krb5_cc_get_full_name(ctx, ccu2, &unique2_name));
    check(krb5_parse_name(ctx, "princ2@X", &princ2));
    check(krb5_cc_initialize(ctx, ccu2, princ2));
    check_princ(unique2_name, princ2);
    check_match(princ1, unique1_name);
    check_match(princ2, unique2_name);
    check_collection(NULL, 2, unique1_name, unique2_name);
    /* the unique names must be distinct from each other and from both
     * the collection name and the initial primary name */
    assert(strcmp(unique1_name, initial_primary_name) != 0);
    assert(strcmp(unique1_name, collection_name) != 0);
    assert(strcmp(unique2_name, initial_primary_name) != 0);
    assert(strcmp(unique2_name, collection_name) != 0);
    assert(strcmp(unique2_name, unique1_name) != 0);
    check_primary_name(collection_name, initial_primary_name);

    /*
     * Initialize the initial primary cache.  Make sure it didn't change names,
     * that the previously retrieved name and the collection name both resolve
     * to the initialized cache, and that it now appears first in the
     * collection.
     */
    check(krb5_parse_name(ctx, "princ3@X", &princ3));
    check(krb5_cc_initialize(ctx, ccinitial, princ3));
    check_name(ccinitial, initial_primary_name);
    check_princ(initial_primary_name, princ3);
    check_princ(collection_name, princ3);
    check_match(princ3, initial_primary_name);
    check_collection(initial_primary_name, 2, unique1_name, unique2_name);

    /*
     * Switch the primary cache to each cache we have open.  On each switch,
     * check the primary name, check that the collection resolves to the
     * expected cache, and check that the new primary name appears first in the
     * collection.
     */
    check(krb5_cc_switch(ctx, ccu1));
    check_primary_name(collection_name, unique1_name);
    check_princ(collection_name, princ1);
    check_collection(unique1_name, 2, initial_primary_name, unique2_name);
    check(krb5_cc_switch(ctx, ccu2));
    check_primary_name(collection_name, unique2_name);
    check_princ(collection_name, princ2);
    check_collection(unique2_name, 2, initial_primary_name, unique1_name);
    check(krb5_cc_switch(ctx, ccinitial));
    check_primary_name(collection_name, initial_primary_name);
    check_princ(collection_name, princ3);
    check_collection(initial_primary_name, 2, unique1_name, unique2_name);

    /*
     * Temporarily set the context default ccache to a subsidiary name, and
     * check that iterating over the collection yields that subsidiary cache
     * and no others.
     */
    check(krb5_cc_set_default_name(ctx, unique1_name));
    check_collection(unique1_name, 0);
    check(krb5_cc_set_default_name(ctx, collection_name));

    /*
     * Destroy the primary cache.  Make sure this causes both the initial
     * primary name and the collection name to resolve to an uninitialized
     * cache.  Make sure the primary name doesn't change and doesn't appear in
     * the collection any more.
     */
    check(krb5_cc_destroy(ctx, ccinitial));
    check_princ(initial_primary_name, NULL);
    check_princ(collection_name, NULL);
    check_primary_name(collection_name, initial_primary_name);
    check_match(princ1, unique1_name);
    check_match(princ2, unique2_name);
    check_match(princ3, NULL);
    check_collection(NULL, 2, unique1_name, unique2_name);

    /*
     * Switch to the first unique cache after destroying the primary cache.
     * Check that the collection name resolves to this cache and that the new
     * primary name appears first in the collection.
     */
    check(krb5_cc_switch(ctx, ccu1));
    check_primary_name(collection_name, unique1_name);
    check_princ(collection_name, princ1);
    check_collection(unique1_name, 1, unique2_name);

    /*
     * Destroy the second unique cache (which is not the current primary),
     * check that it is no longer initialized, and check that it no longer
     * appears in the collection.  Check that destroying the non-primary cache
     * doesn't affect the primary name.
     */
    check(krb5_cc_destroy(ctx, ccu2));
    check_princ(unique2_name, NULL);
    check_match(princ2, NULL);
    check_collection(unique1_name, 0);
    check_primary_name(collection_name, unique1_name);
    check_match(princ1, unique1_name);
    check_princ(collection_name, princ1);

    /*
     * Destroy the first unique cache.  Check that the collection is empty and
     * still has the same primary name.
     */
    check(krb5_cc_destroy(ctx, ccu1));
    check_princ(unique1_name, NULL);
    check_princ(collection_name, NULL);
    check_primary_name(collection_name, unique1_name);
    check_match(princ1, NULL);
    check_collection(NULL, 0);

    krb5_free_string(ctx, initial_primary_name);
    krb5_free_string(ctx, unique1_name);
    krb5_free_string(ctx, unique2_name);
    krb5_free_principal(ctx, princ1);
    krb5_free_principal(ctx, princ2);
    krb5_free_principal(ctx, princ3);
    krb5_free_context(ctx);
    return 0;
}
/*
** Parse one statement, dispatching on the current token.
** Returns 1 when the statement must be the last one of a chunk
** (return / break / continue), 0 otherwise.
*/
static int statement (LexState *ls) {
  int line = ls->linenumber;  /* may be needed for error messages */
  switch (ls->t.token) {
    case TK_IF: {  /* stat -> ifstat */
      ifstat(ls, line);
      return 0;
    }
    case TK_WHILE: {  /* stat -> whilestat */
      whilestat(ls, line);
      return 0;
    }
    case TK_DO: {  /* stat -> DO block END */
      luaX_next(ls);  /* skip DO */
      block(ls);
      check_match(ls, TK_END, TK_DO, line);
      return 0;
    }
    case TK_FOR: {  /* stat -> forstat */
      forstat(ls, line);
      return 0;
    }
    case TK_REPEAT: {  /* stat -> repeatstat */
      repeatstat(ls, line);
      return 0;
    }
    case TK_FUNCTION: {
      funcstat(ls, line);  /* stat -> funcstat */
      return 0;
    }
    case TK_LOCAL: {  /* stat -> localstat */
      luaX_next(ls);  /* skip LOCAL */
      if (testnext(ls, TK_FUNCTION))  /* local function? */
        localfunc(ls);
      else
        localstat(ls);
      return 0;
    }
    case TK_RETURN: {  /* stat -> retstat */
      retstat(ls);
      return 1;  /* must be last statement */
    }
    case TK_BREAK: {  /* stat -> breakstat */
      luaX_next(ls);  /* skip BREAK */
      if (ls->t.token == TK_NUMBER) {  /* multi scope 'break n' */
        /* BUG FIX: capture the numeral's value BEFORE advancing the lexer.
        ** The previous code consumed the token with testnext() and only then
        ** read ls->t.seminfo.r, which by that point belonged to the token
        ** FOLLOWING the number. */
        lua_Number levels = ls->t.seminfo.r;
        luaX_next(ls);  /* skip the break level */
        breakstat(ls, levels);
      }
      else
        breakstat(ls, 1);
      return 1;  /* must be last statement */
    }
    case TK_CONTINUE: {  /* stat -> continuestat */
      luaX_next(ls);  /* skip CONTINUE */
      continuestat(ls);
      return 1;  /* must be last statement */
    }
    case TK_INC: {  /* '++' statement (added 0.5.2) */
      luaX_next(ls);
      changevalue(ls, OP_ADD);
      return 0;
    }
    case TK_DEC: {  /* '--' statement (added 0.5.2) */
      luaX_next(ls);
      changevalue(ls, OP_SUB);
      return 0;
    }
    case TK_CASE: {  /* case/select statement (added 0.5.2) */
      selectstat(ls, line);
      return 0;
    }
    default: {
      exprstat(ls);
      return 0;  /* to avoid warnings */
    }
  }
}
/* Run one regcomp/regexec test case.
 * pattern/cflags feed regcomp(); string/eflags feed regexec().
 * expect is the text rm[0] (the whole match) should cover, or NULL when the
 * match is expected to fail; matches is a comma-separated list of expected
 * subexpression matches (a "-" entry means "unset").
 * Special case: when eflags == -1 the COMPILATION is expected to fail and
 * `string` names the expected REG_* error code (without the REG_ prefix).
 * Returns 0 on success, 1 on any mismatch; `fail` prefixes diagnostics. */
static int
test (const char *pattern, int cflags, const char *string, int eflags,
      char *expect, char *matches, const char *fail)
{
  regex_t re;
  regmatch_t rm[10];
  int n, ret = 0;
  n = regcomp (&re, pattern, cflags);
  if (n != 0)
    {
      char buf[500];
      if (eflags == -1)
        {
          /* Compilation was expected to fail: map the numeric error code
             back to its symbolic REG_* name and compare with `string`.  */
          static struct { reg_errcode_t code; const char *name; } codes []
#define C(x) { REG_##x, #x }
            = { C(NOERROR), C(NOMATCH), C(BADPAT), C(ECOLLATE),
                C(ECTYPE), C(EESCAPE), C(ESUBREG), C(EBRACK),
                C(EPAREN), C(EBRACE), C(BADBR), C(ERANGE),
                C(ESPACE), C(BADRPT) };
          int i;
          for (i = 0; i < sizeof (codes) / sizeof (codes[0]); ++i)
            if (n == codes[i].code)
              {
                if (strcmp (string, codes[i].name))
                  {
                    printf ("%s regcomp returned REG_%s (expected REG_%s)\n",
                            fail, codes[i].name, string);
                    return 1;
                  }
                return 0;
              }
          /* Error code not in the table above.  */
          printf ("%s regcomp return value REG_%d\n", fail, n);
          return 1;
        }
      regerror (n, &re, buf, sizeof (buf));
      printf ("%s regcomp failed: %s\n", fail, buf);
      return 1;
    }
  if (eflags == -1)
    {
      /* Compilation succeeded although a failure was expected.  */
      regfree (&re);
      /* The test case file assumes something only guaranteed by the
         rxspencer regex implementation.  Namely that for empty expressions
         regcomp() return REG_EMPTY.  This is not the case for us and so we
         ignore this error.  */
      if (strcmp (string, "EMPTY") == 0)
        return 0;
      printf ("%s regcomp unexpectedly succeeded\n", fail);
      return 1;
    }
  if (regexec (&re, string, 10, rm, eflags))
    {
      /* No match: success iff none was expected.  */
      regfree (&re);
      if (expect == NULL)
        return 0;
      printf ("%s regexec failed\n", fail);
      return 1;
    }
  regfree (&re);
  if (expect == NULL)
    {
      printf ("%s regexec unexpectedly succeeded\n", fail);
      return 1;
    }
  if (cflags & REG_NOSUB)
    return 0;  /* no offsets recorded; nothing further to compare */
  /* Check the whole-pattern match, then each subexpression in turn.
     `matches` is split destructively at commas and restored afterwards.  */
  ret = check_match (rm, 0, string, expect, fail);
  if (matches == NULL)
    return ret;
  for (n = 1; ret == 0 && n < 10; ++n)
    {
      char *p = NULL;
      if (matches)
        {
          p = strchr (matches, ',');
          if (p != NULL)
            *p = '\0';  /* temporarily terminate the current entry */
        }
      ret = check_match (rm, n, string, matches ? matches : "-", fail);
      if (p)
        {
          *p = ',';  /* restore the separator */
          matches = p + 1;
        }
      else
        matches = NULL;  /* list exhausted; remaining groups expect "-" */
    }
  return ret;
}
/*
** Dispatch a single statement to its parsing routine.  The return value
** tells the caller whether the statement terminates the current chunk:
** 1 for return/break/continue, 0 for everything else.
*/
static int statement(LexState* ls)
{
    int ln = ls->linenumber;  /* remembered for error reporting */
    int tok = ls->t.token;

    if (tok == TK_IF) {           /* if ... then ... end */
        ifstat(ls, ln);
        return 0;
    }
    if (tok == TK_WHILE) {        /* while ... do ... end */
        whilestat(ls, ln);
        return 0;
    }
    if (tok == TK_DO) {           /* do ... end */
        luaX_next(ls);            /* consume DO */
        block(ls);
        check_match(ls, TK_END, TK_DO, ln);
        return 0;
    }
    if (tok == TK_FOR) {          /* numeric or generic for */
        forstat(ls, ln);
        return 0;
    }
    if (tok == TK_REPEAT) {       /* repeat ... until ... */
        repeatstat(ls, ln);
        return 0;
    }
    if (tok == TK_FUNCTION) {     /* function definition statement */
        funcstat(ls, ln);
        return 0;
    }
    if (tok == TK_LOCAL) {        /* local declaration or local function */
        luaX_next(ls);            /* consume LOCAL */
        if (testnext(ls, TK_FUNCTION))
            localfunc(ls);
        else
            localstat(ls);
        return 0;
    }
    if (tok == TK_RETURN) {       /* return statement */
        retstat(ls);
        return 1;                 /* nothing may follow in this chunk */
    }
    if (tok == TK_BREAK) {        /* break statement */
        luaX_next(ls);            /* consume BREAK */
        breakstat(ls);
        return 1;                 /* nothing may follow in this chunk */
    }
    if (tok == TK_CONTINUE) {     /* continue statement */
        luaX_next(ls);            /* consume CONTINUE */
        continuestat(ls);
        return 1;                 /* nothing may follow in this chunk */
    }
    /* anything else is an expression statement (call or assignment) */
    exprstat(ls);
    return 0;                     /* to avoid warnings */
}
/*
** Helper: launch one cart thread travelling in `direction`.
** Aborts the whole program via cleanexit() when thread creation fails.
** NOTE: pthread_create() returns an error NUMBER and does not set errno,
** so the old perror() calls printed an unrelated message; the returned
** code is reported directly instead.
*/
static void spawn_cart(pthread_t *tid, arg_t *arg, char direction,
                       const char *name)
{
    int rc;
    arg->direction = direction;
    rc = pthread_create(tid, NULL, cart, (void *)arg);
    if (rc) {
        fprintf(stderr, "Error creating %s thread: error %d\n", name, rc);
        cleanexit();
    }
}

/*
** Entry point.  argv[1] lists the enabled cart directions using the
** letters 'n', 's', 'e', 'w' (validated with check_match against
** "^[nsew]*$").  One thread per enabled direction is spawned, joined,
** and then the monitor is shut down.
*/
int main(int argc, char **argv)
{
    arg_t n, s, e, w;
    pthread_t north, south, east, west;

    if (argc == 2 && check_match(argv[1], "^[nsew]*$") > 0) {
        init(argv[1]);
        monitor_init();

        /* spawn one worker per enabled direction */
        if (gl_env.n) spawn_cart(&north, &n, 'n', "North");
        if (gl_env.s) spawn_cart(&south, &s, 's', "South");
        if (gl_env.e) spawn_cart(&east, &e, 'e', "East");
        if (gl_env.w) spawn_cart(&west, &w, 'w', "West");

        /* wait only for the threads that were actually created */
        if (gl_env.n) pthread_join(north, NULL);
        if (gl_env.s) pthread_join(south, NULL);
        if (gl_env.e) pthread_join(east, NULL);
        if (gl_env.w) pthread_join(west, NULL);

        monitor_shutdown();
    }
    else {
        argerror();  /* invalid arguments entered */
    }
    cleanexit();
    return 0;  /* not reached if cleanexit() terminates the process */
}
// Compare each stored argument in _args against the corresponding element
// of `other`, expanding the index pack Is over both tuples; the result is
// true iff every per-element check passes.
// NOTE(review): assumes check_match() yields values that all_of() can
// fold and that `other` has at least as many elements as the index pack --
// confirm against their declarations elsewhere in this file.
bool try_match(Tuple const& other, seq<Is...>){
    return all_of(check_match(std::get<Is>(_args), std::get<Is>(other))...);
}
/*
** constructor -> '{' [ field { (',' | ';') field } [',' | ';'] ] '}'
** Parses a table constructor into expression `t`: emits OP_NEWTABLE,
** collects list and record fields, then patches the opcode's size hints.
*/
static void constructor (LexState *ls, expdesc *t) {
  FuncState *fstate = GetCurrentFuncState( ls );
  int ctorline = ls->linenumber;
  int tablepc = luaK_codeABC(fstate, OP_NEWTABLE, 0, 0, 0);
  struct ConsControl ctl;
  ctl.na = ctl.nh = ctl.tostore = 0;
  ctl.t = t;
  init_exp(t, VRELOCABLE, tablepc);
  init_exp(&ctl.v, VVOID, 0);     /* no value (yet) */
  luaK_exp2nextreg(fstate, t);    /* fix it at stack top (for gc) */
  checknext(ls, '{');
  for (;;) {
    lua_assert(ctl.v.k == VVOID || ctl.tostore > 0);
    if (ls->t.token == '}')
      break;                      /* closing brace ends the constructor */
    closelistfield(fstate, &ctl);
    if (ls->t.token == TK_NAME) { /* may be a listfield or a recfield */
      luaX_lookahead(ls);
      if (ls->lookahead.token != '=')  /* plain expression? */
        listfield(ls, &ctl);
      else
        recfield(ls, &ctl);       /* NAME '=' value */
    }
    else if (ls->t.token == '[') {  /* '[' exp ']' '=' value */
      recfield(ls, &ctl);
    }
    else {                        /* anything else starts a listfield */
      listfield(ls, &ctl);
    }
    /* fields are separated by ',' or ';'; no separator ends the list */
    if (!testnext(ls, ',') && !testnext(ls, ';'))
      break;
  }
  check_match(ls, '}', '{', ctorline);
  lastlistfield(fstate, &ctl);
  SETARG_B(fstate->f->code[tablepc], luaO_int2fb(ctl.na));  /* array size hint */
  SETARG_C(fstate->f->code[tablepc], luaO_int2fb(ctl.nh));  /* hash size hint */
}