// reads a run of token characters from the current position into 'token';
// fails if the current character is not a token character, or if the token
// (plus its terminating '\0') does not fit into a buffer of TokenSize bytes
bool MenuConf::readToken(FILE* f, char* token, int TokenSize)
{
    if (!isTokenChar(curChar)) {
        printf("%d, %d: Expected token character\n", lineIndex, caret);
        return false;
    }
    for (int i = 0; i < TokenSize; i++) {
        if (!isTokenChar(curChar)) {
            token[i] = 0;
            return true;
        }
        token[i] = curChar;
        nextChar(f);
    }
    token[TokenSize-1] = 0;
    printf("%d, %d: Token too long\n", lineIndex, caret);
    return false;
}
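/* readToken() above presumes a MenuConf::isTokenChar() predicate and a
 * nextChar() cursor helper that are not part of this excerpt. The following
 * is only a minimal, self-contained sketch of such a predicate, assuming the
 * configuration tokens are identifier-like (letters, digits, '_'); the actual
 * MenuConf definition may accept a different character set. */
#include <cctype>

static bool isTokenChar(int c)
{
    return std::isalnum(static_cast<unsigned char>(c)) != 0 || c == '_';
}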
// returns true if the range [start, end) is non-empty and consists only of token characters
bool isValidToken(const char* start, const char* end)
{
    if (end <= start)
        return false;
    for (; start < end; ++start) {
        if (!isTokenChar(*start))
            return false;
    }
    return true;
}
// checks a two-character window at an operator: ';' and '(' are valid only
// when NOT followed by another token character, while '|' and '&' are valid
// only when immediately followed by the same token character ('||', '&&')
bool isValidToken(char first, char second)
{
    switch (first) {
    case ';':
        if (!isTokenChar(second)) return true;
        break;
    case '|':
        if (isTokenChar(second) && first == second) return true;
        break;
    case '&':
        if (isTokenChar(second) && first == second) return true;
        break;
    case '(':
        if (!isTokenChar(second)) return true;
        break;
    }
    return false;
}
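/* The two isValidToken() overloads above lean on an isTokenChar() predicate
 * that is not shown. From the cases they and checkForConsecutiveTokens()
 * below handle, "token characters" appear to be the shell operator characters
 * rather than word characters; the sketch below makes that assumption
 * explicit, but the exact set is a guess, not the original definition. */
static bool isTokenChar(char c)
{
    switch (c) {
    case '<': case '>': case '|': case '&':
    case ';': case '(': case ')':
        return true;
    default:
        return false;
    }
}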
// there can be no consecutive token characters
// the exceptions: a '&' character must be followed immediately by another '&'
//                 a '|' character may be followed by a '|'
//                 tokens may be followed by an open paren '('
void checkForConsecutiveTokens(char* complete_command)
{
    int pos = 0;
    while (complete_command[pos] != '\0') {
        // there can be no consecutive token characters
        if (isTokenChar(complete_command[pos])) {
            switch (complete_command[pos]) {
            case '<':
            case '>':
            case ';':
                if (complete_command[pos+1] == '<' || complete_command[pos+1] == '>' ||
                    complete_command[pos+1] == '|' || complete_command[pos+1] == '&' ||
                    complete_command[pos+1] == ';' || complete_command[pos+1] == ')') {
                    fprintf(stderr, "Invalid syntax : checkForConsecutiveTokens!");
                    exit(1);
                }
                break;
            case '&':
                pos++;
                if (complete_command[pos+1] == '<' || complete_command[pos+1] == '>' ||
                    complete_command[pos+1] == '|' || complete_command[pos+1] == '&' ||
                    complete_command[pos+1] == ';' || complete_command[pos+1] == ')') {
                    fprintf(stderr, "Invalid syntax : checkForConsecutiveTokens!");
                    exit(1);
                }
                break;
            case '|':
                if (complete_command[pos+1] == '<' || complete_command[pos+1] == '>' ||
                    complete_command[pos+1] == '&' || complete_command[pos+1] == ';' ||
                    complete_command[pos+1] == ')') {
                    fprintf(stderr, "Invalid syntax : checkForConsecutiveTokens!");
                    exit(1);
                }
                if (complete_command[pos+1] == '|') {
                    pos++;
                    if (complete_command[pos+1] == '<' || complete_command[pos+1] == '>' ||
                        complete_command[pos+1] == '|' || complete_command[pos+1] == '&' ||
                        complete_command[pos+1] == ';' || complete_command[pos+1] == ')') {
                        fprintf(stderr, "Invalid syntax : checkForConsecutiveTokens!");
                        exit(1);
                    }
                }
                break;
            case '(':
                if (complete_command[pos+1] == '<' || complete_command[pos+1] == '>' ||
                    complete_command[pos+1] == '|' || complete_command[pos+1] == '&' ||
                    complete_command[pos+1] == ';') {
                    fprintf(stderr, "Invalid syntax : checkForConsecutiveTokens!");
                    exit(1);
                }
                break;
            default: // ')'
                break;
            }
        }
        pos++;
    }
}
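/* Hypothetical driver for checkForConsecutiveTokens(), assuming the
 * operator-set isTokenChar() sketched above. A command such as
 * "ls -l | grep foo && echo done" passes, whereas something like "ls |; foo"
 * (a '|' directly followed by ';') would hit the error path and exit(1). */
#include <cstdio>

int main()
{
    char command[] = "ls -l | grep foo && echo done";
    checkForConsecutiveTokens(command);   /* returns normally: no illegal operator pair */
    std::puts("syntax ok");
    return 0;
}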
bool CharTokenizer::incrementToken()
{
    clearAttributes();
    int32_t length = 0;
    int32_t start = bufferIndex;
    CharArray buffer(termAtt->termBuffer());
    while (true) {
        if (bufferIndex >= dataLen) {
            offset += dataLen;
            dataLen = input->read(ioBuffer.get(), 0, ioBuffer.size());
            if (dataLen == -1) {
                dataLen = 0; // so next offset += dataLen won't decrement offset
                if (length > 0)
                    break;
                else
                    return false;
            }
            bufferIndex = 0;
        }
        wchar_t c = ioBuffer[bufferIndex++];
        if (isTokenChar(c)) { // if it's a token char
            if (length == 0)
                start = offset + bufferIndex - 1;
            else if (length == buffer.size())
                buffer = termAtt->resizeTermBuffer(1 + length);
            buffer[length++] = normalize(c); // buffer it, normalized
            if (length == MAX_WORD_LEN) // buffer overflow!
                break;
        } else if (length > 0) // at non-Letter with chars
            break; // return them
    }
    termAtt->setTermLength(length);
    offsetAtt->setOffset(correctOffset(start), correctOffset(start + length));
    return true;
}
bool CharTokenizer::next(Token* token)
{
    int32_t length = 0;
    int32_t start = offset;
    while (true) {
        TCHAR c;
        offset++;
        if (bufferIndex >= dataLen) {
            dataLen = input->read(ioBuffer, LUCENE_IO_BUFFER_SIZE);
            if (dataLen == -1)
                dataLen = 0;
            bufferIndex = 0;
        }
        if (dataLen <= 0) {
            if (length > 0)
                break;
            else
                return false;
        } else
            c = ioBuffer[bufferIndex++];
        if (isTokenChar(c)) { // if it's a token TCHAR
            if (length == 0) // start of token
                start = offset - 1;
            buffer[length++] = normalize(c); // buffer it, normalized
            if (length == LUCENE_MAX_WORD_LEN) // buffer overflow!
                break;
        } else if (length > 0) // at non-Letter w/ chars
            break; // return 'em
    }
    buffer[length] = 0;
    token->set(buffer, start, start + length);
    return true;
}
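/* The two CharTokenizer implementations above (the attribute-based
 * incrementToken() variant and the older Token-based next() variant) both
 * defer character classification to virtual isTokenChar() / normalize()
 * hooks. The subclass below is an illustrative sketch of how such a tokenizer
 * is typically specialized, not code from the original library; the class
 * name, the base-class constructor signature, and the exact virtual
 * signatures are assumptions. */
#include <cwctype>

class LetterLowerCaseTokenizer : public CharTokenizer {
public:
    explicit LetterLowerCaseTokenizer(Reader* input) : CharTokenizer(input) {}
protected:
    /* a run of letters forms one token; anything else ends it */
    bool isTokenChar(wchar_t c) const { return std::iswalpha(c) != 0; }
    /* fold each buffered character to lower case */
    wchar_t normalize(wchar_t c) const { return std::towlower(c); }
};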
/** reads the next token from the input file into the token buffer; returns whether a token was read */
static SCIP_Bool getNextToken(
   BLKINPUT*             blkinput            /**< BLK reading data */
   )
{
   SCIP_Bool hasdot;
   BLKEXPTYPE exptype;
   char* buf;
   int tokenlen;

   assert(blkinput != NULL);
   assert(blkinput->linepos < BLK_MAX_LINELEN);

   /* check the token stack */
   if( blkinput->npushedtokens > 0 )
   {
      swapPointers(&blkinput->token, &blkinput->pushedtokens[blkinput->npushedtokens-1]);
      blkinput->npushedtokens--;
      SCIPdebugMessage("(line %d) read token again: '%s'\n", blkinput->linenumber, blkinput->token);
      return TRUE;
   }

   /* skip delimiters */
   buf = blkinput->linebuf;
   while( isDelimChar(buf[blkinput->linepos]) )
   {
      if( buf[blkinput->linepos] == '\0' )
      {
         if( !getNextLine(blkinput) )
         {
            blkinput->section = BLK_END;
            SCIPdebugMessage("(line %d) end of file\n", blkinput->linenumber);
            return FALSE;
         }
         assert(blkinput->linepos == 0);
      }
      else
         blkinput->linepos++;
   }
   assert(blkinput->linepos < BLK_MAX_LINELEN);
   assert(!isDelimChar(buf[blkinput->linepos]));

   /* check if the token is a value */
   hasdot = FALSE;
   exptype = BLK_EXP_NONE;
   if( isValueChar(buf[blkinput->linepos], buf[blkinput->linepos+1], TRUE, &hasdot, &exptype) ) /*lint !e679*/
   {
      /* read value token */
      tokenlen = 0;
      do
      {
         assert(tokenlen < BLK_MAX_LINELEN);
         assert(!isDelimChar(buf[blkinput->linepos]));
         blkinput->token[tokenlen] = buf[blkinput->linepos];
         ++tokenlen;
         ++(blkinput->linepos);
         assert(blkinput->linepos < BLK_MAX_LINELEN);
      }
      while( isValueChar(buf[blkinput->linepos], buf[blkinput->linepos+1], FALSE, &hasdot, &exptype) ); /*lint !e679*/
   }
   else
   {
      /* read non-value token */
      tokenlen = 0;
      do
      {
         assert(tokenlen < BLK_MAX_LINELEN);
         blkinput->token[tokenlen] = buf[blkinput->linepos];
         tokenlen++;
         blkinput->linepos++;
         if( tokenlen == 1 && isTokenChar(blkinput->token[0]) )
            break;
      }
      while( !isDelimChar(buf[blkinput->linepos]) && !isTokenChar(buf[blkinput->linepos]) );

      /* if the token is an equation sense '<', '>', or '=', skip a following '='
       * if the token is an equality token '=' and the next character is a '<' or '>', replace the token by the inequality sense
       */
      if( tokenlen >= 1 && (blkinput->token[tokenlen-1] == '<' || blkinput->token[tokenlen-1] == '>' || blkinput->token[tokenlen-1] == '=')
         && buf[blkinput->linepos] == '=' )
      {
         blkinput->linepos++;
      }
      else if( blkinput->token[tokenlen-1] == '=' && (buf[blkinput->linepos] == '<' || buf[blkinput->linepos] == '>') )
      {
         blkinput->token[tokenlen-1] = buf[blkinput->linepos];
         blkinput->linepos++;
      }
   }
   assert(tokenlen < BLK_MAX_LINELEN);
   blkinput->token[tokenlen] = '\0';

   SCIPdebugMessage("(line %d) read token: '%s'\n", blkinput->linenumber, blkinput->token);

   return TRUE;
}
/** reads the next token from the input file into the token buffer; returns whether a token was read */
static SCIP_Bool getNextToken(
   SCIP*                 scip,               /**< SCIP data structure */
   LPINPUT*              lpinput             /**< LP reading data */
   )
{
   SCIP_Bool hasdot;
   LPEXPTYPE exptype;
   char* buf;
   int tokenlen;

   assert(lpinput != NULL);
   assert(lpinput->linepos < LP_MAX_LINELEN);

   /* check the token stack */
   if( lpinput->npushedtokens > 0 )
   {
      swapPointers(&lpinput->token, &lpinput->pushedtokens[lpinput->npushedtokens-1]);
      lpinput->npushedtokens--;
      SCIPdebugMessage("(line %d) read token again: '%s'\n", lpinput->linenumber, lpinput->token);
      return TRUE;
   }

   /* skip delimiters */
   buf = lpinput->linebuf;
   while( isDelimChar(buf[lpinput->linepos]) )
   {
      if( buf[lpinput->linepos] == '\0' )
      {
         if( !getNextLine(scip, lpinput) )
         {
            lpinput->section = LP_END;
            SCIPdebugMessage("(line %d) end of file\n", lpinput->linenumber);
            return FALSE;
         }
         assert(lpinput->linepos == 0);
      }
      else
         lpinput->linepos++;
   }
   assert(lpinput->linepos < LP_MAX_LINELEN);
   assert(!isDelimChar(buf[lpinput->linepos]));

   /* check if the token is a value */
   hasdot = FALSE;
   exptype = LP_EXP_NONE;
   if( isValueChar(buf[lpinput->linepos], buf[lpinput->linepos+1], TRUE, &hasdot, &exptype) )
   {
      /* read value token */
      tokenlen = 0;
      do
      {
         assert(tokenlen < LP_MAX_LINELEN);
         assert(!isDelimChar(buf[lpinput->linepos]));
         lpinput->token[tokenlen] = buf[lpinput->linepos];
         tokenlen++;
         lpinput->linepos++;
      }
      while( isValueChar(buf[lpinput->linepos], buf[lpinput->linepos+1], FALSE, &hasdot, &exptype) );
   }
   else
   {
      /* read non-value token */
      tokenlen = 0;
      do
      {
         assert(tokenlen < LP_MAX_LINELEN);
         lpinput->token[tokenlen] = buf[lpinput->linepos];
         tokenlen++;
         lpinput->linepos++;
         if( tokenlen == 1 && isTokenChar(lpinput->token[0]) )
            break;
      }
      while( !isDelimChar(buf[lpinput->linepos]) && !isTokenChar(buf[lpinput->linepos]) );

      /* if the token is a power sign '^', skip a following '2'
       * if the token is an equation sense '<', '>', or '=', skip a following '='
       * if the token is an equality token '=' and the next character is a '<' or '>', replace the token by the inequality sense
       */
      if( tokenlen >= 1 && lpinput->token[tokenlen-1] == '^' && buf[lpinput->linepos] == '2' )
      {
         lpinput->linepos++;
      }
      if( tokenlen >= 1 && (lpinput->token[tokenlen-1] == '<' || lpinput->token[tokenlen-1] == '>' || lpinput->token[tokenlen-1] == '=')
         && buf[lpinput->linepos] == '=' )
      {
         lpinput->linepos++;
      }
      else if( lpinput->token[tokenlen-1] == '=' && (buf[lpinput->linepos] == '<' || buf[lpinput->linepos] == '>') )
      {
         lpinput->token[tokenlen-1] = buf[lpinput->linepos];
         lpinput->linepos++;
      }
   }
   assert(tokenlen < LP_MAX_LINELEN);
   lpinput->token[tokenlen] = '\0';

   SCIPdebugMessage("(line %d) read token: '%s'\n", lpinput->linenumber, lpinput->token);

   return TRUE;
}
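/* Both getNextToken() variants above (BLK and LP reader) depend on small
 * character-classification helpers - isDelimChar(), isTokenChar() and
 * isValueChar() - that are not part of this excerpt. The sketch below shows
 * plausible definitions for the first two in the same style; the exact
 * character sets are assumptions, not the definitive SCIP implementation. */

/** returns whether the given character is a whitespace/end-of-string delimiter */
static SCIP_Bool isDelimChar(
   char                  c                   /**< input character */
   )
{
   switch( c )
   {
   case ' ':
   case '\f':
   case '\n':
   case '\r':
   case '\t':
   case '\v':
   case '\0':
      return TRUE;
   default:
      return FALSE;
   }
}

/** returns whether the given character stands alone as a single-character token */
static SCIP_Bool isTokenChar(
   char                  c                   /**< input character */
   )
{
   switch( c )
   {
   case '-':
   case '+':
   case ':':
   case '<':
   case '>':
   case '=':
   case '[':
   case ']':
   case '*':
   case '^':
      return TRUE;
   default:
      return FALSE;
   }
}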