void SieveParser::whitespace()
{
    uint p;
    do {
        p = pos();
        switch( nextChar() ) {
        case '#':
        case '/':
            comment();
            break;
        case ' ':
        case '\t':
        case '\r':
        case '\n':
            step();
            break;
        }
    } while ( ok() && pos() > p );
}
/* reads an integer */
void getNum(char *num)
{
    int i;

    if (!isdigit(look))
        expected("Integer");
    for (i = 0; isdigit(look); i++) {
        if (i >= MAXNUM)
            fatal("Integer too long!");
        num[i] = look;
        nextChar();
    }
    num[i] = '\0';
    skipWhite();
}
Token::Ptr MutCTokenizer::extractToken ()
{
    Token::Ptr token;

    // terminate
    if (currentChar () == EOF) {
        token = make_shared <TerminatorToken> (__source);
    }
    // skip space character
    else if (isspace (currentChar ())) {
        nextChar ();
        return extractToken ();
    }
    // number
    else if (currentChar () >= '0' && currentChar () <= '9') {
        token = make_shared <NumberToken> (__source);
    }
    // identifier
    else if (isalpha (currentChar ()) || currentChar () == '_') {
        token = make_shared <IdentifierToken> (__source);
    }
    // string (checked before the generic symbol case, otherwise '"' would be
    // classified as a symbol and this branch would never be reached)
    else if (currentChar () == '"') {
        token = make_shared <StringToken> (__source);
    }
    // symbol
    else if (! isalnum (currentChar ()) && currentChar () != '_') {
        token = make_shared <SymbolToken> (__source);
    }
    else {
        // TODO error handling: unexpected beginning character
        return nullptr;
    }

    token->build (__source);
    return token;
}
/* Checks whether the parentheses are balanced or not
   param: s pointer to a string
   pre: s is not null
   post:
*/
int isBalanced(char* s)
{
    struct DynArr *array = newDynArr(10);
    char c;

    /* stop at the terminating null character, not the digit '0' */
    while ((c = nextChar(s)) != '\0') {
        switch (c) {
            case '{':
            case '[':
            case '(':
                pushDynArr(array, c);
                break;
            case '}':
                if (topDynArr(array) == '{') {
                    popDynArr(array);
                    break;
                }
                deleteDynArr(array);
                return 0;
            case ']':
                if (topDynArr(array) == '[') {
                    popDynArr(array);
                    break;
                }
                deleteDynArr(array);
                return 0;
            case ')':
                if (topDynArr(array) == '(') {
                    popDynArr(array);
                    break;
                }
                deleteDynArr(array);
                return 0;
        }
    }

    if (array->size != 0) {
        deleteDynArr(array);
        return 0;
    }
    deleteDynArr(array);
    return 1;
}
static void cmdLnBatchFlushCrap(    // FLUSH CARRIAGE-RETURNS, ETC
    void )
{
    int chr;                        // - next char

    if( ! CompFlags.batch_file_eof ) {
        for( ; ; ) {
            chr = nextChar();
            if( CompFlags.batch_file_eof ) break;
            if( chr == '\n' ) {
                ++ CompInfo.fc_file_line;
                continue;
            }
            -- batch_cursor;
            *batch_cursor = chr;
            ++ batch_bytes;
            break;
        }
    }
}
void TwoBitFile::createSequenceMeta()
{
    // create SequenceMeta objects from name and offset
    uint32_t seqNameLen, offset;
    char seqName[SEQNAME_MAX_LEN];
    std::string seqNameStr; // name as std::string

    for (uint32_t i = 0; i < sequenceCount_; ++i)
    {
        seqNameLen = nextChar();                        // length
        file_.read(seqName, seqNameLen);                // sequence name
        seqNameStr = std::string(seqName, seqNameLen);
        offset = nextInt();                             // offset
        // add meta data.
        sequences_.emplace(seqNameStr,
            TwoBitSequenceMeta(seqNameStr, offset, filename_, swapped_));
        sequenceNames_.push_back(seqNameStr);
    }
}
/*******************************************************************************
 * Lexer Definitions
 ******************************************************************************/
KAbstractObjParserPrivate::token_id KAbstractObjParserPrivate::lexToken(token_type &token)
{
  // Skip unimplemented features
  switch (currToken())
  {
  case PT_GROUP:
  case PT_OBJECT:
  case PT_SMOOTHING:
  case PT_MATERIAL:
    nextLine();
    break;
  }

  // Tokenization
  for (;;)
  {
    switch (nextChar())
    {
    case KAbstractReader::EndOfFile:
      return PT_EOF;
    case WHITESPACE:
      continue;
    case '\n':
      return PT_ENDSTATEMENT;
    case '#':
      nextLine();
      return PT_ENDSTATEMENT;
    case '/':
      return PT_SEPARATOR;
    case '.':
    default:
      if (Karma::isNumeric(currChar()))
        return lexTokenInteger(token);
      else if (Karma::isAlpha(currChar()))
        return lexTokenIdentifier(token);
      else
        LEX_ERROR("Unexpected character at (%d:%d) '%c'!\n", (int)currLineCount(), (int)currCharCount(), (char)currChar());
    }
  }
}
static void skipToMatch (const char *const pair)
{
    const int begin = pair [0], end = pair [1];
    const unsigned long inputLineNumber = getInputLineNumber ();
    int matchLevel = 1;
    int c = '\0';

    while (matchLevel > 0)
    {
        c = nextChar ();
        if (c == begin)
            ++matchLevel;
        else if (c == end)
            --matchLevel;
        else if (c == '\n'  ||  c == EOF)   /* also stop at EOF so the loop cannot spin forever */
            break;
    }
    if (c == EOF)
        verbose ("%s: failed to find match for '%c' at line %lu\n",
                 getInputFileName (), begin, inputLineNumber);
}
/*******************************************************************************
 * Lexer Definitions
 ******************************************************************************/
KAbstractHdrParserPrivate::token_id KAbstractHdrParserPrivate::lexToken(KAbstractLexer::token_type &token)
{
  // Tokenization
  for (;;)
  {
    switch (nextChar())
    {
    case KAbstractReader::EndOfFile:
      return PT_EOF;
    case WHITESPACE:
      return PT_ENDSTATEMENT;
    case '\n':
      return PT_ENDOFHEADER;
    case '#':
      nextLine();
      return PT_ENDSTATEMENT;
    default:
      return lexTokenKeyValue(token);
    }
  }
}
bool tokenize() {
    char c;
    while(1) {
        c = nextChar();
        printf("%c\n", c);
        switch ( getState(c) ) {
            case START_STATE:
                tokenIndex = 0;
                memset(token, 0, 100);
                break;
            case VARIABLE_STATE:
                if ( prevState != VARIABLE_STATE ) {
                    tokenIndex = 0;
                    memset(token, 0, 100);
                }
                concat(c);
                break;
            case INTEGER_STATE:
                if ( prevState != VARIABLE_STATE ) {
                    tokenIndex = 0;
                    memset(token, 0, 100);
                }
                concat(c);
                break;
            case OPERATOR_STATE:
                break;
            case END_OPERATOR_STATE:
                break;
            case FINAL_STATE:
                printf("Parse complete. Exiting.\n");
                return true;
            case ERROR_STATE:
                printf("Syntax error. Exiting.\n");
                return false;
            default:
                printf("Unexpected error!\n");
                return false;
        }
    }
}
void InText::readDouble(double& d, PhysicalInStream& stream)
{
  buf = "";
  skipWhitespace(stream);
  if(!isEof(stream) && (theChar == '-' || theChar == '+'))
  {
    buf += theChar;
    nextChar(stream);
  }
  while(!isEof(stream) && isdigit(theChar))
  {
    buf += theChar;
    nextChar(stream);
  }
  if(!isEof(stream) && theChar == '.')
  {
    buf += theChar;
    nextChar(stream);
  }
  while(!isEof(stream) && isdigit(theChar))
  {
    buf += theChar;
    nextChar(stream);
  }
  if(!isEof(stream) && (theChar == 'e' || theChar == 'E'))
  {
    buf += theChar;
    nextChar(stream);
  }
  if(!isEof(stream) && (theChar == '-' || theChar == '+'))
  {
    buf += theChar;
    nextChar(stream);
  }
  while(!isEof(stream) && isdigit(theChar))
  {
    buf += theChar;
    nextChar(stream);
  }
  d = atof(buf.c_str());
  skipWhitespace(stream);
}
int KAbstractObjParserPrivate::lexReadInteger(int *sign)
{
  *sign = 1;
  int integer = 0;

  // Check for negation
  if (currChar() == '-')
    *sign = -1;
  else if (currChar() == '+')
    ; // Do nothing, sign is already 1
  else
    integer = Karma::ctoi(currChar());

  // Read the integer value
  while (Karma::isNumeric(peekChar()))
  {
    integer *= 10;
    integer += Karma::ctoi(nextChar());
  }

  return integer;
}
void Scanner::nextToken() {
    nextTokenInternal();
    while (ftoken == CodeTypes::tStartComment) {
        // Comment ends with an end of line char
        while ((fch != LF) && (fch != EOFCHAR))
            fch = getCharFromBuffer();
        //while ((fch != CR) && (fch != EOFCHAR))
        //    fch = getCharFromBuffer();
        //while ((fch == LF) && (fch != EOFCHAR))
        //    fch = getCharFromBuffer();
        while (fch == LF) {
            yylineno++;
            //while (fch == LF)
            //    nextChar();
            // Dump the linefeed
            fch = nextChar();
        }
        nextTokenInternal(); // get the real next token
    }
}
TokenT *_expofloat(TokenizerT *tk, int isFirst, int lastWasSign) {
    nextChar(tk);

    if(isdigit(tk->inputIter[0])) {
        return _expofloat(tk, 0, 0);
    } else if(tk->inputIter[0] == '+' || tk->inputIter[0] == '-') {
        if(isFirst) {
            return _expofloat(tk, 0, 1);
        } else if(lastWasSign) {
            return _invalid(tk);
        } else {
            return makeToken(tk, "float with exponent");
        }
    } else {
        if(isFirst) {
            return _invalid(tk);
        } else if(lastWasSign) {
            return _invalid(tk);
        } else {
            return makeToken(tk, "float with exponent");
        }
    }
}
QChar QComplexText::shapedCharacter( const QString &str, int pos, const QFontMetrics *fm )
{
    const QChar *ch = str.unicode() + pos;
    if ( ch->row() != 0x06 )
        return *ch;
    else {
        int shape = glyphVariantLogical( str, pos );
        //qDebug("mapping U+%x to shape %d glyph=0x%x", ch->unicode(), shape, arabicUnicodeMapping[ch->cell()][shape]);
        // lam aleph ligatures
        switch ( ch->cell() ) {
            case 0x44: { // lam
                const QChar *nch = nextChar( str, pos );
                if ( nch->row() == 0x06 ) {
                    switch ( nch->cell() ) {
                        case 0x22:
                        case 0x23:
                        case 0x25:
                        case 0x27:
                            return QChar(arabicUnicodeLamAlefMapping[nch->cell() - 0x22][shape]);
                        default:
                            break;
                    }
                }
                break;
            }
            case 0x22: // alef with madda
            case 0x23: // alef with hamza above
            case 0x25: // alef with hamza below
            case 0x27: // alef
                if ( prevChar( str, pos )->unicode() == 0x0644 )
                    // have a lam alef ligature
                    return QChar(0);
            default:
                break;
        }
        return QChar( getShape( ch, ch->cell(), shape, fm ) );
    }
}
LexBase::LexBase(const char * str) throw(ConfigurationException)
{
    StringBuffer msg;

    //--------
    // Initialize state for the multi-byte functions in the C library.
    //--------
    memset(&m_mbtowcState, 0, sizeof(mbstate_t));

    m_keywordInfoArray = 0;
    m_keywordInfoArraySize = 0;
    m_funcInfoArray = 0;
    m_funcInfoArraySize = 0;

    m_uidIdentifierProcessor = new UidIdentifierDummyProcessor();
    m_amOwnerOfUidIdentifierProcessor = true;
    m_sourceType = Configuration::INPUT_STRING;
    m_source = str;
    m_lineNum = 1;
    m_ptr = m_source;
    m_atEOF = false;
    nextChar(); // initialize m_ch
}
void InConfig::skipWhitespace(PhysicalInStream& stream)
{
  while(!isEof(stream) && isWhitespace())
  {
    while(!isEof(stream) && InText::isWhitespace())
      nextChar(stream);
    if(!isEof(stream))
    {
      if(theChar == '/' && theNextChar == '/')
      {
        skipLine(stream);
      }
      else if(theChar == '/' && theNextChar == '*')
      {
        skipComment(stream);
      }
      else if(theChar == '#')
      {
        skipLine(stream);
      }
    }
  }
}
bool FlowLexer::initialize(std::istream *input, const std::string& name)
{
    stream_ = input;
    filename_ = name;

    if (stream_ == NULL)
        return false;

    lastPos_.set(1, 1, 0);
    currPos_.set(1, 1, 0);
    nextPos_.set(1, 1, 0);
    currentChar_ = '\0';

    currLocation_.fileName = filename_;
    currLocation_.begin.set(1, 1, 0);
    currLocation_.end.set(1, 1, 0);

    content_.clear();

    nextChar();
    nextToken();

    return true;
}
/* Checks whether the (), {}, and [] are balanced or not
   param: s pointer to a string
   pre: s is not null
   post:
*/
int isBalanced(char* s)
{
    char c = ' ';
    int ret_val = 0;
    struct DynArr* stack = newDynArr(10);

    assert(stack != 0);
    assert(s != 0);

    while (c != '\0') {
        ret_val = 1;
        c = nextChar(s);
        if (c == '(' || c == '[' || c == '{')
            pushDynArr(stack, c);
        if (c == ')' || c == ']' || c == '}') {
            if (c == ')' && topDynArr(stack) == '(')
                popDynArr(stack);
            else if (c == ']' && topDynArr(stack) == '[')
                popDynArr(stack);
            else if (c == '}' && topDynArr(stack) == '{')
                popDynArr(stack);
            else {
                deleteDynArr(stack); /* free the stack before the early return */
                return 0;
            }
        }
    }

    if (ret_val)
        ret_val = !(isEmptyDynArr(stack));
    printf("return value %d\n", ret_val);
    deleteDynArr(stack);
    return ret_val;
}
int thisIsTheEnd(OBJ inStream){
    /*
     * This is the end
     * Beautiful friend
     * This is the end
     * My only friend, the end
     */
    int end = 0;
    char ch;

    do{
        //printf(CYN "\nendSearch: " RESET);
        ch = nextChar(inStream);
        if(ch == '\n' || ch == EOF){
            //printf(RED "<END>" RESET);
            end = 1;
            break;
        }
    } while (isWhiteSpace(ch));

    unreadChar(inStream, ch);
    return end;
}
Statement* Parser::parse(const String& file, Engine::ErrorHandler errorHandler, void* userData)
{
  this->errorHandler = errorHandler;
  this->errorHandlerUserData = userData;
  this->filePath = file;

  try
  {
    if(!this->file.open(file))
    {
      errorHandler(errorHandlerUserData, file, 0, Error::getString());
      throw false;
    }

    includeFile = new IncludeFile(engine);
    includeFile->fileDir = File::getDirname(file);

    nextChar(); // read first character
    nextToken(); // read first symbol

    return readFile();
  }
  catch(...)
  {
    return 0;
  }
}
/*
 * TKGetNextToken returns the next token from the token stream as a
 * character string. Space for the returned token should be dynamically
 * allocated. The caller is responsible for freeing the space once it is
 * no longer needed.
 *
 * If the function succeeds, it returns a C string (delimited by '\0')
 * containing the token. Else it returns 0.
 *
 * You need to fill in this function as part of your implementation.
 */
TokenT *TKGetNextToken(TokenizerT *tk) {
    clearBuffer(tk);
    char curr = tk->inputIter[0];

    // skip all whitespace before next token
    while(isspace(curr)) {
        nextChar(tk);
        clearBuffer(tk);
        curr = tk->inputIter[0];
    }

    if(curr == '\0') {
        return NULL;
    } else if(isalpha(curr) || curr == '_') {
        return _word(tk);
    } else if(curr == '0') {
        return _zero(tk);
    } else if(isdigit(curr)) {
        return _decimal(tk);
    } else if(curr == '!') {    // neq
        return _neq(tk);
    } else if(curr == '"') {    // double_quote
        return _double_quote(tk);
    } else if(curr == '#') {
        return _pound(tk);
    } else if(curr == '$') {    // INVALID
        return _invalid(tk);
    } else if(curr == '%') {    // mod, mod_eq
        return _mod(tk);
    } else if(curr == '&') {    // bit_and, log_and, address (?)
        return _bit_and(tk);
    } else if(curr == '\'') {   // single_quote
        return _single_quote(tk);
    } else if(curr == '(') {    // open_paren
        return _open_paren(tk);
    } else if(curr == ')') {    // close_paren
        return _close_paren(tk);
    } else if(curr == '*') {    // mult, mult_eq, pointer (?)
        return _mult(tk);
    } else if(curr == '+') {    // plus, plus_eq, inc
        return _plus(tk);
    } else if(curr == ',') {    // comma
        return _comma(tk);
    } else if(curr == '-') {    // minus, minus_eq, dec, struct_pointer
        return _minus(tk);
    } else if(curr == '.') {    // dot
        return _dot(tk);
    } else if(curr == '/') {    // div, div_eq
        return _div(tk);
    } else if(curr == ':') {    // ternary_colon
        return _ternary_colon(tk);
    } else if(curr == ';') {    // semicolon
        return _semicolon(tk);
    } else if(curr == '<') {    // lt, lshift, lt_eq
        return _lt(tk);
    } else if(curr == '=') {    // eq, assign
        return _eq(tk);
    } else if(curr == '>') {    // gt, rshift, gt_eq
        return _gt(tk);
    } else if(curr == '?') {    // ternary_qmark
        return _ternary_qmark(tk);
    } else if(curr == '@') {    // INVALID
        return _invalid(tk);
    } else if(curr == '[') {    // open_bracket
        return _open_bracket(tk);
    } else if(curr == '\\') {   // backslash (?)
        return _invalid(tk);
    } else if(curr == ']') {    // close_bracket
        return _close_bracket(tk);
    } else if(curr == '^') {    // bit_xor
        return _bit_xor(tk);
    } else if(curr == '`') {    // INVALID
        return _invalid(tk);
    } else if(curr == '{') {    // open_brace
        return _open_brace(tk);
    } else if(curr == '|') {    // bit_or, log_or
        return _bit_or(tk);
    } else if(curr == '}') {    // close_brace
        return _close_brace(tk);
    } else if(curr == '~') {    // bit_not
        return _bit_not(tk);
    } else {
        return _invalid(tk);
    }
}
bool MorkParser::parseCell()
{
    bool Result = true;
    bool bColumnOid = false;
    bool bValueOid = false;
    bool bColumn = true;
    int Corners = 0;

    // Column = Value
    QString Column;
    QString Text;

    char cur = nextChar();

    // Process cell start with column (bColumn == true)
    while (Result && cur != ')' && cur) {
        switch (cur) {
        case '^': // Oids
            Corners++;
            if (Corners == 1) {
                bColumnOid = true;
            } else if (Corners == 2) {
                bColumn = false;
                bValueOid = true;
            } else {
                Text += cur;
            }
            break;
        case '=': // From column to value
            if (bColumn) {
                bColumn = false;
            } else {
                Text += cur;
            }
            break;
        case '\\': { // Get next two chars
            char NextChar = nextChar();
            if ('\r' != NextChar && '\n' != NextChar) {
                Text += NextChar;
            } else {
                nextChar();
            }
        }
        break;
        case '$': { // Get next two chars
            QString HexChar;
            HexChar += nextChar();
            HexChar += nextChar();
            Text += (char) HexChar.toInt(0, 16);
        }
        break;
        default: // Just a char
            if (bColumn) {
                Column += cur;
            } else {
                Text += cur;
            }
            break;
        }

        cur = nextChar();
    }

    Q_UNUSED(bColumnOid);

    // Apply column and text
    int ColumnId = Column.toInt(0, 16);

    if (NPRows != nowParsing_) {
        // Dicts
        if (!Text.isEmpty()) {
            if (nowParsing_ == NPColumns) {
                mColumns[ ColumnId ] = Text;
                //qCDebug(IMPORTWIZARD_LOG)<<" column :"<<ColumnId<<" Text "<<Text;
            } else {
                mValues[ ColumnId ] = Text;
                //qCDebug(IMPORTWIZARD_LOG)<<" ColumnId "<<ColumnId<<" Value : "<<Text;
            }
        }
    } else {
        if (!Text.isEmpty()) {
            // Rows
            int ValueId = Text.toInt(0, 16);

            if (bValueOid) {
                (*mCurrentCells)[ ColumnId ] = ValueId;
            } else {
                mNextAddValueId--;
                mValues[ mNextAddValueId ] = Text;
                (*mCurrentCells)[ ColumnId ] = mNextAddValueId;
            }
        }
    }

    return Result;
}
TokenT *_pound(TokenizerT *tk) {
    nextChar(tk);
    return makeToken(tk, "include operator");
}
TokenT *_ternary_colon(TokenizerT *tk) {
    nextChar(tk);
    return makeToken(tk, "ternary colon operator");
}
TokenT *_close_brace(TokenizerT *tk) {
    nextChar(tk);
    return makeToken(tk, "closing brace");
}
TokenT *_open_brace(TokenizerT *tk) {
    nextChar(tk);
    return makeToken(tk, "opening brace");
}
TokenT *_bit_xor(TokenizerT *tk) {
    nextChar(tk);
    return makeToken(tk, "bitwise-xor operator");
}
TokenT *_ternary_qmark(TokenizerT *tk) {
    nextChar(tk);
    return makeToken(tk, "ternary question mark operator");
}
TokenT *_semicolon(TokenizerT *tk) {
    nextChar(tk);
    return makeToken(tk, "end-of-statement operator");
}