// Parses a JSON array whose '[' token is tokenStart.  Fills currentValue()
// with the parsed elements; on a malformed element or separator, records an
// error and resynchronizes at the closing ']'.  Returns true on success,
// false after error recovery.
//
// Fix: guard the empty-array peek with `current_ != end_` so a stream that
// ends right after '[' does not read past the buffer.
bool Reader::readArray(Token& tokenStart) {
  currentValue() = Value(arrayValue);
  currentValue().setOffsetStart(tokenStart.start_ - begin_);
  skipSpaces();
  if (current_ != end_ && *current_ == ']') // empty array
  {
    Token endArray;
    readToken(endArray);
    return true;
  }
  int index = 0;
  for (;;) {
    Value& value = currentValue()[index++];
    nodes_.push(&value);
    bool ok = readValue();
    nodes_.pop();
    if (!ok) // error already set by readValue()
      return recoverFromError(tokenArrayEnd);

    Token token;
    // Accept comments after the last item in the array.
    ok = readToken(token);
    while (token.type_ == tokenComment && ok) {
      ok = readToken(token);
    }
    bool badTokenType = (token.type_ != tokenArraySeparator &&
                         token.type_ != tokenArrayEnd);
    if (!ok || badTokenType) {
      return addErrorAndRecover("Missing ',' or ']' in array declaration",
                                token, tokenArrayEnd);
    }
    if (token.type_ == tokenArrayEnd)
      break;
  }
  return true;
}
/* Parses a PHP trait declaration:
 *   trait Foo {}
 * Tags the trait name and enters its scope when a body follows.
 * Returns FALSE when no identifier follows the "trait" keyword; the
 * return value tells the caller whether the current token was consumed. */
static boolean parseTrait (tokenInfo *const token)
{
	tokenInfo *traitName;
	boolean consumeNext = TRUE;

	readToken (token);
	if (token->type != TOKEN_IDENTIFIER)
		return FALSE;

	traitName = newToken ();
	copyToken (traitName, token, TRUE);
	makeSimplePhpTag (traitName, K_TRAIT, ACCESS_UNDEFINED);

	readToken (token);
	if (token->type == TOKEN_OPEN_CURLY)
		enterScope (token, traitName->string, K_TRAIT);
	else
		consumeNext = FALSE;  /* token belongs to the next construct */

	deleteToken (traitName);
	return consumeNext;
}
// Advances from _current to its next sibling value in the JSON stream and
// reads it into _next.  Returns false when no further sibling exists (the
// enclosing container closes first, or the token stream runs out).
inline bool Json::next(const JsonValue& _current, JsonValue& _next)
{
    // Re-seek the read cursor to where the current value starts.
    m_readPos = m_fileContent.data() + _current.offset;
    // Iterate through the stream to the next ','. Meanwhile the number of
    // brackets ({} and []) must match.
    int parenthesis = 0;
    do {
        if(!readToken()) return false;  // out of tokens -> no sibling
        if(m_tokenPos[0] == '{' || m_tokenPos[0] == '[') ++parenthesis;
        else if(m_tokenPos[0] == '}' || m_tokenPos[0] == ']') --parenthesis;
    } while(parenthesis > 0 || m_tokenPos[0] != ',');
    // More closers than openers: we left the enclosing container without
    // finding a separator, so _current was its last element.
    if(parenthesis < 0) return false;
    readProperty(_next);
    return true;
}
/* Parses an Eiffel "class" declaration: tags the class name, then walks the
 * class body dispatching to the clause parsers (generics, inherit, feature,
 * convert) until the terminating "end" keyword or end of file. */
static void parseClass (tokenInfo *const token)
{
	Assert (isKeyword (token, KEYWORD_class));
	readToken (token);
	if (isType (token, TOKEN_IDENTIFIER))
	{
#ifndef TYPE_REFERENCE_TOOL
		makeEiffelClassTag (token);
		readToken (token);
#else
		/* Reference-tool build: print the upper-cased class name instead
		 * of generating a tag. */
		vStringCopy (token->className, token->string);
		vStringUpper (token->className);
		if (PrintClass)
			puts (vStringValue (token->className));
		if (! PrintReferences)
			exit (0);
		readToken (token);
#endif
	}
	do
	{
		if (isType (token, TOKEN_OPEN_BRACKET))
			parseGeneric (token, TRUE);  /* formal generic parameter list */
		else if (! isType (token, TOKEN_KEYWORD))
			readToken (token);
		else switch (token->keyword)
		{
			case KEYWORD_inherit: parseInherit (token); break;
			case KEYWORD_feature: parseFeatureClauses (token); break;
			case KEYWORD_convert: parseConvert (token); break;
			default: readToken (token); break;
		}
	} while (! isKeyword (token, KEYWORD_end) &&
	         ! isType (token, TOKEN_EOF));
}
/* Parses an Eiffel "rename" clause: a comma-separated list of
 * "<feature> as <new_name>" pairs.  Each renamed feature is tagged;
 * afterwards the parser skips ahead past the clause's "end" keyword. */
static void parseRename (tokenInfo *const token)
{
	do
	{
		readToken (token);
		if (! readFeatureName (token))
			continue;  /* malformed entry: resync at the next comma */
		readToken (token);
		if (! isKeyword (token, KEYWORD_as))
			continue;
		readToken (token);
		if (readFeatureName (token))
		{
#ifndef TYPE_REFERENCE_TOOL
			makeEiffelFeatureTag (token);  /* renamed feature */
#endif
			readToken (token);
		}
	} while (isType (token, TOKEN_COMMA));

	findKeyword (token, KEYWORD_end);
	readToken (token);
}
// Parses an INSERT statement:
//   INSERT INTO <identifier> VALUES ( <literal> [, <literal>]* ) ;
// Returns the INSERT parse node, or nullptr on a syntax error.
PARSENODE_PTR SQLParser::parseInsert()
{
    if (!startsInsert(nowReading)) {
        syntaxError(nowReading, "expect insert token!");
        return nullptr;
    }
    //LOG_TRACE(logger, "parse insert statement.");
    PARSENODE_PTR node(new ParseNode(INSERT));

    readToken();                                  // consume INSERT
    expect(INTO);
    node->children.push_back(parseIdentifier());  // target table

    expect(VALUES);
    expect(LEFT_BRACE);
    node->children.push_back(parseLiteral());     // first value
    while (nowReading == SLICE) {                 // comma-separated rest
        readToken();
        node->children.push_back(parseLiteral());
    }
    expect(RIGHT_BRACE);
    expect(TERMINATOR);

    return node;
}
/* Parses an Eiffel "convert" clause.  Each entry is an identifier
 * optionally followed by a parenthesized argument list or by ':' and a
 * brace-enclosed type list; entries are comma-separated.
 *
 * Fixes: the original tested the same token for TOKEN_IDENTIFIER and then
 * for TOKEN_OPEN_PAREN / TOKEN_COLON in one else-if chain, so the paren
 * and colon branches could never execute; a readToken now advances to the
 * entry's suffix first.  The skip loops are also guarded against EOF so a
 * truncated file cannot loop forever. */
static void parseConvert (tokenInfo *const token)
{
	Assert (isKeyword (token, KEYWORD_convert));
	do
	{
		readToken (token);
		if (! isType (token, TOKEN_IDENTIFIER))
			break;
		readToken (token);  /* token following the entry's name */
		if (isType (token, TOKEN_OPEN_PAREN))
		{
			while (! isType (token, TOKEN_CLOSE_PAREN) &&
			       ! isType (token, TOKEN_EOF))
				readToken (token);
			readToken (token);  /* move past ')' to the separator */
		}
		else if (isType (token, TOKEN_COLON))
		{
			readToken (token);
			if (! isType (token, TOKEN_OPEN_BRACE))
				break;
			else while (! isType (token, TOKEN_CLOSE_BRACE) &&
			            ! isType (token, TOKEN_EOF))
				readToken (token);
			readToken (token);  /* move past '}' to the separator */
		}
	} while (isType (token, TOKEN_COMMA));
}
// Reads the next token from `tokens`, requiring it to be of `type`.  On
// success copies the token's payload into *data (when data is non-null)
// and returns true; returns false when the expected token is absent.
// The consumed token is destroyed here.
bool DbcParser::expectData(DbcParser::DbcTokenList &tokens, dbc_token_type_t type, QString *data, bool skipWhitespace, bool skipSectionEnding, bool newLineIsSectionEnding)
{
    DbcToken *token = readToken(tokens, type, skipWhitespace, skipSectionEnding, newLineIsSectionEnding);
    if (!token) {
        return false;
    }

    if (data) {
        data->clear();
        data->append(token->getData());
    }

    // Use delete (not free) so DbcToken's destructor runs and its QString
    // members release their storage; free() skips the destructor entirely.
    delete token;
    return true;
}
// Error-recovery helper: consumes tokens until `skipUntilToken` (or the
// end of the stream) is reached, discarding any errors generated while
// skipping.  Always returns false so callers can write
// `return recoverFromError(...)` directly.
bool Reader::recoverFromError(TokenType skipUntilToken)
{
    const int preservedErrors = int(errors_.size());
    Token discarded;
    while (true)
    {
        if (!readToken(discarded))
            errors_.resize(preservedErrors); // drop errors caused by recovery
        if (discarded.type_ == skipUntilToken ||
            discarded.type_ == tokenEndOfStream)
            break;
    }
    errors_.resize(preservedErrors);
    return false;
}
/* Tags the next token as a VHDL constant: VHDLTAG_LOCAL when declared in a
 * local scope, VHDLTAG_CONSTANT otherwise.  The rest of the declaration is
 * skipped up to the terminating ';'. */
static void parseConstant (bool local)
{
	tokenInfo *const constName = newToken ();

	readToken (constName);
	makeVhdlTag (constName, local ? VHDLTAG_LOCAL : VHDLTAG_CONSTANT);
	skipToCharacterInInputFile (';');
	deleteToken (constName);
}
/* Parses an Eiffel "local" clause, tagging each declared entity until the
 * routine body starts ("do" or "once").
 *
 * Fix: also stop on TOKEN_EOF -- without the guard a truncated file spins
 * forever in this loop (parseClass applies the same guard). */
static void parseLocal (tokenInfo *const token)
{
	Assert (isKeyword (token, KEYWORD_local));
	readToken (token);

	/* Check keyword first in case local clause is empty */
	while (! isKeyword (token, KEYWORD_do) &&
	       ! isKeyword (token, KEYWORD_once) &&
	       ! isType (token, TOKEN_EOF))
	{
#ifndef TYPE_REFERENCE_TOOL
		if (isType (token, TOKEN_IDENTIFIER))
			makeEiffelLocalTag (token);
#endif
		readToken (token);
		if (isType (token, TOKEN_COLON))
		{
			readToken (token);
			if (isType (token, TOKEN_IDENTIFIER))
				parseType (token);  /* the declared entity's type */
		}
	}
}
/* Generates tags for a JSON input by repeatedly parsing top-level values
 * until end of input.  Multiple top-level elements are accepted even
 * though that is not strictly valid JSON; an interesting side effect is
 * that a leading Unicode BOM is tolerated, which many JSON parsers
 * choke on. */
static void findJsonTags (void)
{
	tokenInfo *const token = newToken ();

	for (;;)
	{
		readToken (token);
		parseValue (token);
		if (token->type == TOKEN_EOF)
			break;
	}

	deleteToken (token);
}
/* Parses an Eiffel "inherit" clause: a list of parent types, each
 * optionally followed by feature-adaptation subclauses (rename, export,
 * undefine, redefine, select) that are processed or skipped up to their
 * closing "end". */
static void parseInherit (tokenInfo *const token)
{
	Assert (isKeyword (token, KEYWORD_inherit));
#ifdef TYPE_REFERENCE_TOOL
	readToken (token);
	while (isType (token, TOKEN_IDENTIFIER))
	{
		parseType (token);
		if (isType (token, TOKEN_KEYWORD))
		{
			switch (token->keyword)  /* check for feature adaptation */
			{
				case KEYWORD_rename:
				case KEYWORD_export:
				case KEYWORD_undefine:
				case KEYWORD_redefine:
				case KEYWORD_select:
					/* skip the whole adaptation subclause */
					findKeyword (token, KEYWORD_end);
					readToken (token);
					/* fall through */
				default: break;
			}
		}
	}
#else
	readToken (token);
	while (isType (token, TOKEN_IDENTIFIER))
	{
		parseType (token);
		switch (token->keyword)  /* check for feature adaptation */
		{
			case KEYWORD_rename:
				parseRename (token);  /* tags the renamed features */
				if (isKeyword (token, KEYWORD_end))
					readToken (token);
				break;
			case KEYWORD_export:
			case KEYWORD_undefine:
			case KEYWORD_redefine:
			case KEYWORD_select:
				/* no tags generated; skip to the subclause's "end" */
				findKeyword (token, KEYWORD_end);
				readToken (token);
				break;
			case KEYWORD_end:
				readToken (token);
				break;
			default: break;
		}
	}
#endif
}
/* Driver for HTML tag extraction: scans tokens until end of input, handing
 * each tag-start token to readTag(). */
static void findHtmlTags (void)
{
	tokenInfo token;

	token.string = vStringNew ();

	for (;;)
	{
		readToken (&token, true);
		if (token.type == TOKEN_TAG_START)
			readTag (&token, NULL, 0);
		if (token.type == TOKEN_EOF)
			break;
	}

	vStringDelete (token.string);
}
// Lexes the next token from the [it, end) range, advancing the cursor.
// Returns a tok_* enumerator for identifiers/keywords/numbers/end-of-input,
// or the raw character value for anything else.  Side effects: _identifier
// and _number hold the payload of the token just read; _lastChar carries a
// one-character lookahead between calls.
//
// Fixes: the original dereferenced the cursor without checking `it != end`
// both in the comment-skipping loop and when reloading the lookahead at the
// end of the function (even right after detecting `it == end`), reading
// past the buffer on inputs ending in a comment or at the last character.
int Parser::readToken(s_cursor_t &it, const s_cursor_t &end) {
    // skip any whitespace
    while (it != end && isspace(_lastChar))
        _lastChar = *(it++);

    if (isalpha(_lastChar)) { // identifier: [a-zA-Z][a-zA-Z0-9]*
        _identifier = _lastChar;
        while (it != end && isalnum((_lastChar = *(it++))))
            _identifier += _lastChar;
        if (_identifier == def_id)
            return tok_def;
        if (_identifier == extern_id)
            return tok_extern;
        return tok_identifier;
    }

    if (isdigit(_lastChar)) { // number: [0-9]+
        std::string num;
        do {
            num += _lastChar;
        } while (it != end && isdigit((_lastChar = *(it++))));
        _number = strtol(num.c_str(), nullptr, 10);
        return tok_number;
    }

    if (_lastChar == '#') { // comment: ignore everything up to end of line
        _lastChar = 0;
        while (it != end) { // bounds-checked before each dereference
            _lastChar = *(it++);
            if (_lastChar == '\n' || _lastChar == '\r')
                break;
        }
        if (it != end && _lastChar != EOF)
            return readToken(it, end); // lex the token after the comment
    }

    if (it == end || _lastChar == EOF) {
        _lastChar = tok_eof;
        return tok_eof; // never advance the cursor past `end`
    }

    // unknown character: return it verbatim and reload the lookahead
    int thisChar = _lastChar;
    _lastChar = *(it++);
    return thisChar;
}
bool Parser::processRenderer() { std::string strType, // Tipo de renderer strTemp; // Auxiliar para comprobaciones. int nNumSamples, nMaxDepth; if(!readBloqueTxt("type", strType)) return false; if(!readBloqueInts("samples", 1, &nNumSamples)) return false; if(!readBloqueInts("max_depth", 1, &nMaxDepth)) return false; if(nNumSamples > 0) // Otherwise, use default. m_pGlobals->nSamplesPerPixel = nNumSamples; if(nMaxDepth > 0) // Otherwise, use default. m_pGlobals->nMaxDepth = nMaxDepth; if(!ignorarChars()) return false; if(!readToken(strTemp)) return false; if(strTemp != "/renderer") return false; if(strType == "whitted") { if(m_pGlobals->pRenderer == NULL) m_pGlobals->pRenderer = new DirectRenderer(m_pGlobals->nMaxDepth); else // Renderer ya creado, error. return false; } else if(strType == "path") { if(m_pGlobals->pRenderer == NULL) m_pGlobals->pRenderer = new PathRenderer(m_pGlobals->nMaxDepth, time(NULL)); else return false; } else // Renderer desconocido, error. return false; return true; }
bool MenuConf::readItemNameIfPresent(FILE* f, Item* item) { eatWhitespace(f); if(! isAlpha(curChar)) { return true;//Name not present } if(! readToken(f, item->data->name, ItemData::NameSize)) { return false; } eatWhitespace(f); if(curChar != ':') { printf("%d, %d: Expected ':'\n", lineIndex, caret); return false; } nextChar(f); return true; }
/* JNI entry point: parses the comma-separated constraint list in
 * `constraintString`, ANDs the parsed expressions together and asks the
 * validity checker whether the conjunction is satisfiable.
 *
 * Fixes: the UTF string is now released on the empty-constraint early
 * return (it leaked before), and a failed GetStringUTFChars returns right
 * after raising the exception instead of dereferencing NULL. */
JNIEXPORT jboolean JNICALL Java_gov_nasa_jpf_symbolic_dp_NativeInterface_isSatisfiable
  (JNIEnv *env, jclass cls, jstring constraintString)
{
	constraint = (char*) (*env)->GetStringUTFChars(env, constraintString, NULL);
	if (constraint == NULL) {
		throwRuntimeException("out of memory?");
		return FALSE;  /* exception pending; must not touch NULL string */
	}
	//printf( "query: %s\n", constraint);
	//fflush(stdout);

	if (constraint[0] == '\0') {
		(*env)->ReleaseStringUTFChars(env, constraintString, constraint);
		return TRUE;  /* empty query is trivially satisfiable */
	}

	vc_push(vc);
	marker = 0;

	/* Count the constraints: one more than the number of commas. */
	int constraintCount = 1;
	char c;
	int i = 0;
	do {
		c = constraint[i++];
		if (c == ',')
			constraintCount++;
	} while (c != '\0');

	Expr* constraintArray = (Expr*) malloc(sizeof(Expr) * constraintCount);

	i = 0;
	char token[2]; /* it must be just a comma and '\0' */
	do {
		constraintArray[i++] = parse();
	} while (readToken(token));

	Expr andExpr = vc_andExprN(vc, constraintArray, constraintCount);
	linkedlist_add(&exprPool, andExpr);

	jboolean result = check(vc, andExpr);
	//fflush(stdout);

	/* clean up */
	(*env)->ReleaseStringUTFChars(env, constraintString, constraint);
	free(constraintArray);
	freeStuff();
	vc_pop(vc);

	return result;
}
/* Tokenizer::peekToken
 * Reads the next token, then rewinds the tokenizer state so the token is
 * not consumed.  Returns the token that the next readToken() will yield.
 *******************************************************************/
string Tokenizer::peekToken()
{
	// Snapshot the tokenizer position
	char*    saved_current  = current;
	uint32_t saved_position = position;
	int      saved_line     = line;

	// Advance over the next token to capture it
	readToken();

	// Restore the snapshot so the token remains unconsumed
	current  = saved_current;
	position = saved_position;
	line     = saved_line;

	return token_current;
}
/* Reads one employee record (name, title, years, salary) from fp and
 * returns a newly built Employee, or the null pointer at end of file.
 *
 * Fix: removed the no-op self-assignment "name = name;" left in the
 * original between the EOF check and the title read. */
Employee * readEmployeeRecord(FILE *fp) // we pass the file pointer in
{
    char *name, *title;
    int years;
    double salary;

    name = readString(fp); // name is a string, not a token
    if (feof(fp)) {
        /* NOTE(review): if readString allocates even at EOF, `name` leaks
         * here -- verify readString's contract. */
        return 0; // no record, return the null pointer
    }
    title = readToken(fp);
    years = readInt(fp);
    salary = readReal(fp);
    return newEmployee(name, title, years, salary);
}
/* Parses successive Eiffel "feature" clauses, tagging each feature, until
 * an "end", "invariant" or "indexing" keyword terminates the class body.
 *
 * Fix: also stop on TOKEN_EOF so a truncated source file cannot loop
 * forever (parseClass applies the same guard). */
static void parseFeatureClauses (tokenInfo *const token)
{
	Assert (isKeyword (token, KEYWORD_feature));
	do
	{
		if (isKeyword (token, KEYWORD_feature))
			parseExport (token);  /* export-status list after "feature" */
		if (! isKeyword (token, KEYWORD_feature) &&
		    ! isKeyword (token, KEYWORD_invariant) &&
		    ! isKeyword (token, KEYWORD_indexing))
		{
			/* Advance past anything that is not a feature declaration. */
			if (! parseFeature (token))
				readToken (token);
		}
	} while (! isKeyword (token, KEYWORD_end) &&
	         ! isKeyword (token, KEYWORD_invariant) &&
	         ! isKeyword (token, KEYWORD_indexing) &&
	         ! isType (token, TOKEN_EOF));
}
/* Parses one Eiffel feature declaration: a comma-separated list of feature
 * names, an optional "alias" string, optional arguments, an optional
 * result type (query), optional "assign"/"obsolete" parts, and finally
 * the feature body up to its end.  Returns TRUE when at least one feature
 * name was read. */
static boolean parseFeature (tokenInfo *const token)
{
	boolean found = FALSE;
	while (readFeatureName (token))
	{
		found = TRUE;
#ifndef TYPE_REFERENCE_TOOL
		makeEiffelFeatureTag (token);
#endif
		readToken (token);
		if (isType (token, TOKEN_COMMA))
			readToken (token);  /* another name follows */
	}
	if (found)
	{
		if (isKeyword (token, KEYWORD_alias))
		{
			readToken (token);
#ifndef TYPE_REFERENCE_TOOL
			if (isType (token, TOKEN_STRING))
				makeEiffelFeatureTag (token);  /* the alias string */
#endif
			readToken (token);
		}
		if (isType (token, TOKEN_OPEN_PAREN)) /* arguments? */
			parseArguments (token);
		if (isType (token, TOKEN_COLON)) /* a query? */
			parseEntityType (token);
		if (isKeyword (token, KEYWORD_assign))
		{
			/* skip "assign <setter>" */
			readToken (token);
			readToken (token);
		}
		if (isKeyword (token, KEYWORD_obsolete))
		{
			readToken (token);
			if (isType (token, TOKEN_STRING))
				readToken (token);  /* skip the obsolescence message */
		}
		findFeatureEnd (token);
	}
	return found;
}
/* Parses a Go const/type/var declaration and tags each declared identifier
 * with `kind`.  Handles both the single-spec form and the parenthesized
 * multi-spec form. */
static void parseConstTypeVar (tokenInfo *const token, goKind kind)
{
	// ConstDecl = "const" ( ConstSpec | "(" { ConstSpec ";" } ")" ) .
	// ConstSpec = IdentifierList [ [ Type ] "=" ExpressionList ] .
	// IdentifierList = identifier { "," identifier } .
	// ExpressionList = Expression { "," Expression } .
	// TypeDecl = "type" ( TypeSpec | "(" { TypeSpec ";" } ")" ) .
	// TypeSpec = identifier Type .
	// VarDecl = "var" ( VarSpec | "(" { VarSpec ";" } ")" ) .
	// VarSpec = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) .
	boolean usesParens = FALSE;
	readToken (token);
	if (isType (token, TOKEN_OPEN_PAREN))
	{
		/* parenthesized multi-spec form */
		usesParens = TRUE;
		readToken (token);
	}
	do
	{
		/* Tag each identifier of the comma-separated IdentifierList. */
		while (!isType (token, TOKEN_EOF))
		{
			if (isType (token, TOKEN_IDENTIFIER))
			{
				makeTag (token, kind);
				readToken (token);
			}
			if (!isType (token, TOKEN_COMMA))
				break;
			readToken (token);
		}
		skipType (token);
		/* Skip the rest of this spec (type and/or initializer). */
		while (!isType (token, TOKEN_SEMICOLON) && !isType (token, TOKEN_CLOSE_PAREN)
				&& !isType (token, TOKEN_EOF))
		{
			readToken (token);
			skipToMatched (token);
		}
		if (usesParens && !isType (token, TOKEN_CLOSE_PAREN))
		{
			// we are at TOKEN_SEMICOLON; advance to the next spec
			readToken (token);
		}
	} while (!isType (token, TOKEN_EOF) &&
			usesParens && !isType (token, TOKEN_CLOSE_PAREN));
}
/* Walks a TeX input, generating a tag for each sectioning or labelling
 * command (\part, \chapter, ..., \label, \include).
 *
 * NOTE(review): the loop has no explicit exit condition; presumably
 * readToken() terminates the parse at end of input via the parser's
 * exception/longjmp mechanism -- confirm, otherwise this never returns. */
static void parseTexFile (tokenInfo *const token)
{
	do
	{
		readToken (token);
		if (isType (token, TOKEN_KEYWORD))
		{
			switch (token->keyword)
			{
				case KEYWORD_part:          parseTag (token, TEXTAG_PART);          break;
				case KEYWORD_chapter:       parseTag (token, TEXTAG_CHAPTER);       break;
				case KEYWORD_section:       parseTag (token, TEXTAG_SECTION);       break;
				case KEYWORD_subsection:    parseTag (token, TEXTAG_SUBSECTION);    break;
				case KEYWORD_subsubsection: parseTag (token, TEXTAG_SUBSUBSECTION); break;
				case KEYWORD_paragraph:     parseTag (token, TEXTAG_PARAGRAPH);     break;
				case KEYWORD_subparagraph:  parseTag (token, TEXTAG_SUBPARAGRAPH);  break;
				case KEYWORD_label:         parseTag (token, TEXTAG_LABEL);         break;
				case KEYWORD_include:       parseTag (token, TEXTAG_INCLUDE);       break;
				default: break;
			}
		}
	} while (TRUE);
}
/* term ::= factor { ("*" | "/") factor }
 * Parses a term and emits the multiply/divide instruction after each
 * right-hand factor has been parsed. */
void term()
{
    token op;

    factor();
    while (currentToken == multsym || currentToken == slashsym) {
        op = currentToken;       /* remember which operator we saw */
        readToken();
        factor();
        /* op is guaranteed to be multsym or slashsym here */
        genCode(OPR, 0, op == multsym ? MUL : DIV);
    }
    return;
}
bool Parser::processScene() { std::string strEtiqueta; bool bFinBloque = false; if(m_pGlobals->pScene == NULL) { m_pGlobals->pScene = new Scene; while(!bFinBloque) { // Buscamos etiqueta. if(!ignorarChars()) return false; if(!readToken(strEtiqueta)) return false; if(strEtiqueta == "/scene") bFinBloque = true; else { if(strEtiqueta == "texture") { if(!processTexture()) return false; } else if(strEtiqueta == "material") { if(!processMaterial()) return false; } else if(strEtiqueta == "object") { if(!processObject()) return false; } else // Etiqueta desconocida, no perteneciente a la seccion config. return false; } } } else // Scene ya creada, error. return false; return true; }
// Consumes the next token and checks that it terminates the current
// section (a semicolon, or a newline when newLineIsSectionEnding is set).
// An empty token list counts as a valid section ending.
bool DbcParser::expectSectionEnding(DbcTokenList &tokens, bool newLineIsSectionEnding)
{
    if (tokens.isEmpty()) {
        return true;
    }

    DbcToken *token = readToken(tokens, dbc_tok_whitespace|dbc_tok_semicolon);
    if (!token) {
        return false;
    }

    const bool isEnding = isSectionEnding(token, newLineIsSectionEnding);
    // Use delete (not free) so DbcToken's destructor runs and its QString
    // members release their storage; free() skips the destructor entirely.
    delete token;
    return isEnding;
}
/* Top-level SQL scanner: reads tokens and dispatches each recognized
 * keyword to its declaration parser until an "end" keyword is seen.
 *
 * NOTE(review): the loop stops only on KEYWORD_end; presumably readToken()
 * aborts at end of input via the parser's exception mechanism -- confirm
 * behavior for files that lack a trailing "end". */
static void parseSqlFile (tokenInfo *const token)
{
	do
	{
		readToken (token);
		switch (token->keyword)
		{
			case KEYWORD_cursor:    parseSimple (token, SQLTAG_CURSOR);  break;
			case KEYWORD_declare:   parseBlock (token, FALSE);           break;
			case KEYWORD_function:  parseSubProgram (token);             break;
			case KEYWORD_package:   parsePackage (token);                break;
			case KEYWORD_procedure: parseSubProgram (token);             break;
			case KEYWORD_subtype:   parseSimple (token, SQLTAG_SUBTYPE); break;
			case KEYWORD_table:     parseTable (token);                  break;
			case KEYWORD_trigger:   parseSimple (token, SQLTAG_TRIGGER); break;
			case KEYWORD_type:      parseType (token);                   break;
			default: break;
		}
	} while (! isKeyword (token, KEYWORD_end));
}
static void
startTextChunkEngl(png_text *   const textChunkP,
                   char         const textline[],
                   unsigned int const lineLength,
                   bool         const isCompressed,
                   bool         const verbose) {
/*----------------------------------------------------------------------------
   Given that 'textline' is the first line of an entry in an English text
   string file, record its information in the comment record *textChunkP.
   The comment text is taken from this line only, even though the full
   string may continue on later continuation lines.

   'textline' is not NUL-terminated; its length is 'lineLength', which is
   at least one, and it contains no newline character.

   'isCompressed' selects a compressed (zTXt) text chunk.
-----------------------------------------------------------------------------*/
    const char * key;
    unsigned int cursor;

    cursor = 0;

    /* First token on the line is the comment's key. */
    readToken(textline, lineLength, &cursor, &key);
    pngx_setTextKey(textChunkP, key);
    pm_strfree(key);

    skipWhiteSpace(textline, lineLength, &cursor);

    pngx_setTextLang(textChunkP, NULL);

    /* Everything after the key is the comment text. */
    readTextString(textline, lineLength, cursor,
                   &textChunkP->text, &textChunkP->text_length);

    textChunkP->compression =
        isCompressed ? PNG_TEXT_COMPRESSION_zTXt : PNG_TEXT_COMPRESSION_NONE;
}
/* Scans forward for a TOKEN_TAG_START2 tag whose name is "script".  On
 * success returns true and stores, via *line / *lineOffset, the input
 * position recorded just before the tag-start token was read (hence the
 * two-slot position history below). */
static bool skipScriptContent (tokenInfo *token, long *line, long *lineOffset)
{
	bool found_start = false;
	bool found_script = false;

	long line_tmp[2];
	long lineOffset_tmp[2];

	tokenType type;

	do
	{
		/* Slot 0: position before reading the current token. */
		line_tmp[0] = getInputLineNumber ();
		lineOffset_tmp[0] = getInputLineOffset ();

		readToken (token, false);
		type = token->type;

		if (type == TOKEN_TAG_START2)
		{
			found_start = true;
			/* Slot 1 remembers where this tag started in case the next
			 * token turns out to be the "script" name. */
			line_tmp[1] = line_tmp[0];
			lineOffset_tmp[1] = lineOffset_tmp[0];
		}
		else if (found_start
				 && type == TOKEN_NAME
				 && lookupKeyword (vStringValue (token->string), Lang_html) == KEYWORD_script)
		{
			found_script = true;
			*line = line_tmp[1];
			*lineOffset = lineOffset_tmp[1];
		}
		else
			found_start = false;  /* anything else resets the match */
	} while ((type != TOKEN_EOF) && (!found_script));

	return found_script;
}