Example 1
bool TGztParser::ParseImports(Tokenizer* input, TGztFileDescriptorProto* file)
{
    TTokenizer tokenizer(input);
    Start(&tokenizer);
    if (CurrentEncoding() != CODES_UNKNOWN)
        file->set_encoding(NameByCharset(CurrentEncoding()));

    // Repeatedly parse statements until we reach the end of the file.
    while (!AtEnd())
    {
        if (TryConsume(';'))
            // empty statement; ignore
            continue;

        if (!LookingAt("import") || !ParseImport(file->add_dependency()))
        {
            // Not an import (or the import failed to parse): skip this whole
            // statement, but keep looping to parse the following ones.
            SkipStatement();
            SkipBlockEnd();
        }
    }

    return Finish();
}
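
The loop above illustrates a common recovery pattern: when a statement is not an import, or fails to parse, the parser discards tokens up to the end of that statement and keeps going, so a single bad statement does not abort the whole file. A minimal, self-contained sketch of the same idea, using hypothetical Token and RecoveringParser types rather than the gzt classes:

#include <string>
#include <utility>
#include <vector>

// kind: ';', '{', '}', 'i' (identifier), or 0 for end of input.
struct Token { char kind; std::string text; };

class RecoveringParser {
public:
    explicit RecoveringParser(std::vector<Token> toks) : toks_(std::move(toks)) {}

    // Parse statements until end of input; on failure, resynchronize at the
    // next ';' (or the '}' closing a malformed block) and continue.
    bool ParseAll() {
        bool ok = true;
        while (Current().kind != 0) {
            if (!ParseStatement()) { ok = false; SkipStatement(); }
        }
        return ok;
    }

private:
    const Token& Current() const {
        static const Token kEof{0, ""};
        return pos_ < toks_.size() ? toks_[pos_] : kEof;
    }
    void Advance() { if (pos_ < toks_.size()) ++pos_; }

    bool ParseStatement() {
        // Placeholder grammar: a statement is either ';' alone or an
        // identifier followed by ';'.
        if (Current().kind == ';') { Advance(); return true; }
        if (Current().kind != 'i') return false;
        Advance();
        if (Current().kind != ';') return false;
        Advance();
        return true;
    }

    // Discard tokens until the end of the current statement, balancing braces
    // so that a malformed block is skipped as one unit.
    void SkipStatement() {
        int depth = 0;
        while (Current().kind != 0) {
            char k = Current().kind;
            Advance();
            if (k == '{') ++depth;
            else if (k == '}') { if (--depth <= 0) return; }
            else if (k == ';' && depth == 0) return;
        }
    }

    std::vector<Token> toks_;
    size_t pos_ = 0;
};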
Example 2
bool TGztParser::ParseTopLevelStatement(TGztFileDescriptorProto* file)
{
    if (TryConsume(';'))
        // empty statement; ignore
        return true;

    else if (LookingAt("import"))
        return ParseImport(file->add_dependency());

    // syntax extension: encoding used for interpreting string literals and article identifiers.
    else if (LookingAt("encoding"))
        return ParseEncoding(file->mutable_encoding());

    // syntax extension: various gzt compilation options
    else if (LookingAt("option"))
        return ParseGztFileOption(file);

    else if (LookingAtType(Tokenizer::TYPE_IDENTIFIER))
        return ParseArticleDefinition(file->add_article());
    else {
        AddError("Expected top-level statement (e.g. article definition).");
        return false;
    }
}
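
Both gzt examples build on small lookahead primitives such as LookingAt() and TryConsume(), whose real implementations live in the parser base class and are not shown here. A rough, self-contained sketch of what such helpers typically look like, using hypothetical Tok and TokenCursor types rather than the actual gzt API:

#include <deque>
#include <string>
#include <utility>

struct Tok { std::string text; };

class TokenCursor {
public:
    explicit TokenCursor(std::deque<Tok> toks) : toks_(std::move(toks)) {}

    // True if the next token is exactly `text`; does not consume anything.
    bool LookingAt(const std::string& text) const {
        return !toks_.empty() && toks_.front().text == text;
    }

    // Consume the next token only if it matches `text`.
    bool TryConsume(const std::string& text) {
        if (!LookingAt(text)) return false;
        toks_.pop_front();
        return true;
    }

    // Like TryConsume, but records an error message on mismatch.
    bool Consume(const std::string& expected, std::string* error) {
        if (TryConsume(expected)) return true;
        *error = "Expected \"" + expected + "\".";
        return false;
    }

private:
    std::deque<Tok> toks_;
};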
Example 3
void XSDParser::BuildDocumentTree(CDataTypeModule& module)
{
    // Remember the lexer stack depth on entry: an end-of-tag seen while a
    // nested (included) lexer is active sets skipEof, so the EOF produced by
    // that included file does not terminate this parse.
    size_t lexerStackSize = m_StackLexer.size();
    bool skipEof = false;
    ParseHeader();
    CopyComments(module.Comments());

    TToken tok;
    int emb=0;
    for (;;) {
        tok = GetNextToken();
        switch ( tok ) {
        case K_INCLUDE:
            ParseInclude();
            break;
        case K_ELEMENT:
            ParseElementContent(0, emb);
            break;
        case K_ATTRIBUTE:
            ParseAttributeContent();
            break;
        case K_COMPLEXTYPE:
        case K_SIMPLETYPE:
            CreateTypeDefinition(DTDEntity::eType);
            break;
        case K_GROUP:
            CreateTypeDefinition(DTDEntity::eGroup);
            break;
        case K_ATTPAIR:
            break;
        case T_EOF:
            if (skipEof) {
                skipEof = false;
                break;
            }
            ParseError("Unexpected end-of-file", "keyword");
            return;
        case K_ENDOFTAG:
            if (m_StackLexer.size() > lexerStackSize) {
                skipEof = true;
                break;
            }
            if (m_SrcType == eSchema) {
                ProcessNamedTypes();
            }
            return;
        case K_IMPORT:
            ParseImport();
            break;
        case K_ATTRIBUTEGROUP:
            CreateTypeDefinition(DTDEntity::eAttGroup);
            break;
        case K_ANNOTATION:
            m_Comments = &(module.Comments());
            ParseAnnotation();
            break;
        default:
            ParseError("Invalid keyword", "keyword");
            return;
        }
    }
}
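
The lexerStackSize/skipEof bookkeeping exists because included schemas push an extra lexer onto m_StackLexer: the end-of-tag and EOF produced by that nested lexer must not end the outer parse. A simplified sketch of the same idea, with hypothetical Lexer and IncludeAwareParser types rather than the NCBI classes:

#include <stack>
#include <utility>
#include <vector>

// Hypothetical token stream for one input file.
// 'e' = element, 'q' = end-of-tag, 0 = end of file.
struct Lexer {
    std::vector<char> toks;
    size_t pos = 0;
    char Next() { return pos < toks.size() ? toks[pos++] : 0; }
};

class IncludeAwareParser {
public:
    void PushLexer(Lexer lx) { lexers_.push(std::move(lx)); }

    // Drain tokens; an EOF coming from a nested (included) lexer just pops
    // that lexer, while an EOF at the base depth ends the whole parse.
    void Run() {
        const size_t baseDepth = lexers_.size();
        while (!lexers_.empty()) {
            char tok = lexers_.top().Next();
            if (tok == 0) {
                if (lexers_.size() > baseDepth) { lexers_.pop(); continue; }
                return;   // real end of the top-level document
            }
            // ... dispatch on tok here; an include directive would call PushLexer() ...
        }
    }

private:
    std::stack<Lexer> lexers_;
};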
Example 4
asCScriptNode *asCParser::ParseScript()
{
	asCScriptNode *node = new asCScriptNode(snScript);

	// Determine type of node
	sToken t1;

	for(;;)
	{
		while( !isSyntaxError )
		{
			GetToken(&t1);
			RewindTo(&t1);

			if( t1.type == ttImport )
				node->AddChildLast(ParseImport());
			else if( t1.type == ttClass )
				node->AddChildLast(ParseClass());
			else if( t1.type == ttInterface )
				node->AddChildLast(ParseInterface());
			else if( t1.type == ttConst )
				node->AddChildLast(ParseGlobalVar());
			else if( IsDataType(t1.type) )
			{
				if( IsVarDecl() )
					node->AddChildLast(ParseGlobalVar());
				else
					node->AddChildLast(ParseFunction());
			}
			else if( t1.type == ttEndStatement )
			{
				// Ignore a semicolon by itself
				GetToken(&t1);
			}
			else if( t1.type == ttEnd )
				return node;
			else
			{
				asCString str;
				const char *t = asGetTokenDefinition(t1.type);
				if( t == 0 ) t = "<unknown token>";

				str.Format(TXT_UNEXPECTED_TOKEN_s, t);

				Error(str.AddressOf(), &t1);
			}
		}

		if( isSyntaxError )
		{
			// Search for either ';' or '{' or end
			GetToken(&t1);
			while( t1.type != ttEndStatement && t1.type != ttEnd &&
				   t1.type != ttStartStatementBlock )
				GetToken(&t1);

			if( t1.type == ttStartStatementBlock )
			{
				// Find the end of the block and skip nested blocks
				int level = 1;
				while( level > 0 )
				{
					GetToken(&t1);
					if( t1.type == ttStartStatementBlock ) level++;
					if( t1.type == ttEndStatementBlock ) level--;
					if( t1.type == ttEnd ) break;
				}
			}

			isSyntaxError = false;
		}
	}
	// Not reached: the for(;;) loop above only exits through the returns inside it.
	return 0;
}
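
The recovery branch at the bottom resynchronizes after a syntax error by counting statement-block tokens, so that nested blocks are skipped as a whole. The same depth-counting idea in isolation, over a plain character stream (a hypothetical helper, not part of AngelScript):

#include <cstddef>
#include <string>

// Given text positioned just after an opening '{', return the index one past
// the matching '}', skipping any nested blocks; returns std::string::npos if
// the block is never closed.
size_t SkipBlock(const std::string& text, size_t pos)
{
    int level = 1;
    while (pos < text.size() && level > 0)
    {
        char c = text[pos++];
        if (c == '{') level++;
        else if (c == '}') level--;
    }
    return level == 0 ? pos : std::string::npos;
}

// Example: SkipBlock("a; { b { c } } d;", 4) returns 14, the index just past
// the outer closing brace, leaving "d;" still to be parsed.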