bool Compiler::compile(Error& error)
{
	error.clear();
	error.step = Error::COMPILATION;
	error.state = Error::ERROR;

	m_tokenList = MUON_NEW(std::vector<parser::Token>);
	m_nodeRoot = MUON_NEW(parser::ASTNode);

	// avoid a macro, and avoid duplicating code
	auto clearVariable = [&]()
	{
		m_loadBuffer.clear();
		MUON_DELETE(m_tokenList);
		MUON_DELETE(m_nodeRoot);
	};

	if (!lexical(error)) {
		clearVariable();
		return false;
	}

	if (!syntaxic(error)) {
		clearVariable();
		return false;
	}

	if (!semantic(error)) {
		clearVariable();
		return false;
	}

	clearVariable();
	error.state = Error::SUCCESS;
	return true;
}
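// Side note (illustration only, not part of the project): the MUON_NEW /
// MUON_DELETE pairing plus the clearVariable lambda can also be expressed with
// RAII, so every early return releases the temporaries automatically. This is a
// minimal standalone sketch with stand-in types and a hypothetical function
// name; the real code presumably keeps the MUON_ macros because they route
// through the engine's own allocator.
#include <memory>
#include <vector>

struct SketchToken {};      // stand-in for parser::Token
struct SketchASTNode {};    // stand-in for parser::ASTNode

static bool compilePassSketch (bool lexicalOk, bool syntaxicOk, bool semanticOk)
{
	// owning locals: freed on every return path, no cleanup lambda needed
	auto tokenList = std::make_unique<std::vector<SketchToken>>();
	auto nodeRoot = std::make_unique<SketchASTNode>();

	if (!lexicalOk) {
		return false;
	}
	if (!syntaxicOk) {
		return false;
	}
	if (!semanticOk) {
		return false;
	}
	return true;
}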
int main(int argc, char* argv[])
{
#ifdef _MSC_VER
	_CrtSetDbgFlag (_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
#endif

	if (argc < 3) {
		fprintf (stdout, "usage: dLexCompiler [inputRulesFileName] [outputFileName]\n");
		fprintf (stdout, "[inputRulesFileName] name of the file containing the Lex or Flex rules\n");
		fprintf (stdout, "[outputFileName] name of the cpp output file\n");
		exit (0);
	}

	const char* const inputRulesFileName = argv[1];
	const char* const outputFileName = argv[2];

	FILE* const rules = fopen (inputRulesFileName, "rb");
	if (!rules) {
		fprintf (stdout, "Rule file \"%s\" not found\n", inputRulesFileName);
		exit (0);
	}

	// read the entire rules file into a null-terminated buffer
	fseek (rules, 0, SEEK_END);
	int size = ftell (rules);
	fseek (rules, 0, SEEK_SET);

	string buffer;
	buffer.resize (size + 1);
	fread ((void*) buffer.c_str(), 1, size, rules);
	fclose (rules);

	// trim the buffer at the first embedded null character
	buffer.erase (strlen (buffer.c_str()));

	dLexCompiler lexical (buffer.c_str(), outputFileName, inputRulesFileName);
	return 0;
}
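// Illustration only: the same "read the whole rules file, then trim at the
// first embedded null" step written with <fstream>, which makes the error
// handling explicit (the fread result above is not checked). The function
// name is hypothetical and this is not part of dLexCompiler.
#include <fstream>
#include <sstream>
#include <string>

static std::string ReadRulesFileSketch (const char* fileName)
{
	std::ifstream file (fileName, std::ios::binary);
	if (!file) {
		return std::string();
	}

	std::ostringstream contents;
	contents << file.rdbuf();
	std::string buffer (contents.str());

	// mirror buffer.erase(strlen(buffer.c_str())):
	// keep only the text before the first '\0'
	const std::string::size_type firstNull = buffer.find ('\0');
	if (firstNull != std::string::npos) {
		buffer.resize (firstNull);
	}
	return buffer;
}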
void dParserCompiler::ScanGrammarFile(
	const dString& inputRules,
	dProductionRule& ruleList,
	dTree<dTokenInfo, dCRCTYPE>& symbolList,
	dOperatorsPrecedence& operatorPrecedence,
	dString& userCodeBlock,
	dString& userVariableClass,
	dString& endUserCode,
	int& lastTokenEnum)
{
	dString startSymbol ("");
	int tokenEnumeration = 256;
	int operatorPrecedencePriority = 0;

	dParserLexical lexical (inputRules.GetStr());
	LoadTemplateFile("dParserUserVariableTemplate_cpp.txt", userVariableClass);

	// scan the definition segment
	for (dToken token = dToken(lexical.NextToken()); token != GRAMMAR_SEGMENT; ) {
		switch (int (token))
		{
			case START:
			{
				token = dToken(lexical.NextToken());
				startSymbol = lexical.GetTokenString();
				token = dToken(lexical.NextToken());
				break;
			}

			case TOKEN:
			{
				for (token = dToken(lexical.NextToken()); token == LITERAL; token = dToken(lexical.NextToken())) {
					const char* const name = lexical.GetTokenString();
					symbolList.Insert(dTokenInfo (tokenEnumeration, TERMINAL, name), dCRC64 (name));
					tokenEnumeration ++;
				}
				break;
			}

			case LEFT:
			case RIGHT:
			{
				dOperatorsAssociation& association = operatorPrecedence.Append()->GetInfo();
				association.m_prioprity = operatorPrecedencePriority;
				operatorPrecedencePriority ++;

				switch (int (token))
				{
					case LEFT:
						association.m_associativity = dOperatorsAssociation::m_left;
						break;
					case RIGHT:
						association.m_associativity = dOperatorsAssociation::m_right;
						break;
				}

				for (token = dToken(lexical.NextToken()); (token == LITERAL) || ((token < 256) && !isalnum (token)); token = dToken(lexical.NextToken())) {
					association.Append(dCRC64 (lexical.GetTokenString()));
				}
				break;
			}

			case UNION:
			{
				token = dToken(lexical.NextToken());
				dAssert (token == SEMANTIC_ACTION);
				userVariableClass = lexical.GetTokenString() + 1;
				userVariableClass.Replace(userVariableClass.Size() - 1, 1, "");
				token = dToken(lexical.NextToken());
				break;
			}

			case CODE_BLOCK:
			{
				userCodeBlock += lexical.GetTokenString();
				token = dToken(lexical.NextToken());
				break;
			}

			case EXPECT:
			{
				token = dToken(lexical.NextToken());
				dAssert (token == INTEGER);
				m_shiftReduceExpectedWarnings = atoi (lexical.GetTokenString());
				token = dToken(lexical.NextToken());
				break;
			}

			default:
			{
				dAssert (0);
				token = dToken(lexical.NextToken());
			}
		}
	}

	int ruleNumber = 1;
	lastTokenEnum = tokenEnumeration;

	// scan the production rules segment
	dToken token1 = dToken(lexical.NextToken());
	for (; (token1 != GRAMMAR_SEGMENT) && (token1 != -1); token1 = dToken(lexical.NextToken())) {
		//dTrace (("%s\n", lexical.GetTokenString()));
		switch (int (token1))
		{
			case LITERAL:
			{
				// add the first rule
				dRuleInfo& rule = ruleList.Append()->GetInfo();
				rule.m_token = token1;
				rule.m_type = NONTERMINAL;
				rule.m_name = lexical.GetTokenString();
				rule.m_nameCRC = dCRC64 (lexical.GetTokenString());

				dTree<dTokenInfo, dCRCTYPE>::dTreeNode* nonTerminalIdNode = symbolList.Find(rule.m_nameCRC);
				if (!nonTerminalIdNode) {
					nonTerminalIdNode = symbolList.Insert(dTokenInfo (tokenEnumeration, NONTERMINAL, rule.m_name), rule.m_nameCRC);
					tokenEnumeration ++;
				}
				rule.m_ruleId = nonTerminalIdNode->GetInfo().m_tokenId;
				rule.m_ruleNumber = ruleNumber;
				ruleNumber ++;

				token1 = ScanGrammarRule(lexical, ruleList, symbolList, ruleNumber, tokenEnumeration, operatorPrecedence);
				break;
			}

			default:
				dAssert (0);
		}
	}

	dProductionRule::dListNode* firstRuleNode = ruleList.GetFirst();
	if (startSymbol != "") {
		firstRuleNode = ruleList.Find (dCRC64 (startSymbol.GetStr()));
	}

	// expand the grammar by adding an empty start rule
	const dRuleInfo& firstRule = firstRuleNode->GetInfo();
	dRuleInfo& rule = ruleList.Addtop()->GetInfo();
	rule.m_ruleNumber = 0;
	rule.m_ruleId = tokenEnumeration;
	rule.m_token = firstRule.m_token;
	rule.m_type = NONTERMINAL;
	rule.m_name = firstRule.m_name + dString("__");
	rule.m_nameCRC = dCRC64 (rule.m_name.GetStr());
	symbolList.Insert(dTokenInfo (tokenEnumeration, rule.m_type, rule.m_name), rule.m_nameCRC);
	tokenEnumeration ++;

	dSymbol& symbol = rule.Append()->GetInfo();
	symbol.m_token = firstRule.m_token;
	symbol.m_type = firstRule.m_type;
	symbol.m_name = firstRule.m_name;
	symbol.m_nameCRC = firstRule.m_nameCRC;

	// scan the user code that follows the last grammar segment
	if (token1 == GRAMMAR_SEGMENT) {
		endUserCode = lexical.GetNextBuffer();
		//endUserCode += "\n";
	}
}
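// The block above performs the standard LR augmentation: a synthetic rule
// numbered 0 whose left-hand side is the start symbol's name with a "__"
// suffix and whose body is just the start symbol. Below is a standalone
// illustration of that idea; all names and containers are hypothetical
// stand-ins for dProductionRule / dRuleInfo / dSymbol, not project code.
#include <string>
#include <vector>

struct SketchRule {
	int number;                       // rule 0 is the synthetic start rule
	std::string lhs;                  // nonterminal being defined
	std::vector<std::string> rhs;     // right-hand-side symbols
};

// prepend  start__ : start  as rule 0, mirroring
// rule.m_name = firstRule.m_name + dString("__") above
static void AugmentGrammarSketch (std::vector<SketchRule>& rules, const std::string& startSymbol)
{
	rules.insert (rules.begin(), SketchRule {0, startSymbol + "__", {startSymbol}});
}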