/// LexRawTokensFromMainFile - Lets all the raw tokens from the main file into /// the specified vector. static void LexRawTokensFromMainFile(Preprocessor &PP, std::vector<Token> &RawTokens) { SourceManager &SM = PP.getSourceManager(); // Create a lexer to lex all the tokens of the main file in raw mode. Even // though it is in raw mode, it will not return comments. const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID()); Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts()); // Switch on comment lexing because we really do want them. RawLex.SetCommentRetentionState(true); Token RawTok; do { RawLex.LexFromRawLexer(RawTok); // If we have an identifier with no identifier info for our raw token, look // up the indentifier info. This is important for equality comparison of // identifier tokens. if (RawTok.is(tok::raw_identifier)) PP.LookUpIdentifierInfo(RawTok); RawTokens.push_back(RawTok); } while (RawTok.isNot(tok::eof)); }
/// HighlightMacros - This uses the macro table state from the end of the
/// file, to re-expand macros and insert (into the HTML) information about the
/// macro expansions.  This won't be perfectly perfect, but it will be
/// reasonably close.
///
/// Strategy: raw-lex FID with directives stripped, re-feed the token stream
/// through the (temporarily muted) preprocessor so macros expand, and wrap
/// each expansion range in <span class='macro'> markup whose closing tag
/// carries the escaped expansion text.
void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
  // Re-lex the raw token stream into a token buffer.
  const SourceManager &SM = PP.getSourceManager();
  std::vector<Token> TokenStream;

  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
  Lexer L(FID, FromFile, SM, PP.getLangOpts());

  // Lex all the tokens in raw mode, to avoid entering #includes or expanding
  // macros.
  while (1) {
    Token Tok;
    L.LexFromRawLexer(Tok);

    // If this is a # at the start of a line, discard it from the token stream.
    // We don't want the re-preprocess step to see #defines, #includes or other
    // preprocessor directives.
    if (Tok.is(tok::hash) && Tok.isAtStartOfLine())
      continue;

    // If this is a ## token, change its kind to unknown so that re-preprocessing
    // it will not produce an error (a stray ## outside a macro body is invalid).
    if (Tok.is(tok::hashhash))
      Tok.setKind(tok::unknown);

    // If this raw token is an identifier, the raw lexer won't have looked up
    // the corresponding identifier info for it.  Do this now so that it will be
    // macro expanded when we re-preprocess it.
    if (Tok.is(tok::raw_identifier))
      PP.LookUpIdentifierInfo(Tok);

    TokenStream.push_back(Tok);

    // The eof token is kept in the stream as the re-preprocessing terminator.
    if (Tok.is(tok::eof)) break;
  }

  // Temporarily change the diagnostics object so that we ignore any generated
  // diagnostics from this pass.
  DiagnosticsEngine TmpDiags(PP.getDiagnostics().getDiagnosticIDs(),
                             &PP.getDiagnostics().getDiagnosticOptions(),
                             new IgnoringDiagConsumer);

  // FIXME: This is a huge hack; we reuse the input preprocessor because we want
  // its state, but we aren't actually changing it (we hope). This should really
  // construct a copy of the preprocessor.
  Preprocessor &TmpPP = const_cast<Preprocessor&>(PP);
  DiagnosticsEngine *OldDiags = &TmpPP.getDiagnostics();
  TmpPP.setDiagnostics(TmpDiags);

  // Inform the preprocessor that we don't want comments.
  // NOTE(review): the previous comment-retention state is not saved/restored
  // here, unlike the diagnostics and pragma state below — confirm intended.
  TmpPP.SetCommentRetentionState(false, false);

  // We don't want pragmas either. Although we filtered out #pragma, removing
  // _Pragma and __pragma is much harder.
  bool PragmasPreviouslyEnabled = TmpPP.getPragmasEnabled();
  TmpPP.setPragmasEnabled(false);

  // Enter the tokens we just lexed.  This will cause them to be macro expanded
  // but won't enter sub-files (because we removed #'s).
  TmpPP.EnterTokenStream(TokenStream, false);

  TokenConcatenation ConcatInfo(TmpPP);

  // Lex all the tokens.
  Token Tok;
  TmpPP.Lex(Tok);
  while (Tok.isNot(tok::eof)) {
    // Ignore non-macro tokens.
    if (!Tok.getLocation().isMacroID()) {
      TmpPP.Lex(Tok);
      continue;
    }

    // Okay, we have the first token of a macro expansion: highlight the
    // expansion by inserting a start tag before the macro expansion and
    // end tag after it.
    std::pair<SourceLocation, SourceLocation> LLoc =
      SM.getExpansionRange(Tok.getLocation());

    // Ignore tokens whose instantiation location was not the main file.
    if (SM.getFileID(LLoc.first) != FID) {
      TmpPP.Lex(Tok);
      continue;
    }

    assert(SM.getFileID(LLoc.second) == FID &&
           "Start and end of expansion must be in the same ultimate file!");

    // Expansion accumulates the HTML-escaped spelling of the whole expansion;
    // LineLen tracks the visual length so we can break long expansions.
    std::string Expansion = EscapeText(TmpPP.getSpelling(Tok));
    unsigned LineLen = Expansion.size();

    // PrevPrevTok starts default-constructed; AvoidConcat consumes it on the
    // first iteration below.
    Token PrevPrevTok;
    Token PrevTok = Tok;
    // Okay, eat this token, getting the next one.
    TmpPP.Lex(Tok);

    // Skip all the rest of the tokens that are part of this macro
    // instantiation.  It would be really nice to pop up a window with all the
    // spelling of the tokens or something.
    while (!Tok.is(tok::eof) &&
           SM.getExpansionLoc(Tok.getLocation()) == LLoc.first) {
      // Insert a newline if the macro expansion is getting large.
      if (LineLen > 60) {
        Expansion += "<br>";
        LineLen = 0;
      }

      // Subtract the old size now, add the new size after appending: the net
      // effect is to grow LineLen by exactly the appended token's length.
      LineLen -= Expansion.size();

      // If the tokens were already space separated, or if they must be to avoid
      // them being implicitly pasted, add a space between them.
      if (Tok.hasLeadingSpace() ||
          ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok))
        Expansion += ' ';

      // Escape any special characters in the token text.
      Expansion += EscapeText(TmpPP.getSpelling(Tok));
      LineLen += Expansion.size();

      PrevPrevTok = PrevTok;
      PrevTok = Tok;
      TmpPP.Lex(Tok);
    }

    // Insert the expansion as the end tag, so that multi-line macros all get
    // highlighted.  The extra </span> closes the 'macro' span opened below.
    Expansion = "<span class='expansion'>" + Expansion + "</span></span>";

    HighlightRange(R, LLoc.first, LLoc.second,
                   "<span class='macro'>", Expansion.c_str());
  }

  // Restore the preprocessor's old state.
  TmpPP.setDiagnostics(*OldDiags);
  TmpPP.setPragmasEnabled(PragmasPreviouslyEnabled);
}
/// SyntaxHighlight - Relex the specified FileID and annotate the HTML with
/// information about keywords, macro expansions etc.  This uses the macro
/// table state from the end of the file, so it won't be perfectly perfect,
/// but it will be reasonably close.
///
/// Emits <span> markup for keywords, comments, string literals (sans their
/// encoding prefix) and whole preprocessor-directive lines.
void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
  RewriteBuffer &RB = R.getEditBuffer(FID);

  const SourceManager &SM = PP.getSourceManager();
  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
  Lexer L(FID, FromFile, SM, PP.getLangOpts());
  // Highlight ranges are expressed as byte offsets from the buffer start.
  const char *BufferStart = L.getBuffer().data();

  // Inform the preprocessor that we want to retain comments as tokens, so we
  // can highlight them.
  L.SetCommentRetentionState(true);

  // Lex all the tokens in raw mode, to avoid entering #includes or expanding
  // macros.
  Token Tok;
  L.LexFromRawLexer(Tok);

  while (Tok.isNot(tok::eof)) {
    // Since we are lexing unexpanded tokens, all tokens are from the main
    // FileID.
    unsigned TokOffs = SM.getFileOffset(Tok.getLocation());
    unsigned TokLen = Tok.getLength();
    switch (Tok.getKind()) {
    default: break;
    case tok::identifier:
      // The raw lexer only produces raw_identifier; a cooked identifier here
      // means something went wrong upstream.
      llvm_unreachable("tok::identifier in raw lexing mode!");
    case tok::raw_identifier: {
      // Fill in Result.IdentifierInfo and update the token kind,
      // looking up the identifier in the identifier table.
      PP.LookUpIdentifierInfo(Tok);

      // If the lookup cooked this into anything other than a plain identifier
      // (i.e. a keyword), highlight it as such.
      if (Tok.isNot(tok::identifier))
        HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
                       "<span class='keyword'>", "</span>");
      break;
    }
    case tok::comment:
      HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
                     "<span class='comment'>", "</span>");
      break;
    case tok::utf8_string_literal:
      // Chop off the u part of the u8 prefix, then fall through so the
      // next case chops the 8.
      ++TokOffs;
      --TokLen;
      // FALL THROUGH to chop the 8
    case tok::wide_string_literal:
    case tok::utf16_string_literal:
    case tok::utf32_string_literal:
      // Chop off the L, u, U or 8 prefix
      ++TokOffs;
      --TokLen;
      // FALL THROUGH.
    case tok::string_literal:
      // FIXME: Exclude the optional ud-suffix from the highlighted range.
      HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
                     "<span class='string_literal'>", "</span>");
      break;
    case tok::hash: {
      // If this is a preprocessor directive, all tokens to end of line are too.
      if (!Tok.isAtStartOfLine())
        break;

      // Eat all of the tokens until we get to the next one at the start of
      // line.
      unsigned TokEnd = TokOffs+TokLen;
      L.LexFromRawLexer(Tok);
      while (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof)) {
        TokEnd = SM.getFileOffset(Tok.getLocation())+Tok.getLength();
        L.LexFromRawLexer(Tok);
      }

      // Find end of line.  This is a hack.
      HighlightRange(RB, TokOffs, TokEnd, BufferStart,
                     "<span class='directive'>", "</span>");

      // Don't skip the next token: the directive loop above already lexed the
      // first token of the following line, so bypass the trailing lex.
      continue;
    }
    }

    L.LexFromRawLexer(Tok);
  }
}
/// HighlightMacros - This uses the macro table state from the end of the /// file, to re-expand macros and insert (into the HTML) information about the /// macro expansions. This won't be perfectly perfect, but it will be /// reasonably close. void html::HighlightMacros(Rewriter &R, FileID FID, Preprocessor& PP) { // Re-lex the raw token stream into a token buffer. const SourceManager &SM = PP.getSourceManager(); std::vector<Token> TokenStream; Lexer L(FID, SM, PP.getLangOptions()); // Lex all the tokens in raw mode, to avoid entering #includes or expanding // macros. while (1) { Token Tok; L.LexFromRawLexer(Tok); // If this is a # at the start of a line, discard it from the token stream. // We don't want the re-preprocess step to see #defines, #includes or other // preprocessor directives. if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) continue; // If this is a ## token, change its kind to unknown so that repreprocessing // it will not produce an error. if (Tok.is(tok::hashhash)) Tok.setKind(tok::unknown); // If this raw token is an identifier, the raw lexer won't have looked up // the corresponding identifier info for it. Do this now so that it will be // macro expanded when we re-preprocess it. if (Tok.is(tok::identifier)) { // Change the kind of this identifier to the appropriate token kind, e.g. // turning "for" into a keyword. Tok.setKind(PP.LookUpIdentifierInfo(Tok)->getTokenID()); } TokenStream.push_back(Tok); if (Tok.is(tok::eof)) break; } // Temporarily change the diagnostics object so that we ignore any generated // diagnostics from this pass. IgnoringDiagClient TmpDC; Diagnostic TmpDiags(&TmpDC); Diagnostic *OldDiags = &PP.getDiagnostics(); PP.setDiagnostics(TmpDiags); // Inform the preprocessor that we don't want comments. PP.SetCommentRetentionState(false, false); // Enter the tokens we just lexed. This will cause them to be macro expanded // but won't enter sub-files (because we removed #'s). 
PP.EnterTokenStream(&TokenStream[0], TokenStream.size(), false, false); TokenConcatenation ConcatInfo(PP); // Lex all the tokens. Token Tok; PP.Lex(Tok); while (Tok.isNot(tok::eof)) { // Ignore non-macro tokens. if (!Tok.getLocation().isMacroID()) { PP.Lex(Tok); continue; } // Okay, we have the first token of a macro expansion: highlight the // instantiation by inserting a start tag before the macro instantiation and // end tag after it. std::pair<SourceLocation, SourceLocation> LLoc = SM.getInstantiationRange(Tok.getLocation()); // Ignore tokens whose instantiation location was not the main file. if (SM.getFileID(LLoc.first) != FID) { PP.Lex(Tok); continue; } assert(SM.getFileID(LLoc.second) == FID && "Start and end of expansion must be in the same ultimate file!"); std::string Expansion = PP.getSpelling(Tok); unsigned LineLen = Expansion.size(); Token PrevTok = Tok; // Okay, eat this token, getting the next one. PP.Lex(Tok); // Skip all the rest of the tokens that are part of this macro // instantiation. It would be really nice to pop up a window with all the // spelling of the tokens or something. while (!Tok.is(tok::eof) && SM.getInstantiationLoc(Tok.getLocation()) == LLoc.first) { // Insert a newline if the macro expansion is getting large. if (LineLen > 60) { Expansion += "<br>"; LineLen = 0; } LineLen -= Expansion.size(); // If the tokens were already space separated, or if they must be to avoid // them being implicitly pasted, add a space between them. if (Tok.hasLeadingSpace() || ConcatInfo.AvoidConcat(PrevTok, Tok)) Expansion += ' '; // Escape any special characters in the token text. Expansion += EscapeText(PP.getSpelling(Tok)); LineLen += Expansion.size(); PrevTok = Tok; PP.Lex(Tok); } // Insert the expansion as the end tag, so that multi-line macros all get // highlighted. 
Expansion = "<span class='expansion'>" + Expansion + "</span></span>"; HighlightRange(R, LLoc.first, LLoc.second, "<span class='macro'>", Expansion.c_str()); } // Restore diagnostics object back to its own thing. PP.setDiagnostics(*OldDiags); }