Code Example #1
File: CSSTokenizerTest.cpp  Project: eth-srl/BlinkER
TEST(CSSTokenizerTest, Escapes)
{
    TEST_TOKENS("hel\\6Co", ident("hello"));
    TEST_TOKENS("\\26 B", ident("&B"));
    TEST_TOKENS("'hel\\6c o'", string("hello"));
    TEST_TOKENS("'spac\\65\r\ns'", string("spaces"));
    TEST_TOKENS("spac\\65\r\ns", ident("spaces"));
    TEST_TOKENS("spac\\65\n\rs", ident("space"), whitespace, ident("s"));
    TEST_TOKENS("sp\\61\tc\\65\fs", ident("spaces"));
    TEST_TOKENS("hel\\6c  o", ident("hell"), whitespace, ident("o"));
    TEST_TOKENS("test\\\n", ident("test"), delim('\\'), whitespace);
    TEST_TOKENS("eof\\", ident("eof"), delim('\\'));
    TEST_TOKENS("test\\D799", ident("test" + fromUChar32(0xD799)));
    TEST_TOKENS("\\E000", ident(fromUChar32(0xE000)));
    TEST_TOKENS("te\\s\\t", ident("test"));
    TEST_TOKENS("spaces\\ in\\\tident", ident("spaces in\tident"));
    TEST_TOKENS("\\.\\,\\:\\!", ident(".,:!"));
    TEST_TOKENS("\\\r", delim('\\'), whitespace);
    TEST_TOKENS("\\\f", delim('\\'), whitespace);
    TEST_TOKENS("\\\r\n", delim('\\'), whitespace);
    // FIXME: We don't correctly return replacement characters
    // String replacement = fromUChar32(0xFFFD);
    // TEST_TOKENS("null\\0", ident("null" + replacement));
    // TEST_TOKENS("null\\0000", ident("null" + replacement));
    // TEST_TOKENS("large\\110000", ident("large" + replacement));
    // TEST_TOKENS("surrogate\\D800", ident("surrogate" + replacement));
    // TEST_TOKENS("surrogate\\0DABC", ident("surrogate" + replacement));
    // TEST_TOKENS("\\00DFFFsurrogate", ident(replacement + "surrogate"));
    // FIXME: We don't correctly return supplementary plane characters
    // TEST_TOKENS("\\10fFfF", ident(fromUChar32(0x10ffff) + "0"));
    // TEST_TOKENS("\\10000000", ident(fromUChar32(0x100000) + "000"));
}
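These tests depend on small helpers defined near the top of CSSTokenizerTest.cpp. Below is a minimal sketch of what they look like, inferred from their usage above; the exact CSSParserToken constructor signatures are assumptions, not verbatim Blink code. Note that in this revision whitespace is a plain token value used without parentheses, while the later revision in Code Example #2 calls a whitespace() factory function instead.

// Sketch only: factory helpers as this revision of the test appears to use them.
// Constructor signatures are assumed from usage, not copied from Blink.
static CSSParserToken ident(const String& string) { return CSSParserToken(IdentToken, string); }
static CSSParserToken string(const String& string) { return CSSParserToken(StringToken, string); }
static CSSParserToken delim(UChar c) { return CSSParserToken(DelimiterToken, c); }
static CSSParserToken whitespace(WhitespaceToken); // static token value, used bare above

// Builds a one-code-point String; supplementary-plane values such as
// 0x10FFFF become UTF-16 surrogate pairs in the result.
static String fromUChar32(UChar32 c)
{
    StringBuilder input;
    input.append(c);
    return input.toString();
}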
Code Example #2
TEST(CSSTokenizerTest, Escapes)
{
    TEST_TOKENS("hel\\6Co", ident("hello"));
    TEST_TOKENS("\\26 B", ident("&B"));
    TEST_TOKENS("'hel\\6c o'", string("hello"));
    TEST_TOKENS("'spac\\65\r\ns'", string("spaces"));
    TEST_TOKENS("spac\\65\r\ns", ident("spaces"));
    TEST_TOKENS("spac\\65\n\rs", ident("space"), whitespace(), ident("s"));
    TEST_TOKENS("sp\\61\tc\\65\fs", ident("spaces"));
    TEST_TOKENS("hel\\6c  o", ident("hell"), whitespace(), ident("o"));
    TEST_TOKENS("test\\\n", ident("test"), delim('\\'), whitespace());
    TEST_TOKENS("test\\D799", ident("test" + fromUChar32(0xD799)));
    TEST_TOKENS("\\E000", ident(fromUChar32(0xE000)));
    TEST_TOKENS("te\\s\\t", ident("test"));
    TEST_TOKENS("spaces\\ in\\\tident", ident("spaces in\tident"));
    TEST_TOKENS("\\.\\,\\:\\!", ident(".,:!"));
    TEST_TOKENS("\\\r", delim('\\'), whitespace());
    TEST_TOKENS("\\\f", delim('\\'), whitespace());
    TEST_TOKENS("\\\r\n", delim('\\'), whitespace());
    String replacement = fromUChar32(0xFFFD);
    TEST_TOKENS(String("null\\\0", 6), ident("null" + replacement));
    TEST_TOKENS(String("null\\\0\0", 7), ident("null" + replacement + replacement));
    TEST_TOKENS("null\\0", ident("null" + replacement));
    TEST_TOKENS("null\\0000", ident("null" + replacement));
    TEST_TOKENS("large\\110000", ident("large" + replacement));
    TEST_TOKENS("large\\23456a", ident("large" + replacement));
    TEST_TOKENS("surrogate\\D800", ident("surrogate" + replacement));
    TEST_TOKENS("surrogate\\0DABC", ident("surrogate" + replacement));
    TEST_TOKENS("\\00DFFFsurrogate", ident(replacement + "surrogate"));
    TEST_TOKENS("\\10fFfF", ident(fromUChar32(0x10ffff)));
    TEST_TOKENS("\\10fFfF0", ident(fromUChar32(0x10ffff) + "0"));
    TEST_TOKENS("\\10000000", ident(fromUChar32(0x100000) + "00"));
    TEST_TOKENS("eof\\", ident("eof" + replacement));
}
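A note on the String(literal, length) constructor used above: a bare C string literal would stop at its first embedded NUL, so the explicit-length overload is needed to keep the U+0000 characters in the tokenizer input. String("null\\\0", 6), for instance, is the six code units n, u, l, l, backslash, U+0000, which the tokenizer is expected to turn into "null" followed by the U+FFFD replacement character.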
Code Example #3
TEST(CSSTokenizerTest, StringToken)
{
    TEST_TOKENS("'text'", string("text"));
    TEST_TOKENS("\"text\"", string("text"));
    TEST_TOKENS("'testing, 123!'", string("testing, 123!"));
    TEST_TOKENS("'es\\'ca\\\"pe'", string("es'ca\"pe"));
    TEST_TOKENS("'\"quotes\"'", string("\"quotes\""));
    TEST_TOKENS("\"'quotes'\"", string("'quotes'"));
    TEST_TOKENS("\"mismatch'", string("mismatch'"));
    TEST_TOKENS("'text\5\t\13'", string("text\5\t\13"));
    TEST_TOKENS("\"end on eof", string("end on eof"));
    TEST_TOKENS("'esca\\\nped'", string("escaped"));
    TEST_TOKENS("\"esc\\\faped\"", string("escaped"));
    TEST_TOKENS("'new\\\rline'", string("newline"));
    TEST_TOKENS("\"new\\\r\nline\"", string("newline"));
    TEST_TOKENS("'bad\nstring", badString(), whitespace(), ident("string"));
    TEST_TOKENS("'bad\rstring", badString(), whitespace(), ident("string"));
    TEST_TOKENS("'bad\r\nstring", badString(), whitespace(), ident("string"));
    TEST_TOKENS("'bad\fstring", badString(), whitespace(), ident("string"));
    TEST_TOKENS(String("'\0'", 3), string(fromUChar32(0xFFFD)));
    TEST_TOKENS(String("'hel\0lo'", 8), string("hel" + fromUChar32(0xFFFD) + "lo"));
    TEST_TOKENS(String("'h\\65l\0lo'", 10), string("hel" + fromUChar32(0xFFFD) + "lo"));
}
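The badString() cases follow the CSS Syntax spec: an unescaped newline inside a string is a parse error that produces a <bad-string-token>, and the newline itself is reconsumed, which is why each of those cases expects badString(), then whitespace(), then ident("string"). Hitting EOF inside a string, by contrast, simply ends the string token, as the "end on eof" case shows.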
Code Example #4
File: CSSTokenizerTest.cpp  Project: eth-srl/BlinkER
TEST(CSSTokenizerTest, IdentToken)
{
    TEST_TOKENS("simple-ident", ident("simple-ident"));
    TEST_TOKENS("testing123", ident("testing123"));
    TEST_TOKENS("hello!", ident("hello"), delim('!'));
    TEST_TOKENS("world\5", ident("world"), delim('\5'));
    TEST_TOKENS("_under score", ident("_under"), whitespace, ident("score"));
    TEST_TOKENS("-_underscore", ident("-_underscore"));
    TEST_TOKENS("-text", ident("-text"));
    TEST_TOKENS("-\\6d", ident("-m"));
    TEST_TOKENS("--abc", ident("--abc"));
    TEST_TOKENS("--", ident("--"));
    TEST_TOKENS("--11", ident("--11"));
    TEST_TOKENS("---", ident("---"));
    TEST_TOKENS(fromUChar32(0x2003), ident(fromUChar32(0x2003))); // em-space
    TEST_TOKENS(fromUChar32(0xA0), ident(fromUChar32(0xA0))); // non-breaking space
    TEST_TOKENS(fromUChar32(0x1234), ident(fromUChar32(0x1234)));
    TEST_TOKENS(fromUChar32(0x12345), ident(fromUChar32(0x12345)));
    // FIXME: Preprocessing is supposed to replace U+0000 with U+FFFD
    // TEST_TOKENS("\0", ident(fromUChar32(0xFFFD)));
}
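The FIXME refers to the input preprocessing step of the CSS Syntax spec, which replaces U+0000 with U+FFFD before tokenization; Code Example #5 below is a later revision of the same test with those cases enabled.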
Code Example #5
TEST(CSSTokenizerTest, IdentToken)
{
    TEST_TOKENS("simple-ident", ident("simple-ident"));
    TEST_TOKENS("testing123", ident("testing123"));
    TEST_TOKENS("hello!", ident("hello"), delim('!'));
    TEST_TOKENS("world\5", ident("world"), delim('\5'));
    TEST_TOKENS("_under score", ident("_under"), whitespace(), ident("score"));
    TEST_TOKENS("-_underscore", ident("-_underscore"));
    TEST_TOKENS("-text", ident("-text"));
    TEST_TOKENS("-\\6d", ident("-m"));
    TEST_TOKENS("--abc", ident("--abc"));
    TEST_TOKENS("--", ident("--"));
    TEST_TOKENS("--11", ident("--11"));
    TEST_TOKENS("---", ident("---"));
    TEST_TOKENS(fromUChar32(0x2003), ident(fromUChar32(0x2003))); // em-space
    TEST_TOKENS(fromUChar32(0xA0), ident(fromUChar32(0xA0))); // non-breaking space
    TEST_TOKENS(fromUChar32(0x1234), ident(fromUChar32(0x1234)));
    TEST_TOKENS(fromUChar32(0x12345), ident(fromUChar32(0x12345)));
    TEST_TOKENS(String("\0", 1), ident(fromUChar32(0xFFFD)));
    TEST_TOKENS(String("ab\0c", 4), ident("ab" + fromUChar32(0xFFFD) + "c"));
    TEST_TOKENS(String("ab\0c", 4), ident("ab" + fromUChar32(0xFFFD) + "c"));
}
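Finally, a hypothetical sketch of the driver behind TEST_TOKENS, for readers who want to reproduce these tests. This is an assumption for illustration only: in the real file TEST_TOKENS is a macro forwarding to a testTokens() helper, and the tokenizer entry point may differ between Blink revisions.

// Hypothetical sketch, not the real TEST_TOKENS definition.
static void testTokens(const String& input, const Vector<CSSParserToken>& expected)
{
    Vector<CSSParserToken> actual;
    CSSTokenizer::tokenize(input, actual); // assumed entry point for this Blink revision
    ASSERT_EQ(expected.size(), actual.size());
    for (size_t i = 0; i < expected.size(); ++i) {
        EXPECT_EQ(expected[i].type(), actual[i].type());   // token type matches
        EXPECT_EQ(expected[i].value(), actual[i].value()); // token value matches
    }
}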