// This is called with CUR_CHAR() before first hex digit, and should return with
// it pointing to last hex digit
STATIC bool get_hex(mp_lexer_t *lex, int num_digits, uint *result) {
    uint num = 0;
    while (num_digits-- != 0) {
        next_char(lex);
        unichar c = CUR_CHAR(lex);
        if (!unichar_isxdigit(c)) {
            return false;
        }
        num = (num << 4) + hex_digit(c);
    }
    *result = num;
    return true;
}
// This is called with CUR_CHAR() before first hex digit, and should return with
// it pointing to last hex digit
// num_digits must be greater than zero
STATIC bool get_hex(mp_lexer_t *lex, size_t num_digits, mp_uint_t *result) {
    mp_uint_t num = 0;
    while (num_digits-- != 0) {
        next_char(lex);
        unichar c = CUR_CHAR(lex);
        if (!unichar_isxdigit(c)) {
            return false;
        }
        num = (num << 4) + unichar_xdigit_value(c);
    }
    *result = num;
    return true;
}
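// A minimal, standalone sketch (not MicroPython code) of the same
// accumulate-4-bits-per-digit approach that get_hex() uses, applied to a plain
// C string instead of the lexer stream.  The names parse_hex/main are
// hypothetical and exist only for illustration; compile it as a separate program.
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

// Parse exactly num_digits hex digits from s; fail if a non-hex char appears.
static bool parse_hex(const char *s, int num_digits, unsigned int *result) {
    unsigned int num = 0;
    while (num_digits-- != 0) {
        unsigned char c = (unsigned char)*s++;
        if (!isxdigit(c)) {
            return false;
        }
        // shift in one nibble per hex digit, as get_hex() does
        num = (num << 4) + (unsigned int)(isdigit(c) ? c - '0' : tolower(c) - 'a' + 10);
    }
    *result = num;
    return true;
}

int main(void) {
    unsigned int v;
    // '\xhh' consumes 2 digits, '\uxxxx' 4, '\Uxxxxxxxx' 8 (see the escape handling below)
    if (parse_hex("41", 2, &v)) {
        printf("0x%x\n", v); // prints 0x41, i.e. 'A'
    }
    return 0;
}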
STATIC void mp_lexer_next_token_into(mp_lexer_t *lex, mp_token_t *tok, bool first_token) {
    // skip white space and comments
    bool had_physical_newline = false;
    while (!is_end(lex)) {
        if (is_physical_newline(lex)) {
            had_physical_newline = true;
            next_char(lex);
        } else if (is_whitespace(lex)) {
            next_char(lex);
        } else if (is_char(lex, '#')) {
            next_char(lex);
            while (!is_end(lex) && !is_physical_newline(lex)) {
                next_char(lex);
            }
            // had_physical_newline will be set on next loop
        } else if (is_char(lex, '\\')) {
            // backslash (outside string literals) must appear just before a physical newline
            next_char(lex);
            if (!is_physical_newline(lex)) {
                // SyntaxError: unexpected character after line continuation character
                tok->src_line = lex->line;
                tok->src_column = lex->column;
                tok->kind = MP_TOKEN_BAD_LINE_CONTINUATION;
                vstr_reset(&lex->vstr);
                tok->str = vstr_str(&lex->vstr);
                tok->len = 0;
                return;
            } else {
                next_char(lex);
            }
        } else {
            break;
        }
    }

    // set token source information
    tok->src_line = lex->line;
    tok->src_column = lex->column;

    // start new token text
    vstr_reset(&lex->vstr);

    if (first_token && lex->line == 1 && lex->column != 1) {
        // check that the first token is in the first column
        // if first token is not on first line, we get a physical newline and
        // this check is done as part of normal indent/dedent checking below
        // (done to get equivalence with CPython)
        tok->kind = MP_TOKEN_INDENT;
    } else if (lex->emit_dent < 0) {
        tok->kind = MP_TOKEN_DEDENT;
        lex->emit_dent += 1;
    } else if (lex->emit_dent > 0) {
        tok->kind = MP_TOKEN_INDENT;
        lex->emit_dent -= 1;
    } else if (had_physical_newline && lex->nested_bracket_level == 0) {
        tok->kind = MP_TOKEN_NEWLINE;

        uint num_spaces = lex->column - 1;
        lex->emit_dent = 0;
        if (num_spaces == indent_top(lex)) {
        } else if (num_spaces > indent_top(lex)) {
            indent_push(lex, num_spaces);
            lex->emit_dent += 1;
        } else {
            while (num_spaces < indent_top(lex)) {
                indent_pop(lex);
                lex->emit_dent -= 1;
            }
            if (num_spaces != indent_top(lex)) {
                tok->kind = MP_TOKEN_DEDENT_MISMATCH;
            }
        }
    } else if (is_end(lex)) {
        if (indent_top(lex) > 0) {
            tok->kind = MP_TOKEN_NEWLINE;
            lex->emit_dent = 0;
            while (indent_top(lex) > 0) {
                indent_pop(lex);
                lex->emit_dent -= 1;
            }
        } else {
            tok->kind = MP_TOKEN_END;
        }
    } else if (is_char_or(lex, '\'', '\"')
               || (is_char_or3(lex, 'r', 'u', 'b') && is_char_following_or(lex, '\'', '\"'))
               || ((is_char_and(lex, 'r', 'b') || is_char_and(lex, 'b', 'r')) && is_char_following_following_or(lex, '\'', '\"'))) {
        // a string or bytes literal

        // parse type codes
        bool is_raw = false;
        bool is_bytes = false;
        if (is_char(lex, 'u')) {
            next_char(lex);
        } else if (is_char(lex, 'b')) {
            is_bytes = true;
            next_char(lex);
            if (is_char(lex, 'r')) {
                is_raw = true;
                next_char(lex);
            }
        } else if (is_char(lex, 'r')) {
            is_raw = true;
            next_char(lex);
            if (is_char(lex, 'b')) {
                is_bytes = true;
                next_char(lex);
            }
        }

        // set token kind
        if (is_bytes) {
            tok->kind = MP_TOKEN_BYTES;
        } else {
            tok->kind = MP_TOKEN_STRING;
        }

        // get first quoting character
        char quote_char = '\'';
        if (is_char(lex, '\"')) {
            quote_char = '\"';
        }
        next_char(lex);

        // work out if it's a single or triple quoted literal
        int num_quotes;
        if (is_char_and(lex, quote_char, quote_char)) {
            // triple quotes
            next_char(lex);
            next_char(lex);
            num_quotes = 3;
        } else {
            // single quotes
            num_quotes = 1;
        }

        // parse the literal
        int n_closing = 0;
        while (!is_end(lex) && (num_quotes > 1 || !is_char(lex, '\n')) && n_closing < num_quotes) {
            if (is_char(lex, quote_char)) {
                n_closing += 1;
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
            } else {
                n_closing = 0;
                if (is_char(lex, '\\')) {
                    next_char(lex);
                    unichar c = CUR_CHAR(lex);
                    if (is_raw) {
                        // raw strings allow escaping of quotes, but the backslash is also emitted
                        vstr_add_char(&lex->vstr, '\\');
                    } else {
                        switch (c) {
                            case MP_LEXER_CHAR_EOF: break; // TODO a proper error message?
                            case '\n': c = MP_LEXER_CHAR_EOF; break; // TODO check this works correctly (we are supposed to ignore it
                            case '\\': break;
                            case '\'': break;
                            case '"': break;
                            case 'a': c = 0x07; break;
                            case 'b': c = 0x08; break;
                            case 't': c = 0x09; break;
                            case 'n': c = 0x0a; break;
                            case 'v': c = 0x0b; break;
                            case 'f': c = 0x0c; break;
                            case 'r': c = 0x0d; break;
                            case 'u':
                            case 'U':
                                if (is_bytes) {
                                    // b'\u1234' == b'\\u1234'
                                    vstr_add_char(&lex->vstr, '\\');
                                    break;
                                }
                                // Otherwise fall through.
                            case 'x': {
                                uint num = 0;
                                if (!get_hex(lex, (c == 'x' ? 2 : c == 'u' ? 4 : 8), &num)) {
                                    // TODO error message
                                    assert(0);
                                }
                                c = num;
                                break;
                            }
                            case 'N':
                                // Supporting '\N{LATIN SMALL LETTER A}' == 'a' would require keeping the
                                // entire Unicode name table in the core. As of Unicode 6.3.0, that's nearly
                                // 3MB of text; even gzip-compressed and with minimal structure, it'll take
                                // roughly half a meg of storage. This form of Unicode escape may be added
                                // later on, but it's definitely not a priority right now. -- CJA 20140607
                                assert(!"Unicode name escapes not supported");
                                break;
                            default:
                                if (c >= '0' && c <= '7') {
                                    // Octal sequence, 1-3 chars
                                    int digits = 3;
                                    int num = c - '0';
                                    while (is_following_odigit(lex) && --digits != 0) {
                                        next_char(lex);
                                        num = num * 8 + (CUR_CHAR(lex) - '0');
                                    }
                                    c = num;
                                } else {
                                    // unrecognised escape character; CPython lets this through verbatim as '\' and then the character
                                    vstr_add_char(&lex->vstr, '\\');
                                }
                                break;
                        }
                    }
                    if (c != MP_LEXER_CHAR_EOF) {
                        if (c < 0x110000 && !is_bytes) {
                            vstr_add_char(&lex->vstr, c);
                        } else if (c < 0x100 && is_bytes) {
                            vstr_add_byte(&lex->vstr, c);
                        } else {
                            assert(!"TODO: Throw an error, invalid escape code probably");
                        }
                    }
                } else {
                    vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                }
            }
            next_char(lex);
        }

        // check we got the required end quotes
        if (n_closing < num_quotes) {
            tok->kind = MP_TOKEN_LONELY_STRING_OPEN;
        }

        // cut off the end quotes from the token text
        vstr_cut_tail_bytes(&lex->vstr, n_closing);

    } else if (is_head_of_identifier(lex)) {
        tok->kind = MP_TOKEN_NAME;

        // get first char
        vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex) && is_tail_of_identifier(lex)) {
            vstr_add_char(&lex->vstr, CUR_CHAR(lex));
            next_char(lex);
        }

    } else if (is_digit(lex) || (is_char(lex, '.') && is_following_digit(lex))) {
        tok->kind = MP_TOKEN_NUMBER;

        // get first char
        vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex)) {
            if (is_char_or(lex, 'e', 'E')) {
                vstr_add_char(&lex->vstr, 'e');
                next_char(lex);
                if (is_char(lex, '+') || is_char(lex, '-')) {
                    vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                    next_char(lex);
                }
            } else if (is_letter(lex) || is_digit(lex) || is_char_or(lex, '_', '.')) {
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                next_char(lex);
            } else {
                break;
            }
        }

    } else if (is_char(lex, '.')) {
        // special handling for . and ... operators, because .. is not a valid operator

        // get first char
        vstr_add_char(&lex->vstr, '.');
        next_char(lex);

        if (is_char_and(lex, '.', '.')) {
            vstr_add_char(&lex->vstr, '.');
            vstr_add_char(&lex->vstr, '.');
            next_char(lex);
            next_char(lex);
            tok->kind = MP_TOKEN_ELLIPSIS;
        } else {
            tok->kind = MP_TOKEN_DEL_PERIOD;
        }

    } else {
        // search for encoded delimiter or operator

        const char *t = tok_enc;
        uint tok_enc_index = 0;
        for (; *t != 0 && !is_char(lex, *t); t += 1) {
            if (*t == 'e' || *t == 'c') {
                t += 1;
            } else if (*t == 'E') {
                tok_enc_index -= 1;
                t += 1;
            }
            tok_enc_index += 1;
        }

        next_char(lex);

        if (*t == 0) {
            // didn't match any delimiter or operator characters
            tok->kind = MP_TOKEN_INVALID;

        } else {
            // matched a delimiter or operator character

            // get the maximum characters for a valid token
            t += 1;
            uint t_index = tok_enc_index;
            for (;;) {
                for (; *t == 'e'; t += 1) {
                    t += 1;
                    t_index += 1;
                    if (is_char(lex, *t)) {
                        next_char(lex);
                        tok_enc_index = t_index;
                        break;
                    }
                }

                if (*t == 'E') {
                    t += 1;
                    if (is_char(lex, *t)) {
                        next_char(lex);
                        tok_enc_index = t_index;
                    } else {
                        tok->kind = MP_TOKEN_INVALID;
                        goto tok_enc_no_match;
                    }
                    break;
                }

                if (*t == 'c') {
                    t += 1;
                    t_index += 1;
                    if (is_char(lex, *t)) {
                        next_char(lex);
                        tok_enc_index = t_index;
                        t += 1;
                    } else {
                        break;
                    }
                } else {
                    break;
                }
            }

            // set token kind
            tok->kind = tok_enc_kind[tok_enc_index];

            tok_enc_no_match:

            // compute bracket level for implicit line joining
            if (tok->kind == MP_TOKEN_DEL_PAREN_OPEN || tok->kind == MP_TOKEN_DEL_BRACKET_OPEN || tok->kind == MP_TOKEN_DEL_BRACE_OPEN) {
                lex->nested_bracket_level += 1;
            } else if (tok->kind == MP_TOKEN_DEL_PAREN_CLOSE || tok->kind == MP_TOKEN_DEL_BRACKET_CLOSE || tok->kind == MP_TOKEN_DEL_BRACE_CLOSE) {
                lex->nested_bracket_level -= 1;
            }
        }
    }

    // point token text to vstr buffer
    tok->str = vstr_str(&lex->vstr);
    tok->len = vstr_len(&lex->vstr);

    // check for keywords
    if (tok->kind == MP_TOKEN_NAME) {
        // We check for __debug__ here and convert it to its value. This is so
        // the parser gives a syntax error on, eg, x.__debug__. Otherwise, we
        // need to check for this special token in many places in the compiler.
        // TODO improve speed of these string comparisons
        //for (int i = 0; tok_kw[i] != NULL; i++) {
        for (int i = 0; i < MP_ARRAY_SIZE(tok_kw); i++) {
            if (str_strn_equal(tok_kw[i], tok->str, tok->len)) {
                if (i == MP_ARRAY_SIZE(tok_kw) - 1) {
                    // tok_kw[MP_ARRAY_SIZE(tok_kw) - 1] == "__debug__"
                    tok->kind = (mp_optimise_value == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
                } else {
                    tok->kind = MP_TOKEN_KW_FALSE + i;
                }
                break;
            }
        }
    }
}
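// A minimal, standalone sketch (not MicroPython code) of the indent/dedent
// bookkeeping used by the tokenisers above: an indent stack plus an emit_dent
// counter.  A deeper column pushes and schedules one INDENT; a shallower column
// pops and schedules one DEDENT per pop; a column matching no stack entry is a
// mismatch error.  All names here are hypothetical; compile as a separate program.
#include <stdio.h>

int main(void) {
    unsigned int stack[32] = {0};              // stack[sp] plays the role of indent_top()
    int sp = 0;
    unsigned int lines[] = {0, 4, 8, 4, 0};    // leading-space count of successive logical lines

    for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
        unsigned int col = lines[i];
        int emit_dent = 0;                     // >0: emit INDENTs, <0: emit DEDENTs
        if (col > stack[sp]) {
            stack[++sp] = col;                 // indent_push()
            emit_dent += 1;
        } else {
            while (col < stack[sp]) {
                sp -= 1;                       // indent_pop()
                emit_dent -= 1;
            }
            if (col != stack[sp]) {
                printf("line %zu: DEDENT_MISMATCH\n", i);
                continue;
            }
        }
        printf("line %zu: emit_dent=%d\n", i, emit_dent);
        // prints 0, +1, +1, -1, -1 for the columns above
    }
    return 0;
}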
void mp_lexer_to_next(mp_lexer_t *lex) {
    // start new token text
    vstr_reset(&lex->vstr);

    // skip white space and comments
    bool had_physical_newline = skip_whitespace(lex, false);

    // set token source information
    lex->tok_line = lex->line;
    lex->tok_column = lex->column;

    if (lex->emit_dent < 0) {
        lex->tok_kind = MP_TOKEN_DEDENT;
        lex->emit_dent += 1;
    } else if (lex->emit_dent > 0) {
        lex->tok_kind = MP_TOKEN_INDENT;
        lex->emit_dent -= 1;
    } else if (had_physical_newline && lex->nested_bracket_level == 0) {
        lex->tok_kind = MP_TOKEN_NEWLINE;

        size_t num_spaces = lex->column - 1;
        if (num_spaces == indent_top(lex)) {
        } else if (num_spaces > indent_top(lex)) {
            indent_push(lex, num_spaces);
            lex->emit_dent += 1;
        } else {
            while (num_spaces < indent_top(lex)) {
                indent_pop(lex);
                lex->emit_dent -= 1;
            }
            if (num_spaces != indent_top(lex)) {
                lex->tok_kind = MP_TOKEN_DEDENT_MISMATCH;
            }
        }
    } else if (is_end(lex)) {
        lex->tok_kind = MP_TOKEN_END;
    } else if (is_string_or_bytes(lex)) {
        // a string or bytes literal

        // Python requires adjacent string/bytes literals to be automatically
        // concatenated. We do it here in the tokeniser to make efficient use of RAM,
        // because then the lexer's vstr can be used to accumulate the string literal,
        // in contrast to creating a parse tree of strings and then joining them later
        // in the compiler. It's also more compact in code size to do it here.

        // MP_TOKEN_END is used to indicate that this is the first string token
        lex->tok_kind = MP_TOKEN_END;

        // Loop to accumulate string/bytes literals
        do {
            // parse type codes
            bool is_raw = false;
            mp_token_kind_t kind = MP_TOKEN_STRING;
            int n_char = 0;
            if (is_char(lex, 'u')) {
                n_char = 1;
            } else if (is_char(lex, 'b')) {
                kind = MP_TOKEN_BYTES;
                n_char = 1;
                if (is_char_following(lex, 'r')) {
                    is_raw = true;
                    n_char = 2;
                }
            } else if (is_char(lex, 'r')) {
                is_raw = true;
                n_char = 1;
                if (is_char_following(lex, 'b')) {
                    kind = MP_TOKEN_BYTES;
                    n_char = 2;
                }
            }

            // Set or check token kind
            if (lex->tok_kind == MP_TOKEN_END) {
                lex->tok_kind = kind;
            } else if (lex->tok_kind != kind) {
                // Can't concatenate string with bytes
                break;
            }

            // Skip any type code characters
            if (n_char != 0) {
                next_char(lex);
                if (n_char == 2) {
                    next_char(lex);
                }
            }

            // Parse the literal
            parse_string_literal(lex, is_raw);

            // Skip whitespace so we can check if there's another string following
            skip_whitespace(lex, true);

        } while (is_string_or_bytes(lex));

    } else if (is_head_of_identifier(lex)) {
        lex->tok_kind = MP_TOKEN_NAME;

        // get first char (add as byte to remain 8-bit clean and support utf-8)
        vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex) && is_tail_of_identifier(lex)) {
            vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
            next_char(lex);
        }

        // Check if the name is a keyword.
        // We also check for __debug__ here and convert it to its value. This is
        // so the parser gives a syntax error on, eg, x.__debug__. Otherwise, we
        // need to check for this special token in many places in the compiler.
        const char *s = vstr_null_terminated_str(&lex->vstr);
        for (size_t i = 0; i < MP_ARRAY_SIZE(tok_kw); i++) {
            int cmp = strcmp(s, tok_kw[i]);
            if (cmp == 0) {
                lex->tok_kind = MP_TOKEN_KW_FALSE + i;
                if (lex->tok_kind == MP_TOKEN_KW___DEBUG__) {
                    lex->tok_kind = (MP_STATE_VM(mp_optimise_value) == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
                }
                break;
            } else if (cmp < 0) {
                // Table is sorted and comparison was less-than, so stop searching
                break;
            }
        }

    } else if (is_digit(lex) || (is_char(lex, '.') && is_following_digit(lex))) {
        bool forced_integer = false;
        if (is_char(lex, '.')) {
            lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
        } else {
            lex->tok_kind = MP_TOKEN_INTEGER;
            if (is_char(lex, '0') && is_following_base_char(lex)) {
                forced_integer = true;
            }
        }

        // get first char
        vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex)) {
            if (!forced_integer && is_char_or(lex, 'e', 'E')) {
                lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
                vstr_add_char(&lex->vstr, 'e');
                next_char(lex);
                if (is_char(lex, '+') || is_char(lex, '-')) {
                    vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                    next_char(lex);
                }
            } else if (is_letter(lex) || is_digit(lex) || is_char(lex, '.')) {
                if (is_char_or3(lex, '.', 'j', 'J')) {
                    lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
                }
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                next_char(lex);
            } else {
                break;
            }
        }

    } else {
        // search for encoded delimiter or operator

        const char *t = tok_enc;
        size_t tok_enc_index = 0;
        for (; *t != 0 && !is_char(lex, *t); t += 1) {
            if (*t == 'e' || *t == 'c') {
                t += 1;
            }
            tok_enc_index += 1;
        }

        next_char(lex);

        if (*t == 0) {
            // didn't match any delimiter or operator characters
            lex->tok_kind = MP_TOKEN_INVALID;

        } else if (*t == '!') {
            // "!=" is a special case because "!" is not a valid operator
            if (is_char(lex, '=')) {
                next_char(lex);
                lex->tok_kind = MP_TOKEN_OP_NOT_EQUAL;
            } else {
                lex->tok_kind = MP_TOKEN_INVALID;
            }

        } else if (*t == '.') {
            // "." and "..." are special cases because ".." is not a valid operator
            if (is_char_and(lex, '.', '.')) {
                next_char(lex);
                next_char(lex);
                lex->tok_kind = MP_TOKEN_ELLIPSIS;
            } else {
                lex->tok_kind = MP_TOKEN_DEL_PERIOD;
            }

        } else {
            // matched a delimiter or operator character

            // get the maximum characters for a valid token
            t += 1;
            size_t t_index = tok_enc_index;
            while (*t == 'c' || *t == 'e') {
                t_index += 1;
                if (is_char(lex, t[1])) {
                    next_char(lex);
                    tok_enc_index = t_index;
                    if (*t == 'e') {
                        break;
                    }
                } else if (*t == 'c') {
                    break;
                }
                t += 2;
            }

            // set token kind
            lex->tok_kind = tok_enc_kind[tok_enc_index];

            // compute bracket level for implicit line joining
            if (lex->tok_kind == MP_TOKEN_DEL_PAREN_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACKET_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACE_OPEN) {
                lex->nested_bracket_level += 1;
            } else if (lex->tok_kind == MP_TOKEN_DEL_PAREN_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACKET_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACE_CLOSE) {
                lex->nested_bracket_level -= 1;
            }
        }
    }
}
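// A minimal, standalone sketch (not MicroPython code) of the operator-encoding
// scheme consumed by the "search for encoded delimiter or operator" loops above.
// In the encoding string, a head character is followed by zero or more pairs:
//   'e' X  -- X may optionally follow; if it does, the token ends there
//   'c' X  -- X must follow for any longer form to be considered
// Each head character and each pair owns one slot in a parallel kind table.
// The strings demo_enc/demo_kind and the function match_op are hypothetical;
// only the matching logic mirrors the lexer loop.  Compile as a separate program.
#include <stdio.h>
#include <stddef.h>

static const char *demo_enc = "<e=c<e=";                      // encodes:  <  <=  <<  <<=
static const char *const demo_kind[] = {"<", "<=", "<<", "<<="};

// Return the longest operator starting at src, or NULL if src does not start
// with an operator.  Assumes src starts with a non-letter character, as in the
// lexer, where identifiers/numbers/strings are handled before this point.
static const char *match_op(const char *src) {
    const char *t = demo_enc;
    size_t kind_index = 0;
    for (; *t != 0 && *t != src[0]; t++) {     // find the head character
        if (*t == 'e' || *t == 'c') {
            t++;                               // skip the pair's second character
        }
        kind_index++;
    }
    if (*t == 0) {
        return NULL;
    }
    src++;
    t++;
    size_t t_index = kind_index;
    while (*t == 'c' || *t == 'e') {
        t_index++;
        if (src[0] == t[1]) {
            src++;
            kind_index = t_index;
            if (*t == 'e') {
                break;                         // optional char matched: token is complete
            }
        } else if (*t == 'c') {
            break;                             // mandatory char missing: stop here
        }
        t += 2;
    }
    return demo_kind[kind_index];
}

int main(void) {
    printf("%s %s %s\n", match_op("< a"), match_op("<= b"), match_op("<<= c"));
    // prints: < <= <<=
    return 0;
}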
STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw) {
    // get first quoting character
    char quote_char = '\'';
    if (is_char(lex, '\"')) {
        quote_char = '\"';
    }
    next_char(lex);

    // work out if it's a single or triple quoted literal
    size_t num_quotes;
    if (is_char_and(lex, quote_char, quote_char)) {
        // triple quotes
        next_char(lex);
        next_char(lex);
        num_quotes = 3;
    } else {
        // single quotes
        num_quotes = 1;
    }

    size_t n_closing = 0;
    while (!is_end(lex) && (num_quotes > 1 || !is_char(lex, '\n')) && n_closing < num_quotes) {
        if (is_char(lex, quote_char)) {
            n_closing += 1;
            vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        } else {
            n_closing = 0;
            if (is_char(lex, '\\')) {
                next_char(lex);
                unichar c = CUR_CHAR(lex);
                if (is_raw) {
                    // raw strings allow escaping of quotes, but the backslash is also emitted
                    vstr_add_char(&lex->vstr, '\\');
                } else {
                    switch (c) {
                        // note: "c" can never be MP_LEXER_EOF because next_char
                        // always inserts a newline at the end of the input stream
                        case '\n': c = MP_LEXER_EOF; break; // backslash escape the newline, just ignore it
                        case '\\': break;
                        case '\'': break;
                        case '"': break;
                        case 'a': c = 0x07; break;
                        case 'b': c = 0x08; break;
                        case 't': c = 0x09; break;
                        case 'n': c = 0x0a; break;
                        case 'v': c = 0x0b; break;
                        case 'f': c = 0x0c; break;
                        case 'r': c = 0x0d; break;
                        case 'u':
                        case 'U':
                            if (lex->tok_kind == MP_TOKEN_BYTES) {
                                // b'\u1234' == b'\\u1234'
                                vstr_add_char(&lex->vstr, '\\');
                                break;
                            }
                            // Otherwise fall through.
                        case 'x': {
                            mp_uint_t num = 0;
                            if (!get_hex(lex, (c == 'x' ? 2 : c == 'u' ? 4 : 8), &num)) {
                                // not enough hex chars for escape sequence
                                lex->tok_kind = MP_TOKEN_INVALID;
                            }
                            c = num;
                            break;
                        }
                        case 'N':
                            // Supporting '\N{LATIN SMALL LETTER A}' == 'a' would require keeping the
                            // entire Unicode name table in the core. As of Unicode 6.3.0, that's nearly
                            // 3MB of text; even gzip-compressed and with minimal structure, it'll take
                            // roughly half a meg of storage. This form of Unicode escape may be added
                            // later on, but it's definitely not a priority right now. -- CJA 20140607
                            mp_not_implemented("unicode name escapes");
                            break;
                        default:
                            if (c >= '0' && c <= '7') {
                                // Octal sequence, 1-3 chars
                                size_t digits = 3;
                                mp_uint_t num = c - '0';
                                while (is_following_odigit(lex) && --digits != 0) {
                                    next_char(lex);
                                    num = num * 8 + (CUR_CHAR(lex) - '0');
                                }
                                c = num;
                            } else {
                                // unrecognised escape character; CPython lets this through verbatim as '\' and then the character
                                vstr_add_char(&lex->vstr, '\\');
                            }
                            break;
                    }
                }
                if (c != MP_LEXER_EOF) {
                    if (MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) {
                        if (c < 0x110000 && lex->tok_kind == MP_TOKEN_STRING) {
                            vstr_add_char(&lex->vstr, c);
                        } else if (c < 0x100 && lex->tok_kind == MP_TOKEN_BYTES) {
                            vstr_add_byte(&lex->vstr, c);
                        } else {
                            // unicode character out of range
                            // this raises a generic SyntaxError; could provide more info
                            lex->tok_kind = MP_TOKEN_INVALID;
                        }
                    } else {
                        // without unicode everything is just added as an 8-bit byte
                        if (c < 0x100) {
                            vstr_add_byte(&lex->vstr, c);
                        } else {
                            // 8-bit character out of range
                            // this raises a generic SyntaxError; could provide more info
                            lex->tok_kind = MP_TOKEN_INVALID;
                        }
                    }
                }
            } else {
                // Add the "character" as a byte so that we remain 8-bit clean.
                // This way, strings are parsed correctly whether or not they contain utf-8 chars.
                vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
            }
        }
        next_char(lex);
    }

    // check we got the required end quotes
    if (n_closing < num_quotes) {
        lex->tok_kind = MP_TOKEN_LONELY_STRING_OPEN;
    }

    // cut off the end quotes from the token text
    vstr_cut_tail_bytes(&lex->vstr, n_closing);
}
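// A minimal, standalone sketch (not MicroPython code) of the closing-quote
// counting used by parse_string_literal() above: accumulate characters (quotes
// included) until num_quotes consecutive quote characters have been seen, then
// trim those quotes off the tail, as vstr_cut_tail_bytes() does.  Escape
// handling and the single-quote newline restriction are omitted; scan_literal
// and main are hypothetical names.  Compile as a separate program.
#include <stdio.h>

// src points just past the opening quote(s); returns the body length, or -1 if
// the literal is unterminated (the "lonely string open" case).
static int scan_literal(const char *src, char quote_char, int num_quotes, char *out) {
    int len = 0;
    int n_closing = 0;
    while (*src != '\0' && n_closing < num_quotes) {
        if (*src == quote_char) {
            n_closing += 1;
        } else {
            n_closing = 0;
        }
        out[len++] = *src++;
    }
    if (n_closing < num_quotes) {
        return -1;
    }
    out[len - n_closing] = '\0';   // cut off the end quotes from the accumulated text
    return len - n_closing;
}

int main(void) {
    char buf[64];
    if (scan_literal("abc'def''' and more", '\'', 3, buf) >= 0) {
        printf("%s\n", buf);       // prints: abc'def
    }
    return 0;
}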
static void mp_lexer_next_token_into(mp_lexer_t *lex, mp_token_t *tok, bool first_token) {
    // skip white space and comments
    bool had_physical_newline = false;
    while (!is_end(lex)) {
        if (is_physical_newline(lex)) {
            had_physical_newline = true;
            next_char(lex);
        } else if (is_whitespace(lex)) {
            next_char(lex);
        } else if (is_char(lex, '#')) {
            next_char(lex);
            while (!is_end(lex) && !is_physical_newline(lex)) {
                next_char(lex);
            }
            // had_physical_newline will be set on next loop
        } else if (is_char(lex, '\\')) {
            // backslash (outside string literals) must appear just before a physical newline
            next_char(lex);
            if (!is_physical_newline(lex)) {
                // TODO SyntaxError
                assert(0);
            } else {
                next_char(lex);
            }
        } else {
            break;
        }
    }

    // set token source information
    tok->src_name = lex->name;
    tok->src_line = lex->line;
    tok->src_column = lex->column;

    // start new token text
    vstr_reset(&lex->vstr);

    if (first_token && lex->line == 1 && lex->column != 1) {
        // check that the first token is in the first column
        // if first token is not on first line, we get a physical newline and
        // this check is done as part of normal indent/dedent checking below
        // (done to get equivalence with CPython)
        tok->kind = MP_TOKEN_INDENT;
    } else if (lex->emit_dent < 0) {
        tok->kind = MP_TOKEN_DEDENT;
        lex->emit_dent += 1;
    } else if (lex->emit_dent > 0) {
        tok->kind = MP_TOKEN_INDENT;
        lex->emit_dent -= 1;
    } else if (had_physical_newline && lex->nested_bracket_level == 0) {
        tok->kind = MP_TOKEN_NEWLINE;

        uint num_spaces = lex->column - 1;
        lex->emit_dent = 0;
        if (num_spaces == indent_top(lex)) {
        } else if (num_spaces > indent_top(lex)) {
            indent_push(lex, num_spaces);
            lex->emit_dent += 1;
        } else {
            while (num_spaces < indent_top(lex)) {
                indent_pop(lex);
                lex->emit_dent -= 1;
            }
            if (num_spaces != indent_top(lex)) {
                tok->kind = MP_TOKEN_DEDENT_MISMATCH;
            }
        }
    } else if (is_end(lex)) {
        if (indent_top(lex) > 0) {
            tok->kind = MP_TOKEN_NEWLINE;
            lex->emit_dent = 0;
            while (indent_top(lex) > 0) {
                indent_pop(lex);
                lex->emit_dent -= 1;
            }
        } else {
            tok->kind = MP_TOKEN_END;
        }
    } else if (is_char_or(lex, '\'', '\"')
               || (is_char_or3(lex, 'r', 'u', 'b') && is_char_following_or(lex, '\'', '\"'))
               || ((is_char_and(lex, 'r', 'b') || is_char_and(lex, 'b', 'r')) && is_char_following_following_or(lex, '\'', '\"'))) {
        // a string or bytes literal

        // parse type codes
        bool is_raw = false;
        bool is_bytes = false;
        if (is_char(lex, 'u')) {
            next_char(lex);
        } else if (is_char(lex, 'b')) {
            is_bytes = true;
            next_char(lex);
            if (is_char(lex, 'r')) {
                is_raw = true;
                next_char(lex);
            }
        } else if (is_char(lex, 'r')) {
            is_raw = true;
            next_char(lex);
            if (is_char(lex, 'b')) {
                is_bytes = true;
                next_char(lex);
            }
        }

        // set token kind
        if (is_bytes) {
            tok->kind = MP_TOKEN_BYTES;
        } else {
            tok->kind = MP_TOKEN_STRING;
        }

        // get first quoting character
        char quote_char = '\'';
        if (is_char(lex, '\"')) {
            quote_char = '\"';
        }
        next_char(lex);

        // work out if it's a single or triple quoted literal
        int num_quotes;
        if (is_char_and(lex, quote_char, quote_char)) {
            // triple quotes
            next_char(lex);
            next_char(lex);
            num_quotes = 3;
        } else {
            // single quotes
            num_quotes = 1;
        }

        // parse the literal
        int n_closing = 0;
        while (!is_end(lex) && (num_quotes > 1 || !is_char(lex, '\n')) && n_closing < num_quotes) {
            if (is_char(lex, quote_char)) {
                n_closing += 1;
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
            } else {
                n_closing = 0;
                if (!is_raw && is_char(lex, '\\')) {
                    next_char(lex);
                    unichar c = CUR_CHAR(lex);
                    switch (c) {
                        case MP_LEXER_CHAR_EOF: break; // TODO a proper error message?
                        case '\n': c = MP_LEXER_CHAR_EOF; break; // TODO check this works correctly (we are supposed to ignore it
                        case '\\': break;
                        case '\'': break;
                        case '"': break;
                        case 'a': c = 0x07; break;
                        case 'b': c = 0x08; break;
                        case 't': c = 0x09; break;
                        case 'n': c = 0x0a; break;
                        case 'v': c = 0x0b; break;
                        case 'f': c = 0x0c; break;
                        case 'r': c = 0x0d; break;
                        // TODO \ooo octal
                        case 'x': // TODO \xhh
                        case 'N': // TODO \N{name} only in strings
                        case 'u': // TODO \uxxxx only in strings
                        case 'U': // TODO \Uxxxxxxxx only in strings
                        default: break; // TODO error message
                    }
                    if (c != MP_LEXER_CHAR_EOF) {
                        vstr_add_char(&lex->vstr, c);
                    }
                } else {
                    vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                }
            }
            next_char(lex);
        }

        // check we got the required end quotes
        if (n_closing < num_quotes) {
            tok->kind = MP_TOKEN_LONELY_STRING_OPEN;
        }

        // cut off the end quotes from the token text
        vstr_cut_tail(&lex->vstr, n_closing);

    } else if (is_head_of_identifier(lex)) {
        tok->kind = MP_TOKEN_NAME;

        // get first char
        vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex) && is_tail_of_identifier(lex)) {
            vstr_add_char(&lex->vstr, CUR_CHAR(lex));
            next_char(lex);
        }

    } else if (is_digit(lex) || (is_char(lex, '.') && is_following_digit(lex))) {
        tok->kind = MP_TOKEN_NUMBER;

        // get first char
        vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex)) {
            if (is_char_or(lex, 'e', 'E')) {
                vstr_add_char(&lex->vstr, 'e');
                next_char(lex);
                if (is_char(lex, '+') || is_char(lex, '-')) {
                    vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                    next_char(lex);
                }
            } else if (is_letter(lex) || is_digit(lex) || is_char_or(lex, '_', '.')) {
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                next_char(lex);
            } else {
                break;
            }
        }

    } else {
        // search for encoded delimiter or operator

        const char *t = tok_enc;
        uint tok_enc_index = 0;
        for (; *t != 0 && !is_char(lex, *t); t += 1) {
            if (*t == 'e' || *t == 'c') {
                t += 1;
            } else if (*t == 'E') {
                tok_enc_index -= 1;
                t += 1;
            }
            tok_enc_index += 1;
        }

        next_char(lex);

        if (*t == 0) {
            // didn't match any delimiter or operator characters
            tok->kind = MP_TOKEN_INVALID;

        } else {
            // matched a delimiter or operator character

            // get the maximum characters for a valid token
            t += 1;
            uint t_index = tok_enc_index;
            for (;;) {
                for (; *t == 'e'; t += 1) {
                    t += 1;
                    t_index += 1;
                    if (is_char(lex, *t)) {
                        next_char(lex);
                        tok_enc_index = t_index;
                        break;
                    }
                }

                if (*t == 'E') {
                    t += 1;
                    if (is_char(lex, *t)) {
                        next_char(lex);
                        tok_enc_index = t_index;
                    } else {
                        tok->kind = MP_TOKEN_INVALID;
                    }
                    break;
                }

                if (*t == 'c') {
                    t += 1;
                    t_index += 1;
                    if (is_char(lex, *t)) {
                        next_char(lex);
                        tok_enc_index = t_index;
                        t += 1;
                    } else {
                        break;
                    }
                } else {
                    break;
                }
            }

            // set token kind
            tok->kind = tok_enc_kind[tok_enc_index];

            // compute bracket level for implicit line joining
            if (tok->kind == MP_TOKEN_DEL_PAREN_OPEN || tok->kind == MP_TOKEN_DEL_BRACKET_OPEN || tok->kind == MP_TOKEN_DEL_BRACE_OPEN) {
                lex->nested_bracket_level += 1;
            } else if (tok->kind == MP_TOKEN_DEL_PAREN_CLOSE || tok->kind == MP_TOKEN_DEL_BRACKET_CLOSE || tok->kind == MP_TOKEN_DEL_BRACE_CLOSE) {
                lex->nested_bracket_level -= 1;
            }
        }
    }

    // point token text to vstr buffer
    tok->str = vstr_str(&lex->vstr);
    tok->len = vstr_len(&lex->vstr);

    // check for keywords
    if (tok->kind == MP_TOKEN_NAME) {
        for (int i = 0; tok_kw[i] != NULL; i++) {
            if (str_strn_equal(tok_kw[i], tok->str, tok->len)) {
                tok->kind = MP_TOKEN_KW_FALSE + i;
                break;
            }
        }
    }
}