void he_top_level_page_c::do_modifications() {
  // Let the base class apply its generic modifications first.
  he_page_base_c::do_modifications();

  // Depending on which top-level element this page edits, make sure the
  // mandatory child elements are present before the size is re-calculated.
  if (is_id(m_l1_element, KaxInfo)) {
    fix_mandatory_segmentinfo_elements(m_l1_element);

  } else if (is_id(m_l1_element, KaxTracks)) {
    fix_mandatory_segment_tracks_elements(m_l1_element);
  }

  // Recompute the element's coded size including all of its children.
  m_l1_element->UpdateSize(true);
}
void Lex_analyzer::addToken(int tokenType, QString tokenValue){ TokenType *t = new TokenType; Token *tok = new Token; switch(tokenType){ case 1: if(is_keyword(tokenValue,t)){ qDebug() <<"Token" <<tokenValue <<"found in keywords: id=" <<t->id <<"type=" <<t->type <<endl; tok->id=t->id; tok->tokclass = "keywords"; tok->toktype = t->type; tok->value = tokenValue; tokens->append(tok); }else if(is_type(tokenValue,t)){ qDebug() <<"Token" <<tokenValue <<"found in types: id=" <<t->id <<"type=" <<t->type <<endl; tok->id=t->id; tok->tokclass = "types"; tok->toktype = t->type; tok->value = tokenValue; tokens->append(tok); }else{ is_id(tokenValue, t); qDebug() <<"Token" <<tokenValue <<"found in/inserted to ids: id=" <<t->id <<"type=" <<t->type <<endl; tok->id=t->id; tok->tokclass = "ids"; tok->toktype = t->type; tok->value = tokenValue; tokens->append(tok); } break; case 2: is_const(tokenValue, t); qDebug() <<"Token" <<tokenValue <<"found in/inserted to constants: id=" <<t->id <<"type=" <<t->type <<endl; tok->id=t->id; tok->tokclass = "constants"; tok->toktype = t->type; tok->value = tokenValue; tokens->append(tok); break; case 3: case 4: if(is_operator(tokenValue, t)){ qDebug() <<"Token" <<tokenValue <<"found in operators/separators: id=" <<t->id <<"type=" <<t->type <<endl; tok->id=t->id; tok->tokclass = "separators"; tok->toktype = t->type; tok->value = tokenValue; tokens->append(tok); }else{ qDebug() <<"Error unknown token type: " <<tokenType <<endl; } break; default: qDebug() <<"Error unknown token type: " <<tokenType <<endl; } qDebug() <<"Adding token of type[" <<tokenType <<"] = " <<tokenValue <<endl; }
/* Translate a token spelling to its numeric code.
 * Identifiers are delegated to lex_keyword(); everything else is looked up
 * linearly in token_table (terminated by a negative .num entry).
 * Returns -1 when the string matches nothing. */
int lex_str2num(const char *str)
{
    int idx;

    if (is_id(str))
        return lex_keyword(str);

    for (idx = 0; token_table[idx].num >= 0; idx++) {
        /* Entries without a spelling are skipped by the combined test. */
        if (token_table[idx].str != NULL && strcmp(token_table[idx].str, str) == 0)
            return token_table[idx].num;
    }

    return -1;
}
/* insert
 *
 * Interactively read an employee record (id, name, gender, age), validating
 * each field with the corresponding is_* helper and re-prompting until the
 * input is acceptable, then insert the record into the `employee` table.
 *
 * Fixes vs. the previous version:
 *  - the error message returned by sqlite3_exec() is now released with
 *    sqlite3_free() (it was leaked);
 *  - the sqlite3_mprintf() result is checked for NULL (out of memory).
 */
static void insert(sqlite3 * db)
{
	char *sql = NULL, *err = NULL, name[NAME_LEN], gender[GENDER_LEN];
	int id, age, ret;

	/* id: loop until scanf parses an int AND is_id() accepts it.
	 * empty_cache() discards the rest of the line after a bad read. */
	printf("input id:");
	while (0 == scanf("%d", &id) || 0 == is_id(id)) {
		empty_cache();
		system("clear");
		printf("\ninput id:");
	}

	/* name: flush the newline left by scanf before the first Gets(). */
	printf("input name:");
	empty_cache();
	Gets(name, sizeof(name));
	while (0 == is_name(name)) {
		system("clear");
		printf("input name:");
		Gets(name, sizeof(name));
	}

	printf("input gender:");
	Gets(gender, sizeof(gender));
	while (0 == is_gender(gender)) {
		system("clear");
		printf("input gender:");
		Gets(gender, sizeof(gender));
	}

	printf("input age:");
	while (0 == scanf("%d", &age) || 0 == is_age(age)) {
		empty_cache();
		system("clear");
		printf("\ninput age:");
	}

	/* %Q quotes and escapes the strings, preventing SQL injection. */
	sql = sqlite3_mprintf("insert into employee values(%d,%Q,%Q,%d);",
			      id, name, gender, age);
	if (sql == NULL) {
		fputs("out of memory\n", stderr);
		return;
	}

	ret = sqlite3_exec(db, sql, NULL, NULL, &err);
	if (ret != SQLITE_OK) {
		fputs(err, stderr);
		fputs("\n", stderr);
		sqlite3_free(err);	/* error messages must be freed by the caller */
	}
	sqlite3_free(sql);
}
/*
 * process_text: scan the parsed text file `txt` line by line, tracking
 * language sections, and rewrite the "id" line of every section whose
 * short name is mapped (lang_map[j] > 0) so that it reads "id = <mapped id>".
 *
 * txt         -- parsed file; txt->v[i].s is the text of line i, txt->u the
 *                line count (per the accesses below)
 * lang_count  -- number of entries in lang_map / lang_shorts (index 0 unused:
 *                all loops start at 1)
 * lang_map    -- new numeric id per language; > 0 means "rewrite this one"
 * lang_shorts -- short name per language, matched against the section's
 *                short_name
 *
 * On a malformed section (missing id or short_name) an error is printed and
 * the function returns early.
 * NOTE(review): lang_id_linenos (XCALLOC) is not released on the early-return
 * paths — whether XCALLOC'd memory must be freed here depends on the
 * project's allocator; confirm.
 */
static void
process_text(
        struct textfile *txt,
        int lang_count,
        int *lang_map,
        unsigned char **lang_shorts)
{
  int i;
  int lang_sect_lineno = -1;            /* 1-based line of current section header, -1 = none */
  int lang_id_lineno = -1;              /* 1-based line of the current section's "id" line   */
  int res;
  unsigned char short_name_buf[1024];   /* short_name of the current section ("" = not seen) */
  int j, lineno;
  int *lang_id_linenos;                 /* per-language: line number of its "id" line        */
  unsigned char id_buf[1024];
  int id_buf_len;

  short_name_buf[0] = 0;
  XCALLOC(lang_id_linenos, lang_count);

  for (i = 0; i < txt->u; ++i) {
    /* is_section() result: presumably 1 = language section header,
     * 0 = some other section header, < 0 = not a header — TODO confirm
     * against is_section's definition. */
    res = is_section(txt->v[i].s, i + 1);

    if (res >= 0) {
      /* A new section starts here: flush (validate + record) the section
       * we were in, if any. */
      if (lang_sect_lineno > 0) {
        if (lang_id_lineno <= 0) {
          fprintf(stderr, "language section at line %d has no id\n", lang_sect_lineno);
          return;
        }
        if (!short_name_buf[0]) {
          fprintf(stderr, "language section at line %d has no short_name\n", lang_sect_lineno);
          return;
        }
      }
      /* Find the mapped language matching the finished section's short name. */
      for (j = 1; j < lang_count; ++j) {
        if (lang_shorts[j] && lang_map[j] > 0 && !strcmp(lang_shorts[j], short_name_buf))
          break;
      }
      if (j < lang_count) lang_id_linenos[j] = lang_id_lineno;
    }

    if (res == 1) {
      /* Entering a language section: reset per-section state. */
      lang_sect_lineno = i + 1;
      lang_id_lineno = -1;
      short_name_buf[0] = 0;
    } else if (!res) {
      /* Entering a non-language section: stop collecting. */
      lang_sect_lineno = -1;
    }

    /* Inside a language section, remember where its "id" line is ... */
    if (lang_sect_lineno > 0 && is_id(txt->v[i].s, i + 1)) {
      //fprintf(stderr, "id: %d\n", i + 1);
      lang_id_lineno = i + 1;
    }
    /* ... and capture its short_name into short_name_buf. */
    if (lang_sect_lineno > 0 && is_short_name(txt->v[i].s, short_name_buf, sizeof(short_name_buf), i + 1)) {
      //fprintf(stderr, "short_name: %s, %d\n", short_name_buf, i + 1);
      //map_lang_aliases(short_name_buf, sizeof(short_name_buf));
    }
  }

  /* End of file: flush the final section exactly like the in-loop flush. */
  if (lang_sect_lineno > 0) {
    if (lang_id_lineno <= 0) {
      fprintf(stderr, "language section at line %d has no id\n", lang_sect_lineno);
      return;
    }
    if (!short_name_buf[0]) {
      fprintf(stderr, "language section at line %d has no short_name\n", lang_sect_lineno);
      return;
    }
  }
  for (j = 1; j < lang_count; ++j) {
    if (lang_shorts[j] && lang_map[j] > 0 && !strcmp(lang_shorts[j], short_name_buf))
      break;
  }
  if (j < lang_count) lang_id_linenos[j] = lang_id_lineno;

  /* Every mapped language must have had its "id" line located. */
  for (j = 1; j < lang_count; ++j) {
    if (lang_map[j] > 0 && lang_id_linenos[j] <= 0) {
      fprintf(stderr, "language %s lineno is not found\n", lang_shorts[j]);
      return;
    }
  }

  /* Replace each recorded "id" line with the freshly formatted one.
   * .a looks like the allocated size and .u the used length of the line
   * buffer — TODO confirm against struct textfile. */
  for (j = 1; j < lang_count; ++j) {
    if (lang_map[j] > 0) {
      lineno = lang_id_linenos[j] - 1;   /* back to 0-based index */
      if (lineno < 0 || lineno >= txt->u) {
        die("invalid lineno %d", lineno);
      }
      snprintf(id_buf, sizeof(id_buf), "id = %d", lang_map[j]);
      id_buf_len = strlen(id_buf);
      txt->v[lineno].a = id_buf_len + 1;
      txt->v[lineno].u = id_buf_len;
      xfree(txt->v[lineno].s);
      txt->v[lineno].s = xstrdup(id_buf);
    }
  }
}
void init_util(void) { int i; // Init isidnum table for (i = 0; i < 256; i++) isidnum_table[i] = is_id(i) || is_num(i); }
// split a string into individual tokens
//
// Scans `line` left to right and appends one Token (type, text, count of
// preceding spaces) per lexeme to `tokens`.  Recognised lexemes: identifiers
// (possibly keywords or function names), numbers (including scientific
// notation), quoted strings, and operators.  `#` starts a comment and ends
// the scan.  `filename`/`line_num` are only passed through to
// transform_special_tokens() for diagnostics.  Option-driven behaviour:
//   opt.assign  -- everything after the first "=" becomes one quoted string
//                  token; "var=" gets an empty-string value appended
//   opt.test    -- "=" may be rewritten to "=="; quoted comparisons forced
//                  to string comparisons
//   opt.command -- keeps the first token's indentation at 0 and appends a
//                  missing ":" after block keywords
void tokenise(const std::string &line, const std::string &filename, int line_num, const Options &opt, /*out*/ std::vector<Token> &tokens, bool test_is_raw_python = false // whether --test input is raw python code (vs just a list of boolean expressions)
)
{
    size_t pos = 0;
    size_t len = line.length();
    bool found_assign_op = false;       // set once the first "=" operator is seen
    bool ids_can_be_keywords = opt.assign || (opt.test && !test_is_raw_python);
    while (pos < len) {
        size_t space_start = pos;
        // skip whitespace (and finish if end of line or comment)
        // NOTE(review): std::isspace/std::isdigit take an int that must be
        // representable as unsigned char — if char_at() returns a plain
        // (signed) char this is UB for bytes >= 0x80; confirm char_at.
        while (std::isspace(char_at(line, pos))) {
            ++pos;
        }
        if (pos >= len || line[pos] == '#') {
            break;
        }
        size_t num_spaces = pos - space_start;
        size_t tok_start = pos;
        char ch = line[pos];
        char next_ch = char_at(line, pos + 1);
        token_type type = t_undefined;
        std::string tok_str;
        if (opt.assign && found_assign_op) {
            // Everything after "=" is the assigned value: consume to end of
            // line and normalise it into a single-quoted string (numbers are
            // left unquoted).
            type = t_string;
            pos = len;
            tok_str = trim_spaces(line.substr(tok_start, pos - tok_start));
            if (is_quoted(tok_str)) {
                // make sure quotes are of the right type
                std::string unquoted_tok_str = tok_str.substr(1, tok_str.length() - 2);
                tok_str = quote(unquoted_tok_str, '\'');
            } else if (!is_number(tok_str)) {
                tok_str = quote(tok_str, '\'');
            }
        } else if (is_start_of_id(ch)) {
            // TODO: maybe allow spaces around array indexes , e.g. "a/b[ 10 ]/c"
            while (is_id(char_at(line, pos))) {
                ++pos;
            }
            std::string id = line.substr(tok_start, pos - tok_start);
            check_transform_id(id);
            if (is_keyword(id) && !(ids_can_be_keywords && is_keyword_allowed_as_id(id))) {
                tok_str = id;
                type = t_keyword;
            } else if (id != kwd_expect && next_nonblank_char(line, pos) == '(') {
                // identifier immediately followed by "(" is a call
                tok_str = id;
                type = t_function;
            } else {
                tok_str = (opt.demangle ? demangle_id(id, true) : mangle_id(id));
                type = t_id;
            }
        } else if (std::isdigit(ch) || (ch == '.' && std::isdigit(next_ch)) || (ch == '-' && (std::isdigit(next_ch) || next_ch == '.'))) {
            // Number: optional leading "-", digits, optional "." and more
            // digits; any_digits guards against a bare "-" or ".".
            bool any_digits = false;
            if (ch == '-') {
                ++pos;
            }
            while (std::isdigit(char_at(line, pos))) {
                ++pos;
                any_digits = true;
            }
            if (char_at(line, pos) == '.') {
                ++pos;
                while (std::isdigit(char_at(line, pos))) {
                    ++pos;
                    any_digits = true;
                }
            }
            // check for scientific notation
            // (TODO: merge common code with is_number function)
            // NOTE(review): the exponent is only consumed when an explicit
            // "+"/"-" sign follows the "e" — "1e5" stops before the "e" while
            // "1e+5" is taken whole; confirm this is intentional.
            if (any_digits && std::tolower(char_at(line, pos)) == 'e') {
                size_t pos2 = pos + 1;
                if (char_at(line, pos2) == '+' || char_at(line, pos2) == '-') {
                    ++pos2;
                    if (std::isdigit(char_at(line, pos2))) {
                        for (pos = pos2 + 1;std::isdigit(char_at(line, pos));++pos) {
                        }
                    }
                }
            }
            if (any_digits) {
                type = t_number;
            } else {
                type = t_operator;
                pos = tok_start + 1;
            }
            // token is a single character ("-" or ".")
            tok_str = line.substr(tok_start, pos - tok_start);
        } else if (ch == '\"' || ch == '\'') {
            // Quoted string: scan to the matching quote, honouring backslash
            // escapes; an unterminated string runs to end of line.
            for (++pos;pos < len && line[pos] != ch;++pos) {
                // check for escape character
                if (line[pos] == '\\') {
                    if (++pos >= len) break;
                }
            }
            if (pos < len) {
                ++pos;          // include the closing quote
            }
            tok_str = line.substr(tok_start, pos - tok_start);
            type = t_string;
        } else {
            // Operator/punctuation; operator_length() tells how many chars.
            pos += operator_length(line, pos);
            tok_str = line.substr(tok_start, pos - tok_start);
            type = t_operator;
            if (tok_str == "=") {
                found_assign_op = true;
                // In --test mode, "=" means comparison (unless raw python
                // without a leading "expect").
                if (opt.test && (!test_is_raw_python || (tokens.size() > 0 && tokens[0].type == t_id && tokens[0].str == kwd_expect))) {
                    tok_str = "==";
                }
            }
        }
        tokens.push_back(Token(type, tok_str, num_spaces));
        if (!opt.command && tokens.size() == 1) {
            // Python will complain if the line is indented
            tokens[0].spaces_before = 0;
        }
    }
    transform_special_tokens(tokens, filename, line_num);
    if (opt.assign) {
        // transform var= into var=''
        if (tokens.size() == 2 && tokens[1].str == "=") {
            tokens.push_back(Token(t_string, "''", 0));
        }
    }
    if (opt.test) {
        force_string_comparison_if_quoted(tokens);
    }
    if (opt.command) {
        // add any missing ":" at the end of the line
        if (tokens.size() > 0 && tokens[0].type == t_keyword) {
            // check if there is a ":" anywhere in the line
            if (find_token(tokens, t_operator, ":") == -1) {
                if (tokens[0].str == "if" || tokens[0].str == "else" || tokens[0].str == "elif" || tokens[0].str == "while" || tokens[0].str == "for" || tokens[0].str == "try" || tokens[0].str == "except" || tokens[0].str == "finally") {
                    tokens.push_back(Token(t_operator, ":", 0));
                }
            }
        }
    }
}