/*
 * Main tokenizing loop: walks `input` one character at a time and applies
 * shell-style token recognition rules, appending finished tokens to `list`
 * via the rule_* helpers.  `cur_tok` is the token currently being built.
 * NOTE(review): rule_1/rule_34/rule_56 presumably map to the POSIX shell
 * token recognition rules — confirm against their definitions.
 */
void loop(t_list **list, t_token **cur_tok, char *input)
{
	int index;
	char cur;
	char prev;

	ft_init(&index, &cur, &prev, input);
	while (cur)
	{
		/* Operator token in progress: try to extend or delimit it. */
		if ((*cur_tok)->token && is_operator(prev) && !is_newline(cur))
			rule_1(*cur_tok, list, input, &index);
		else if (is_quoting(cur))
			handle_quoting(*cur_tok, list, input, &index);
		else if (is_start_of_expansion(cur) || is_operator(cur))
			rule_34(*cur_tok, list, input, &index);
		else if (is_newline(cur) || is_whitespace(cur))
			rule_56(*cur_tok, list, input, &index);
		/* Word in progress absorbs the next character. */
		else if ((*cur_tok)->token && is_word(prev))
			build_token(*cur_tok, cur);
		else if (cur == '#')
			discard_comment(input, &index);
		else
			build_token(*cur_tok, cur);
		prev = cur;
		cur = input[++index];
	}
}
/*
 * Accumulate base tokens into `ref` until a terminator is seen.
 *
 * If `end` is non-zero, reading stops successfully at the first token
 * starting with `end` (the terminator is consumed, not appended) and
 * fails when a newline is reached first.  If `end` is zero, a newline
 * is the (successful) terminator.  Trailing whitespace is stripped from
 * the accumulated string.  Returns false only when a required `end`
 * character was not found before end of line/input.
 */
static bool config_parse_string(struct lexer *lex, struct strref *ref,
		char end)
{
	/* With no explicit terminator we cannot fail to find it. */
	bool success = end != 0;
	struct base_token token;

	base_token_clear(&token);

	while (lexer_getbasetoken(lex, &token, false)) {
		if (end) {
			if (*token.text.array == end) {
				success = true;
				break;
			} else if (is_newline(*token.text.array)) {
				success = false;
				break;
			}
		} else {
			if (is_newline(*token.text.array)) {
				success = true;
				break;
			}
		}

		strref_add(ref, &token.text);
	}

	remove_ref_whitespace(ref);
	return success;
}
/*
 * Try to lex a "//" line comment at the current cursor position.
 * On success the token covers everything after "//" up to (but not
 * including) the newline, the cursor is left on the newline (or at end
 * of input), and true is returned.  On failure the cursor is restored
 * and false is returned.
 */
static bool token_line_comment(SLCONFIG_STRING* str, TOKEN* token)
{
	if(str->start >= str->end || *str->start != '/')
		return false;

	str->start++;
	if(str->start >= str->end || *str->start != '/')
	{
		/* Lone '/': undo the first advance. */
		str->start--;
		return false;
	}
	str->start++;

	token->type = TOKEN_COMMENT;
	token->str.start = str->start;

	/* Comment body runs to end of line or end of input. */
	while(str->start < str->end && !is_newline(*str->start))
		str->start++;

	token->str.end = str->start;
	return true;
}
bool _slc_is_naked_string_character(char c) { return !is_whitespace(c) && !is_newline(c) && c != ':' && c != '$' && c != '{' && c != '~' && c != '}' && c != ';' && c != '=' && c != '"' && c != '#'; }
/*
 * Parse a whole LTSV document from `*string`.
 *
 * Records are parsed until end of input and appended to the returned
 * container.  parse_record() returning NULL with the cursor on a newline
 * ends the document normally; NULL on any other character is a parse
 * error.  Returns NULL on allocation or parse failure (the container is
 * freed on the error path).
 */
static LTSV *parse_ltsv(const char **string)
{
	LTSV *ltsv = ltsv_init();
	LTSV_Record *record;

	if (!ltsv) {
		fprintf(stderr, "couldn't allocate LTSV!\n");
		return NULL;
	}

	while (**string != '\0') {
		record = parse_record(string);
		if (!record) {
			if (!is_newline(**string)) {
				goto bail;
			}
			/* NOTE(review): looks like leftover debug output —
			 * consider removing. */
			fprintf(stderr, "string: %c\n", **string);
			return ltsv;
		}
		ltsv_add(ltsv, record);
		/* Consume an optional CR then an optional LF, handling
		 * "\r\n", "\r" and "\n" line endings alike. */
		if (**string == 0x0d) {
			skip_char(string);
		}
		if (**string == 0x0a) {
			skip_char(string);
		}
	}
	return ltsv;

bail:
	ltsv_free(ltsv);
	return NULL;
}
/*
 * Determine whether the tokens accumulated so far form the start of a
 * preprocessor include directive ("#include" or "#import").
 *
 * Scans backwards from the most recent token, skipping spaces/tabs:
 * first expecting the word "include"/"import", then a '#', and finally
 * a newline (or start of buffer) proving the '#' begins its line.
 */
static bool cf_lexer_is_include(struct cf_lexer *lex)
{
	bool found_include_import = false;
	bool found_preprocessor = false;
	size_t i;

	for (i = lex->tokens.num; i > 0; i--) {
		struct cf_token *token = lex->tokens.array+(i-1);

		if (is_space_or_tab(*token->str.array))
			continue;

		if (!found_include_import) {
			if (strref_cmp(&token->str, "include") != 0 &&
			    strref_cmp(&token->str, "import") != 0)
				break;
			found_include_import = true;
		} else if (!found_preprocessor) {
			if (*token->str.array != '#')
				break;
			found_preprocessor = true;
		} else {
			/* Token before '#': only counts as a directive if
			 * the '#' starts its line. */
			return is_newline(*token->str.array);
		}
	}

	/* if starting line */
	return found_preprocessor && found_include_import;
}
/* Raw parsing */
/*
 * Read the next logical CSV character from `file`.
 *
 * Returns CSV_CHAR_EOF / CSV_CHAR_ERROR on end-of-file or stream error,
 * CSV_CHAR_COLSEP for an unescaped separator, CSV_CHAR_ROWSEP for an
 * unescaped newline, or the literal byte otherwise.  The small state
 * machine handles escapes (the byte after an escape is taken literally)
 * and the SEEK state that skips whitespace after a separator/newline.
 */
static uint16_t hb_parse_character( hb_csv_file_t * file )
{
    int byte;
    uint16_t c = 0;
    int need_char = 1;

    if( file == NULL )
    {
        return CSV_CHAR_ERROR;
    }

    while( need_char )
    {
        byte = fgetc( file->fileref );
        if( feof( file->fileref ) )
        {
            return CSV_CHAR_EOF;
        }
        if( ferror( file->fileref ) )
        {
            return CSV_CHAR_ERROR;
        }

        if( file->parse_state == CSV_PARSE_SEEK && is_white(byte) )
        {
            /* Skip leading whitespace after a separator/newline. */
            continue;
        }
        else if( file->parse_state != CSV_PARSE_ESC && is_esc(byte) )
        {
            /* Escape introducer: take the next byte literally. */
            file->parse_state = CSV_PARSE_ESC;
            continue;
        }
        else if( file->parse_state != CSV_PARSE_ESC && is_sep(byte) )
        {
            file->parse_state = CSV_PARSE_SEEK;
            need_char = 0;
            c = CSV_CHAR_COLSEP;
        }
        else if( file->parse_state == CSV_PARSE_ESC )
        {
            /* Escaped byte passes through unchanged. */
            file->parse_state = CSV_PARSE_NORMAL;
            need_char = 0;
            c = (uint16_t)byte;
        }
        else if( is_newline(byte) )
        {
            file->parse_state = CSV_PARSE_SEEK;
            need_char = 0;
            c = CSV_CHAR_ROWSEP;
        }
        else
        {
            file->parse_state = CSV_PARSE_NORMAL;
            need_char = 0;
            c = (uint16_t)byte;
        }
    }

    return c;
}
void CL_SpanLayout_Impl::layout_text(CL_GraphicContext & gc, std::vector<TextBlock> blocks, std::vector<TextBlock>::size_type block_index, CurrentLine ¤t_line, int max_width) { TextSizeResult text_size_result = find_text_size(gc, blocks[block_index], current_line.object_index); current_line.object_index += text_size_result.objects_traversed; current_line.cur_line.width = current_line.x_position; if (is_newline(blocks[block_index])) { current_line.cur_line.height = cl_max(current_line.cur_line.height, text_size_result.height); current_line.cur_line.ascender = cl_max(current_line.cur_line.ascender, text_size_result.ascender); next_line(current_line); } else { if (!fits_on_line(current_line.x_position, text_size_result, max_width) && !is_whitespace(blocks[block_index])) { if (larger_than_line(text_size_result, max_width)) { // force line breaks to make it fit force_place_line_segments(current_line, text_size_result, max_width); } else { next_line(current_line); place_line_segments(current_line, text_size_result); } } else { place_line_segments(current_line, text_size_result); } } }
// Scan newline characters. // // TODO: Scan for newline sequences ('\r\n'). std::size_t newline(Lexer& t) { if (t.head != t.tail and is_newline(*t.head)) return 1; else return 0; }
/*
 * Parse an ini-style config file into `sections`.
 *
 * Opens `file` (creating it when `always_open` is set), reads it as
 * UTF-8, then scans for "[section]" headers; the body following each
 * header is handed to config_parse_section().  Tokens not starting a
 * header are skipped up to the next newline.
 *
 * Fix: "&section_name" had been mangled by an HTML entity substitution
 * ("&sect" -> '§') in two places, which is not valid C.
 */
static int config_parse(struct darray *sections, const char *file,
		bool always_open)
{
	char *file_data;
	struct lexer lex;
	struct base_token token;
	struct strref section_name;
	FILE *f;

	f = os_fopen(file, "rb");
	if (always_open && !f)
		f = os_fopen(file, "w+");
	if (!f)
		return CONFIG_FILENOTFOUND;

	os_fread_utf8(f, &file_data);
	fclose(f);

	if (!file_data)
		return CONFIG_SUCCESS;

	lexer_init(&lex);
	lexer_start_move(&lex, file_data);

	base_token_clear(&token);

	while (lexer_getbasetoken(&lex, &token, PARSE_WHITESPACE)) {
		struct config_section *section;

		while (token.type == BASETOKEN_WHITESPACE) {
			if (!lexer_getbasetoken(&lex, &token,
					PARSE_WHITESPACE))
				goto complete;
		}

		if (*token.text.array != '[') {
			/* Not a section header: skip rest of the line. */
			while (!is_newline(*token.text.array)) {
				if (!lexer_getbasetoken(&lex, &token,
						PARSE_WHITESPACE))
					goto complete;
			}
			continue;
		}

		strref_clear(&section_name);
		config_parse_string(&lex, &section_name, ']');
		if (!section_name.len)
			break;

		section = darray_push_back_new(
				sizeof(struct config_section), sections);
		section->name = bstrdup_n(section_name.array,
				section_name.len);
		config_parse_section(section, &lex);
	}

complete:
	lexer_free(&lex);
	return CONFIG_SUCCESS;
}
/*
 * Recursively parse a sequence of s-expressions, building cons pairs.
 *
 * Each recognized element becomes the car of a pair whose cdr is the
 * parse of the remaining input; ')' and end-of-input yield NIL.  ';'
 * starts a comment that is skipped to end of line.
 *
 * Fix: the trailing `parse->curr++;` after the switch was unreachable
 * (every case returns), and falling off the end of a non-void function
 * would be undefined behavior if it were ever reached — removed.
 */
pointer parse_expr(parser* parse)
{
	pointer ret_car;
	pointer ret_cdr;

	eat_whitespace(parse);
	switch(*parse->curr)
	{
	case '(':
		parse->curr++;
		ret_car = parse_expr(parse);
		ret_cdr = parse_expr(parse);
		return create_pair(ret_car, ret_cdr);
	case '"':
		ret_car = parse_string(parse);
		ret_cdr = parse_expr(parse);
		return create_pair(ret_car, ret_cdr);
	case '\'':
		parse->curr++;
		ret_car = parse_quote(parse);
		ret_cdr = parse_expr(parse);
		return create_pair(ret_car, ret_cdr);
	case ')':
		parse->curr++;
		return NIL;
	case '+':
	case '-':
	case 'b':
		/* May be a signed number, a 'b'-prefixed literal or a symbol. */
		ret_car = parse_number_or_symbol(parse);
		ret_cdr = parse_expr(parse);
		return create_pair(ret_car, ret_cdr);
	case '.':
		/* Either a decimal number or a dotted pair. */
		return parse_number_or_pair(parse);
	case '\\':
		/* Character literal: '\' followed by the character. */
		parse->curr++;
		ret_car = create_char(*(parse->curr++));
		ret_cdr = parse_expr(parse);
		return create_pair(ret_car, ret_cdr);
	case ';':
		/* Comment: skip to end of line, then continue parsing. */
		while(!is_newline(*parse->curr) && *parse->curr != '\0')
			parse->curr++;
		return parse_expr(parse);
	case 0:
		return NIL;
	default:
		if(is_number_char(*parse->curr))
		{
			ret_car = parse_number(parse);
			ret_cdr = parse_expr(parse);
			return create_pair(ret_car, ret_cdr);
		}
		else if(is_symbol_char(*parse->curr))
		{
			ret_car = parse_symbol(parse);
			ret_cdr = parse_expr(parse);
			return create_pair(ret_car, ret_cdr);
		}
		else
			return parser_error(parse, "Unexpected char in expression.");
	}
}
/*
 * Step the input stream back one character, undoing the line/column
 * bookkeeping performed when `c` was read.
 */
void prewind(parser *p, char c)
{
	sseek(p->f, -1, SEEK_CUR);

	if (!is_newline(c)) {
		p->current_location.column--;
		return;
	}
	p->current_location.line--;
}
// Consume a line comment: everything from the current position up to
// (but not including) the end-of-line, emitted as a tok_Comment token.
void consume_comment(TokenizeContext& context)
{
    int len = 0;
    for (; context.withinRange(len); ++len) {
        if (is_newline(context.next(len)))
            break;
    }
    context.consume(tok_Comment, len);
}
/*
 * Lexer action for one ordinary character: translate newlines, append
 * the character to the current buffer, and, when wrapping is enabled,
 * break overlong lines at the last whitespace.
 *
 * NOTE(review): `current` is a char, so `current == EOF` is only
 * reliable if no valid input byte collides with (char)EOF — confirm
 * the scanner's behavior.
 */
static void content(void)
{
	char current;

	/*
	 * We should get one character at a time.
	 */
	assert(strlen(yytext) == 1);

	current = yytext[0];

	if (current == EOF)
		return;

	if (is_newline(current)) {
		newline();
		return;
	}

	buffer_push_char(current_buf, current);

	/*
	 * Forcing newline changes 'text' so lets do it after we've pushed
	 * it to the buffer.
	 */
	if (wrap_long_lines && need_wrap(current_buf)) {
		struct buffer tmp;

		buffer_init(&tmp, buffer_size(current_buf));

		/*
		 * Find last character that was not whitespace
		 */
		for (;;) {
			int c;

			if (buffer_size(current_buf) == 0)
				break;

			c = buffer_pop_char(current_buf);
			if (is_whitespace(c)) {
				/*
				 * Do not push whitespace because it would appear
				 * after the newline.
				 */
				break;
			}
			/*
			 * Characters are put in tmp buffer in reverse order.
			 */
			buffer_push_char(&tmp, c);
		}

		force_newline_for_wrap(current_buf);

		/*
		 * Restore non-wrapped text into buffer.
		 */
		while (buffer_size(&tmp) > 0) {
			buffer_push_char(current_buf, buffer_pop_char(&tmp));
		}
		buffer_release(&tmp);
	}
}
/*
 * Parse one LTSV record ("label:value<TAB>label:value...") from
 * `*string` into a freshly allocated LTSV_Record.
 *
 * An empty label with the cursor on a newline ends the record normally.
 * Errors (unparsable label, empty label before a non-newline character,
 * missing ':', duplicate label) free the record and return NULL.
 * `label` strings returned by parse_string() are owned and freed here;
 * `value` ownership appears to pass to ltsv_record_add_entry() —
 * TODO confirm against its implementation.
 */
static LTSV_Record *parse_record(const char **string)
{
	LTSV_Record *output_record = ltsv_record_init();
	const char *label = NULL, *value;

	if (!output_record) {
		fprintf(stderr, "couldn't allocate LTSV_Record!\n");
		return NULL;
	}

	while (**string != '\0') {
		label = parse_string(string, &label_char_test_func);
		if (!label) {
			fprintf(stderr, "could not parse label!\n");
			goto bail;
		}
		if (strlen(label) == 0) {
			/* Empty label at end of line terminates the record. */
			if (is_newline(**string)) {
				free((void*)label);
				return output_record;
			}
			fprintf(stderr, "invalid character for label: %c\n", **string);
			goto bail;
		}
		if (**string != ':') {
			fprintf(stderr, "':' not found: '%c' found instead!\n", **string);
			goto bail;
		}
		skip_char(string);
		value = parse_string(string, &field_char_test_func);
		if (ltsv_record_get_value(output_record, label)) {
			fprintf(stderr, "dupliate entry for record: %s\n", label);
			goto bail;
		}
		ltsv_record_add_entry(output_record, label, value);
		/* A tab separates fields within the record. */
		if (**string == '\t') {
			skip_char(string);
		}
		free((void*)label);
	}
	return output_record;

bail:
	if (label) {
		free((void*)label);
	}
	ltsv_record_free(output_record);
	return NULL;
}
/*
 * Read one character from the parser's stream, updating the current
 * line/column location as it goes.
 */
char pgetc(parser *p)
{
	char c = sgetc(p->f);

	if (!is_newline(c)) {
		p->current_location.column++;
		return c;
	}

	/* Newline: reset column, advance line. */
	p->current_location.column = 1;
	p->current_location.line++;
	return c;
}
/* Force newline after tag. Use lexer to get current character. */
static void force_newline_after_tag(struct buffer * buffer)
{
	int c = input();

	/* Peek only: put the character straight back. */
	do_unput(c);

	if (is_newline(c))
		return;

	do_newline(buffer, "\n");
	eat_whitespace();
}
// Consume horizontal and vertical whitespace. // // TODO: Manage the input location while lexing. void Lexer::space() { while (true) { char c = peek(); if (is_space(c)) get(); else if (is_newline(c)) get(); else break; } }
// Advance the reader by one character, maintaining the line/column
// offset, and return the character now under the cursor.
UniChar Reader::next()
{
    ++mCurrentIndex;
    ++mOffset.column;

    UniChar c = this->current();
    if (is_newline(c)) {
        // Starting a fresh line: bump line, reset column.
        ++mOffset.line;
        mOffset.column = 0;
    }
    return c;
}
/*
 * Append character `c` to the output buffer of channel `b`, flushing
 * when the buffer fills, when the previously written character ended a
 * line, or when `b` was also the last channel flushed.  Runs under the
 * serial lock.
 *
 * NOTE(review): is_newline() is called here with a POINTER into the
 * buffer (buffer + index - 1), unlike the char-taking is_newline used
 * elsewhere in this file — confirm the local signature; if it takes a
 * char this is a latent bug and should be buffer[index - 1].
 */
static void internal_putchar(int b, int c)
{
	serial_lock();
	/* Add to buffer */
	int index = output_buffers_used[b];
	uint8_t *buffer = output_buffers[b];
	buffer[index] = (uint8_t)c;
	output_buffers_used[b]++;
	if (index + 1 == GUEST_OUTPUT_BUFFER_SIZE ||
	    (index >= 1 && is_newline(buffer + index - 1)) ||
	    last_out == b) {
		flush_buffer(b);
	}
	has_data = 1;
	serial_unlock();
}
/*
 * If the character at *index is a newline: finish any token being
 * built, skip the following whitespace, and append a NEWLINE token.
 */
void add_newline(t_token *cur_tok, t_list **l, char *input, \
	int *index)
{
	if (!is_newline(input[*index]))
		return;

	if (cur_tok->token)
		append_and_reset(l, cur_tok);
	discard_whitespace(input, index);
	list_append(*l, init_token(NEWLINE, "\n"));
}
/*
 * Split `buffer` (`len` bytes, not NUL-terminated) into newline-delimited
 * lines and hand each non-empty line to inspect_line().
 *
 * Improvements: the numeric parser states are now named constants and
 * `line`/`line_len` are explicitly initialized.  Note: a final line with
 * no trailing newline is (as before) not reported — presumably because
 * callers feed complete, newline-terminated buffers; confirm if reused.
 */
static void parse_buffer( vdb_info_bam_hdr * bam_hdr, char * buffer, size_t len )
{
    enum {
        STATE_INIT,     /* nothing seen yet */
        STATE_CONTENT,  /* accumulating a line */
        STATE_NEWLINE   /* previous character was a newline */
    };
    char * line = NULL;
    size_t idx, line_len = 0, state = STATE_INIT;

    for ( idx = 0; idx < len; ++idx )
    {
        switch( state )
        {
            case STATE_INIT :
                if ( is_newline( buffer[ idx ] ) )
                    state = STATE_NEWLINE;
                else
                {
                    line = &( buffer[ idx ] );
                    line_len = 1;
                    state = STATE_CONTENT;
                }
                break;

            case STATE_CONTENT :
                if ( is_newline( buffer[ idx ] ) )
                {
                    /* Line complete: report it. */
                    inspect_line( bam_hdr, line, line_len );
                    state = STATE_NEWLINE;
                }
                else
                    line_len++;
                break;

            case STATE_NEWLINE :
                if ( !is_newline( buffer[ idx ] ) )
                {
                    /* First character of the next line. */
                    line = &( buffer[ idx ] );
                    line_len = 1;
                    state = STATE_CONTENT;
                }
                break;
        }
    }
}
// Advance to the next character in the ARFF stream, maintaining
// line/column counters and remembering the previous character.
// Returns (char)-1 at end-of-file (or on a read error).
//
// Fix: fread() returns the number of ITEMS read, not bytes; comparing
// its result against sizeof(m_char) only worked because sizeof(char)
// happens to be 1.  Compare against the requested item count instead.
char ArffScanner::next() {
    if(eof()) {
        return (char)-1;
    }
    if(is_newline(m_char)) {
        ++m_line;
        m_col = 0;
    }
    ++m_col;
    m_prev_char = m_char;
    if(fread(&m_char, sizeof(m_char), 1, m_fp) != 1) {
        m_char = (char)-1; // you would have reached end-of-file?
    }
    return m_char;
}
/* Force newline for wrapping line. Use lexer to get current character and do not eat whitespace from next line. */ static void force_newline_for_wrap(struct buffer * buffer) { int current = input(); /* * Flush all pending stuff before doing the newline. */ if (!using_primary_buf()) { merge_buffers(); } do_newline(current_buf, "\n"); if (!is_newline(current)) do_unput(current); }
void Lexer::comment() { get(); while (true) { char c = peek(); if (!c || is_newline(c)) break; get(); } // TODO: Do something interesting with comments // instead of just discarding them. build_.clear(); }
/*
 * Read one line from `file` into a freshly allocated string (*outStr,
 * via COPY_STR).  Returns the line length, or -1 at end-of-file.  The
 * terminating newline is consumed but not stored.
 *
 * Fix: the fill loop had no bound, so a line longer than BUFSIZ wrote
 * past the end of `buf` (stack buffer overflow).  The loop now stops at
 * BUFSIZ - 1 characters; an overflowing character is pushed back with
 * ungetc() so the remainder is returned by the next call.
 */
int readLine(FILE* file, char** outStr)
{
	char buf[BUFSIZ];
	int i = 0;

	buf[i] = fgetc(file);
	if (is_eof(buf[i])) {
		return -1;
	}
	while (!is_eof(buf[i]) && !is_newline(buf[i])) {
		if (i >= BUFSIZ - 1) {
			/* Truncate: return this char to the stream for the
			 * next call and finish the (partial) line. */
			ungetc(buf[i], file);
			break;
		}
		i++;
		buf[i] = fgetc(file);
	}
	buf[i] = '\0';
	COPY_STR((*outStr), buf);
	return i;
}
/* Force newline before tag. Use buffer for getting current character. */
static void force_newline_before_tag(struct buffer * buffer)
{
	int last;

	if (buffer_size(buffer) == 0) {
		/*
		 * We just did a newline, no need to force it.
		 */
		return;
	}

	/* Peek at the last buffered character (pop, then push it back). */
	last = buffer_pop_char(buffer);
	buffer_push_char(buffer, last);

	if (is_newline(last))
		return;

	do_newline(buffer, "\n");
	eat_whitespace();
}
/*
 * Collapse a comment (line or block form) into a single space token.
 *
 * Writes one ' ' at the current write offset, then advances the base
 * lexer past the comment body, honoring line splices
 * (backslash-newline) via cf_pass_any_splices().  An unterminated block
 * comment sets lex->unexpected_eof.  Returns false when the token does
 * not start a comment.
 */
static bool cf_lexer_process_comment(struct cf_lexer *lex,
		struct cf_token *out_token)
{
	const char *offset;

	if (!cf_is_comment(out_token->unmerged_str.array))
		return false;

	offset = lex->base_lexer.offset;
	cf_pass_any_splices(&offset);

	/* Emit the single-space replacement token. */
	strcpy(lex->write_offset++, " ");
	out_token->str.len = 1;

	if (*offset == '/') {
		/* Line comment: consume to end of line. */
		while (*++offset && !is_newline(*offset))
			cf_pass_any_splices(&offset);
	} else if (*offset == '*') {
		/* Block comment: consume until the closing star-slash. */
		bool was_star = false;
		lex->unexpected_eof = true;

		while (*++offset) {
			cf_pass_any_splices(&offset);

			if (was_star && *offset == '/') {
				offset++;
				lex->unexpected_eof = false;
				break;
			} else {
				was_star = (*offset == '*');
			}
		}
	}

	out_token->unmerged_str.len +=
		(size_t)(offset - out_token->unmerged_str.array);
	out_token->type = CFTOKEN_SPACETAB;
	lex->base_lexer.offset = offset;
	return true;
}
static void config_parse_section(struct config_section *section, struct lexer *lex) { struct base_token token; while (lexer_getbasetoken(lex, &token, false)) { struct strref name, value; while (token.type == BASETOKEN_WHITESPACE) { if (!lexer_getbasetoken(lex, &token, false)) return; } if (token.type == BASETOKEN_OTHER) { if (*token.text.array == '#') { do { if (!lexer_getbasetoken(lex, &token, false)) return; } while (!is_newline(*token.text.array)); continue; } else if (*token.text.array == '[') { lex->offset--; return; } } strref_copy(&name, &token.text); if (!config_parse_string(lex, &name, '=')) continue; strref_clear(&value); config_parse_string(lex, &value, 0); config_add_item(§ion->items, &name, &value); } }
static inline enum cf_token_type cf_get_token_type(const struct cf_token *token, const struct base_token *start_token) { switch (start_token->type) { case BASETOKEN_ALPHA: return CFTOKEN_NAME; case BASETOKEN_DIGIT: return CFTOKEN_NUM; case BASETOKEN_WHITESPACE: if (is_newline(*token->str.array)) return CFTOKEN_NEWLINE; else return CFTOKEN_SPACETAB; case BASETOKEN_NONE: case BASETOKEN_OTHER: break; } return CFTOKEN_OTHER; }