R_API ut64 r_num_calc(RNum *num, const char *str, const char **err) {
	RNumCalcValue n;
	RNumCalc *nc, nc_local;
	if (!str || !*str) {
		return 0LL;
	}
	if (num) {
		nc = &num->nc;
		num->dbz = 0;
	} else {
		nc = &nc_local;
	}
	/* init */
	nc->curr_tok = RNCPRINT;
	nc->number_value.d = 0.0;
	nc->number_value.n = 0LL;
	nc->errors = 0;
	nc->oc = 0;
	nc->calc_err = NULL;
	nc->calc_i = 0;
	nc->calc_len = 0;
	nc->calc_buf = NULL;
	load_token (num, nc, str);
	get_token (num, nc);
	n = expr (num, nc, 0);
	if (err) {
		*err = nc->calc_err;
	}
	if (num) {
		num->fvalue = n.d;
	}
	return n.n;
}
R_API ut64 r_num_calc (RNum *num, const char *str, const char **err) {
	RNumCalcValue n;
	RNumCalc *nc, nc_local;
	if (!str || !*str) return 0LL;
	if (num) {
		nc = &num->nc;
		num->dbz = 0;
	} else {
		nc = &nc_local;
	}
	/* init */
	nc->curr_tok = RNCPRINT;
	nc->number_value.d = 0.0;
	nc->number_value.n = 0LL;
	nc->errors = 0;
	nc->oc = 0;
	nc->calc_err = NULL;
	nc->calc_i = 0;
	nc->calc_len = 0;
	nc->calc_buf = NULL;
	load_token (num, nc, str);
	get_token (num, nc);
	n = expr (num, nc, 0);
	if (err) *err = nc->calc_err;
	//if (nc->curr_tok == RNCEND) return 0LL; // XXX: Error
	//if (nc->curr_tok == RNCPRINT)
	//return 0LL; // XXX: the f**k
	// n = expr (num, nc, 0);
	if (n.d != ((double)(ut64)n.d)) {
		if (num) num->fvalue = n.d;
	} else if (num) num->fvalue = (double)n.n;
	return n.n;
}
R_API ut64 r_num_calc (RNum *num, const char *str, const char **err) {
	NumValue n;
	if (!*str) return 0LL;
	calc_num = num;
	load_token (str);
	get_token ();
	n = expr (0);
	if (err) *err = calc_err;
	//if (curr_tok == END) return 0LL; // XXX: Error
	//if (curr_tok == PRINT)
	//return 0LL; // XXX: the f**k
	// n = expr (0);
	if (n.d != ((double)(ut64)n.d)) {
		if (num) num->fvalue = n.d;
	} else if (num) num->fvalue = (double)n.n;
	return n.n;
}
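/*
 * Hedged usage sketch, not part of the excerpts above: the newer r_num_calc
 * variants accept a NULL RNum and fall back to a stack-local RNumCalc, so a
 * one-off evaluation needs no RNum at all. The expression string and the
 * r_util.h include are illustrative assumptions.
 */
#include <r_util.h>
#include <stdio.h>

static void example_calc(void) {
	const char *err = NULL;
	ut64 v = r_num_calc (NULL, "3+4*2", &err);
	if (err && *err) {
		fprintf (stderr, "calc error: %s\n", err);
	} else {
		printf ("result: %llu\n", (unsigned long long)v);
	}
}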
void urdutag_file(const char *input_filename, const char *output_filename, const char *lexicon_filename)
{
	FILE *source;
	FILE *dest;
	entry *lexicon;
	token *word;

	/* open the source file and check for (then discard) the directionality character */
	if (!(source = fopen(input_filename, "rb"))) {
		puts("Error opening original file!");
		fcloseall();
		return;
	}
	if (!(ucheckdir(source))) {
		fputs("Specified source file not recognised as Unicode!", stderr);
		fcloseall();
		return;
	}

	/* open the file to write and insert the directionality character */
	if (!(dest = fopen(output_filename, "wb"))) {
		puts("Error opening processed file!");
		fcloseall();
		return;
	}
	if (fputuc(RIGHTWAY, dest) == UERR) {
		puts("Error writing to processed file!");
		fcloseall();
		return;
	}

	if (!(lexicon = load_lexicon(lexicon_filename)))
		return;

	while (1) {
		/* read a line */
		if (!(word = load_token(source)))
			break;

		/* urdutag that token only if it has no tags already */
		if (word->tag[0][0] == 0x0000)
			urdutag(lexicon, word);

		/* if the split-signal tag was returned, perform the special actions */
		if (word->tag[0][0] == 0x0053 && word->tag[0][1] == 0x0050)
			word = do_the_splits(word, lexicon, dest);

		/* write the line to file */
		if (write_token(word, dest))
			break;

		free(word);
	}

	/* check "word": if it is still alloc'd, the loop broke prematurely */
	/* and "word" needs freeing */
	if (word)
		free(word);

	/* free the lexicon */
	free_lexicon(lexicon);

	/* close the read and write files */
	if (fclose(source) < 0) {
		puts("Error closing original file!");
		fcloseall();
		return;
	}
	if (fclose(dest) < 0) {
		puts("Error closing processed file!");
		fcloseall();
		return;
	}
}
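/*
 * Hedged usage sketch, not from the source above: urdutag_file only needs the
 * three file names (Unicode input, output, lexicon). The paths below are
 * purely illustrative.
 */
int main(void)
{
	urdutag_file("input.utf16", "tagged.utf16", "lexicon.utf16");
	return 0;
}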
/* FSM-based scanner: reads characters from fin, builds the lexeme in the
 * string buffer and returns the index of the token stored in struct_buffer. */
index_t get_token(FILE *fin, TDynamic_buffer *buffer, TDynamic_structure_buffer *struct_buffer)
{
    args_assert(buffer != NULL && struct_buffer != NULL, INTERNAL_ERROR);
    debug_print("%s\n", "GET_TOKEN");

    char hx[3] = {0, 0, '\0'};
    char x;
    int c;
    int previous = 0;
    int state = START;
    bool read = true;
    bool signed_scientific = false;
    bool wrong_identifier = false;
    bool is_number_after_sign = false;
    bool is_number_after_dot = false;
    bool is_end = true;
    index_t index = 0;
    TToken *token;

    new_item(struct_buffer, index, token);

    while (1) {
        if (read) {
            c = fgetc(fin);
            if (feof(fin) && is_end) {
                if (state == BLOCK_COMMENT) {
                    token->token_index = save_token(buffer);
                    token->token_type = ERRORT;
                } else {
                    token->token_index = save_token(buffer);
                    token->token_type = EOFT;
                }
                return index;
            }
        } else {
            c = previous;
            read = true;
        }

        switch (state) {
            case START:
                if ((c > 64 && c < 91) || (c > 96 && c < 123) || c == '_') { // A-Z || a-z || _
                    state = IDENTIFIER;
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                } else if (c > 47 && c < 58) { // 0-9
                    state = L_INT;
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                } else {
                    switch (c) {
                        case '+':
                            token->token_index = save_token(buffer);
                            token->token_type = O_PLUS;
                            return index;
                        case '-':
                            token->token_index = save_token(buffer);
                            token->token_type = O_MINUS;
                            return index;
                        case '/':
                            state = COMMENT;
                            is_end = false;
                            break;
                        case '*':
                            token->token_index = save_token(buffer);
                            token->token_type = O_MUL;
                            return index;
                        case ';':
                            token->token_index = save_token(buffer);
                            token->token_type = SEMICOLON;
                            return index;
                        case ',':
                            token->token_index = save_token(buffer);
                            token->token_type = COMMA;
                            return index;
                        case '(':
                            token->token_index = save_token(buffer);
                            token->token_type = OPENING_BRACKET;
                            return index;
                        case ')':
                            token->token_index = save_token(buffer);
                            token->token_type = CLOSING_BRACKET;
                            return index;
                        case '{':
                            token->token_index = save_token(buffer);
                            token->token_type = OPENING_CURLY_BRACKET;
                            return index;
                        case '}':
                            token->token_index = save_token(buffer);
                            token->token_type = CLOSING_CURLY_BRACKET;
                            return index;
                        case '=':
                            state = O_EQUALS;
                            is_end = false;
                            break;
                        case '<':
                            state = O_L;
                            is_end = false;
                            break;
                        case '>':
                            state = O_G;
                            is_end = false;
                            break;
                        case '!':
                            state = UO_EXCLAMATION;
                            is_end = false;
                            break;
                        case '"':
                            state = L_STRING;
                            is_end = false;
                            break;
                        default:
                            if (!isspace(c)) {
                                catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                                token->token_index = save_token(buffer);
                                token->token_type = ERRORT;
                                return index;
                            }
                            break;
                    } // switch (c)
                }
                break;

            case COMMENT:
                debug_print("%s", "COMMENT COMMENT COMMENT COMMENT COMMENT\n");
                if (c == '/') { // "//"
                    state = LINE_COMMENT;
                    is_end = false;
                } else if (c == '*') { // "/*"
                    state = BLOCK_COMMENT;
                    is_end = false;
                } else { // plain '/'
                    ungetc(c, fin);
                    state = START;
                    token->token_index = save_token(buffer);
                    token->token_type = O_DIV;
                    return index;
                }
                break;

            case LINE_COMMENT:
                is_end = true;
                if (c == '\n') {
                    state = START;
                }
                break;

            case BLOCK_COMMENT:
                is_end = true;
                if (c == '*') {
                    state = BLOCK_COMMENT_END;
                }
                break;

            case BLOCK_COMMENT_END:
                if (c == '/') {
                    state = START;
                    is_end = true;
                } else {
                    state = BLOCK_COMMENT;
                }
                break;

            case O_EQUALS:
                if (c == '=') { // ==
                    token->token_index = save_token(buffer);
                    token->token_type = O_EQUALS;
                    state = START;
                    return index;
                } else { // =
                    ungetc(c, fin);
                    state = START;
                    token->token_index = save_token(buffer);
                    token->token_type = O_ASSIGN;
                    return index;
                }

            case O_G:
                if (c == '=') { // >=
                    token->token_index = save_token(buffer);
                    token->token_type = O_GE;
                    state = START;
                    return index;
                } else if (c == '>') { // >>
                    token->token_index = save_token(buffer);
                    token->token_type = O_RIGHT_ARROW;
                    state = START;
                    return index;
                } else { // >
                    ungetc(c, fin);
                    token->token_index = save_token(buffer);
                    token->token_type = O_G;
                    state = START;
                    return index;
                }

            case O_L:
                if (c == '=') { // <=
                    token->token_index = save_token(buffer);
                    token->token_type = O_LE;
                    state = START;
                    return index;
                } else if (c == '<') { // <<
                    token->token_index = save_token(buffer);
                    token->token_type = O_LEFT_ARROW;
                    state = START;
                    return index;
                } else { // <
                    ungetc(c, fin);
                    token->token_index = save_token(buffer);
                    token->token_type = O_L;
                    state = START;
                    return index;
                }

            case UO_EXCLAMATION:
                if (c == '=') { // !=
                    token->token_index = save_token(buffer);
                    token->token_type = O_NE;
                    state = START;
                    return index;
                } else { // a lone '!' is an error token for now
                    ungetc(c, fin);
                    token->token_index = save_token(buffer);
                    token->token_type = ERRORT;
                    // token->token_type = UO_EXCLAMATION;
                    state = START;
                    return index;
                }

            case IDENTIFIER:
                if ((c > 64 && c < 91) || (c > 96 && c < 123) || (c > 47 && c < 58) || c == '_') {
                    // A-Z, a-z, 0-9, _
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    is_end = false;
                } else if (SEPARATOR) {
                    token->token_index = save_token(buffer);
                    if (!wrong_identifier)
                        token->token_type = reservedWord(load_token(buffer, token->token_index));
                    else
                        token->token_type = ERRORT;
                    wrong_identifier = false;
                    is_number_after_dot = false;
                    is_number_after_sign = false;
                    ungetc(c, fin);
                    state = START;
                    return index;
                } else {
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    wrong_identifier = true;
                    is_end = false;
                }
                break;

            case L_INT:
                if (c > 47 && c < 58) { // 0-9
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    is_end = false;
                } else if (c == 46) { // c == '.'
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    state = L_DOUBLE;
                    is_end = false;
                } else if (SEPARATOR) {
                    token->token_index = save_token(buffer);
                    token->token_type = L_INT;
                    ungetc(c, fin);
                    state = START;
                    return index;
                } else if (c == 'e' || c == 'E') {
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    state = SCIENTIFIC;
                    is_end = false;
                } else {
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    wrong_identifier = true;
                    state = IDENTIFIER;
                    is_end = false;
                }
                break;

            case L_DOUBLE:
                if (c > 47 && c < 58) { // 0-9
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    is_number_after_dot = true;
                    is_end = false;
                } else if ((SEPARATOR) && is_number_after_dot) {
                    token->token_type = L_DOUBLE;
                    token->token_index = save_token(buffer);
                    ungetc(c, fin);
                    state = START;
                    return index;
                } else if ((SEPARATOR) && !is_number_after_dot) {
                    token->token_type = ERRORT;
                    token->token_index = save_token(buffer);
                    ungetc(c, fin);
                    state = START;
                    return index;
                } else if ((c == 'e' || c == 'E') && is_number_after_dot) {
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    state = SCIENTIFIC;
                    is_end = false;
                } else {
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    wrong_identifier = true;
                    state = IDENTIFIER;
                    is_end = false;
                }
                break;

            case SCIENTIFIC:
                if ((c == '+' || c == '-') && !signed_scientific && !is_number_after_sign) {
                    signed_scientific = true;
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    state = SCIENTIFIC;
                    is_end = false;
                } else if (c > 47 && c < 58) {
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    is_number_after_sign = true;
                    is_end = false;
                } else if ((SEPARATOR) && is_number_after_sign) {
                    token->token_type = L_DOUBLE;
                    token->token_index = save_token(buffer);
                    ungetc(c, fin);
                    state = START;
                    return index;
                } else {
                    // catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    wrong_identifier = true;
                    state = IDENTIFIER;
                    is_end = false;
                    ungetc(c, fin);
                }
                break;

            case L_STRING:
                if (c == '"') {
                    token->token_type = L_STRING;
                    token->token_index = save_token(buffer);
                    state = START;
                    return index;
                } else if (c == '\\') {
                    // catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    state = L_STRING_BACKSLASH;
                    is_end = false;
                } else {
                    catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                    is_end = false;
                }
                break;

            case L_STRING_BACKSLASH:
                switch (c) {
                    case 'n':
                        catch_token_internal_error(add_char(buffer, '\n'), INTERNAL_ERROR, token, index);
                        state = L_STRING;
                        is_end = false;
                        break;
                    case 't':
                        catch_token_internal_error(add_char(buffer, '\t'), INTERNAL_ERROR, token, index);
                        state = L_STRING;
                        is_end = false;
                        break;
                    case '"':
                        catch_token_internal_error(add_char(buffer, '"'), INTERNAL_ERROR, token, index);
                        state = L_STRING;
                        is_end = false;
                        break;
                    case '\\':
                        catch_token_internal_error(add_char(buffer, '\\'), INTERNAL_ERROR, token, index);
                        state = L_STRING;
                        is_end = false;
                        break;
                    case 'x':
                        // \xHH escape: read two hex digits and convert them to one char
                        hx[0] = fgetc(fin);
                        hx[1] = fgetc(fin);
                        x = (char)(int)strtol(hx, NULL, 16);
                        if (x != '\0') { // was strcmp(&x, ""), which read past a single char
                            catch_token_internal_error(add_char(buffer, x), INTERNAL_ERROR, token, index);
                            state = L_STRING;
                            break;
                        }
                        /* fall through: invalid hex escape */
                    default:
                        token->token_type = ERRORT;
                        token->token_index = save_token(buffer);
                        ungetc(c, fin);
                        state = START;
                        return index;
                }
                // catch_token_internal_error(add_char(buffer, c), INTERNAL_ERROR, token, index);
                state = L_STRING;
                break;
        } // switch (state)
    } // while (1)

    return 0;
}
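/*
 * Hedged driver sketch, not part of the project sources above: pulls tokens
 * out of get_token until EOFT (or a lexical error) is seen. It assumes fin,
 * buffer and struct_buffer were opened and initialised elsewhere; the only
 * calls used here (get_token, dereference_structure, load_token, debug_print)
 * already appear in the surrounding excerpts.
 */
static int scan_all_tokens(FILE *fin, TDynamic_buffer *buffer,
                           TDynamic_structure_buffer *struct_buffer)
{
    TToken *token = NULL;

    while (1) {
        index_t i = get_token(fin, buffer, struct_buffer);
        if (dereference_structure(struct_buffer, i, (void **)&token) == INTERNAL_ERROR)
            return INTERNAL_ERROR;

        if (token->token_type == ERRORT)
            return LEXICAL_ERROR;
        if (token->token_type == EOFT)
            return 0;

        /* the lexeme text can be looked up through the string buffer */
        debug_print("token %lu: type %d, text '%s'\n",
                    (unsigned long)i, token->token_type,
                    load_token(buffer, token->token_index));
    }
}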
/* Reduce the single token on top of the expression stack to an RVALUE and
 * emit the matching PUSH instruction for it. */
int short_reduction(Resources *res, TStack *stack)
{
    args_assert(res != NULL && stack != NULL, INTERNAL_ERROR);

    TToken *token_to_reduce;
    int err;

    debug_print("%s\n", "ONE TOKEN REDUCED");

    catch_internal_error(
        dereference_structure(&res->struct_buff, stack->top, (void **)&token_to_reduce),
        INTERNAL_ERROR,
        "Failed to dereference structure buffer."
    );

    int type = token_to_reduce->token_type;

    if (type == IDENTIFIER) {
        debug_print("%s\n", "TOKEN ID CHECKING TYPE");

        err = is_var_declared(res, token_to_reduce->token_index);
        catch_internal_error(err, INTERNAL_ERROR, "Failed to check if var was declared");
        catch_undefined_error(err, SEMANTIC_ERROR, "Failed to check if var was declared", 1);

        type = get_var_type(res, token_to_reduce->token_index);
        catch_internal_error(type, INTERNAL_ERROR, "Failed to get variable type");
        catch_undefined_error(type, SEMANTIC_ERROR, "Failed to get variable type", 1);

        index_t offset;
        catch_undefined_error(load_var_index(res, token_to_reduce->token_index, &offset),
                              SEMANTIC_ERROR, "Failed to get var offset", 1);

        if (type == L_INT)
            catch_internal_error(new_instruction_mem_mem(&res->instruction_buffer, 0lu, offset, 0lu, PUSH_INT_MEM),
                                 INTERNAL_ERROR, "Failed to generate new instruction");
        else if (type == L_DOUBLE)
            catch_internal_error(new_instruction_mem_mem(&res->instruction_buffer, 0lu, offset, 0lu, PUSH_DBL_MEM),
                                 INTERNAL_ERROR, "Failed to generate new instruction");
        else
            catch_internal_error(new_instruction_mem_mem(&res->instruction_buffer, 0lu, offset, 0lu, PUSH_INDEX_MEM),
                                 INTERNAL_ERROR, "Failed to generate new instruction");
    } else { // LITERAL
        if (type == L_INT) {
            int value;
            if ((value = to_int(load_token(&res->string_buff, token_to_reduce->token_index))) < 0)
                return LEXICAL_ERROR;
            catch_internal_error(new_instruction_int_int(&res->instruction_buffer, 0lu, value, 0, PUSH_INT_CONST),
                                 INTERNAL_ERROR, "Failed to generate new instruction");
        } else if (type == L_DOUBLE) {
            double value;
            if ((value = to_double(load_token(&res->string_buff, token_to_reduce->token_index))) < 0.0)
                return LEXICAL_ERROR;
            catch_internal_error(new_instruction_dbl_dbl(&res->instruction_buffer, 0lu, value, 0.0, PUSH_DBL_CONST),
                                 INTERNAL_ERROR, "Failed to generate new instruction");
        } else if (type == L_STRING) {
            catch_internal_error(new_instruction_mem_mem(&res->instruction_buffer, 0lu, token_to_reduce->token_index, 0lu, PUSH_INDEX_CONST),
                                 INTERNAL_ERROR, "Failed to generate new instruction");
        }
    }

    debug_print("%s: %d\n", "ORIGINAL_TYPE:", type);

    index_t top_index = stack->top;
    pop(&res->struct_buff, stack);
    pop(&res->struct_buff, stack); // Remove SHIFT from stack
    push(&res->struct_buff, stack, top_index);

    return overwrite_top(&res->struct_buff, stack, RVALUE, type);
}