/**
 * Convert a literal expression into the tarval it denotes in the given mode.
 *
 * Integer literals carry a pre-computed target_value; floating-point
 * literals are parsed from their source text; booleans map to one/null;
 * the MS "__noop" literal evaluates to null.
 */
static ir_tarval *literal_to_tarval_(const literal_expression_t *literal, ir_mode *mode)
{
	switch (literal->base.kind) {
	case EXPR_LITERAL_INTEGER:
		/* integers were already folded during semantic analysis */
		assert(literal->target_value != NULL);
		return literal->target_value;

	case EXPR_LITERAL_FLOATINGPOINT:
		return new_tarval_from_str(literal->value->begin, literal->value->size, mode);

	case EXPR_LITERAL_BOOLEAN:
		/* the token text is either "true" or "false" */
		if (literal->value->begin[0] == 't')
			return get_mode_one(mode);
		assert(literal->value->begin[0] == 'f');
		return get_mode_null(mode);

	case EXPR_LITERAL_MS_NOOP:
		/* Microsoft __noop evaluates to 0 */
		return get_mode_null(mode);

	default:
		panic("invalid literal kind");
	}
}
/**
 * Lower a 64bit addition: a 32bit add for the low words and an
 * add-with-carry for the high words. When the carry value is known
 * statically it is folded directly into the upper add.
 */
static void ia32_lower_add64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbgi       = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Add_left(node);
	ir_node  *right      = get_Add_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_mode  *low_mode   = get_irn_mode(left_low);
	ir_mode  *high_mode  = get_irn_mode(left_high);
	carry_result carry   = lower_add_carry(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (carry == no_carry) {
		/* no carry possible: two independent adds suffice */
		ir_node *low  = new_rd_Add(dbgi, block, left_low, right_low, low_mode);
		ir_node *high = new_rd_Add(dbgi, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, low, high);
	} else if (carry == must_carry
	           && (is_Const(left_high) || is_Const(right_high))) {
		/* The carry is known to be set: pre-add 1 to the constant high
		 * word. We cannot assume that left_high and right_high form a
		 * normalized Add. */
		ir_node  *constant       = is_Const(left_high) ? left_high  : right_high;
		ir_node  *other          = is_Const(left_high) ? right_high : left_high;
		ir_graph *irg            = get_irn_irg(right_high);
		ir_node  *one            = new_rd_Const(dbgi, irg, get_mode_one(high_mode));
		ir_node  *const_plus_one = new_rd_Add(dbgi, block, constant, one, high_mode);
		ir_node  *high           = new_rd_Add(dbgi, block, other, const_plus_one, high_mode);
		ir_node  *low            = new_rd_Add(dbgi, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, low, high);
	} else {
		/* l_res = a_l + b_l */
		ir_node *add_low    = new_bd_ia32_l_Add(dbgi, block, left_low, right_low);
		ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node *res_low    = new_r_Proj(add_low, ia32_mode_gp, pn_ia32_l_Add_res);
		ir_node *flags      = new_r_Proj(add_low, mode_flags, pn_ia32_l_Add_flags);
		/* h_res = a_h + b_h + carry */
		ir_node *add_high   = new_bd_ia32_l_Adc(dbgi, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, add_high);
	}
}
/**
 * Rebuild a tarval from its condensed representation.
 *
 * R holds r shift distances; each entry shifts the running bit value
 * further left (an entry of 0 reuses the previous position), and all
 * resulting bit values are summed up.
 */
static ir_tarval *condensed_to_value(mul_env *env, unsigned char *R, int r)
{
	ir_tarval *bit = get_mode_one(env->mode);
	ir_tarval *sum = NULL;
	for (int i = 0; i < r; ++i) {
		int shift = R[i];
		if (shift != 0)
			bit = tarval_shl_unsigned(bit, shift);
		sum = (sum == NULL) ? bit : tarval_add(sum, bit);
	}
	return sum;
}
/**
 * Lower a 64bit subtraction: a 32bit sub for the low words and a
 * sub-with-borrow for the high words. When the borrow value is known
 * statically it is folded directly into the upper sub.
 */
static void ia32_lower_sub64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbgi       = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Sub_left(node);
	ir_node  *right      = get_Sub_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_mode  *low_mode   = get_irn_mode(left_low);
	ir_mode  *high_mode  = get_irn_mode(left_high);
	carry_result borrow  = lower_sub_borrow(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (borrow == no_carry) {
		/* no borrow possible: two independent subs suffice */
		ir_node *low  = new_rd_Sub(dbgi, block, left_low, right_low, low_mode);
		ir_node *high = new_rd_Sub(dbgi, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, low, high);
	} else if (borrow == must_carry
	           && (is_Const(left_high) || is_Const(right_high))) {
		/* borrow is known to be set: fold the extra -1 into the
		 * constant high word */
		ir_node  *high;
		ir_graph *irg = get_irn_irg(right_high);
		ir_node  *one = new_rd_Const(dbgi, irg, get_mode_one(high_mode));
		if (is_Const(right_high)) {
			/* a_h - b_h - 1  =  a_h - (b_h + 1) */
			ir_node *incremented = new_rd_Add(dbgi, block, right_high, one, high_mode);
			high = new_rd_Sub(dbgi, block, left_high, incremented, high_mode);
		} else if (is_Const(left_high)) {
			/* a_h - b_h - 1  =  (a_h - 1) - b_h */
			ir_node *decremented = new_rd_Sub(dbgi, block, left_high, one, high_mode);
			high = new_rd_Sub(dbgi, block, decremented, right_high, high_mode);
		} else {
			panic("logic error");
		}
		ir_node *low = new_rd_Sub(dbgi, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, low, high);
	} else {
		/* l_res = a_l - b_l */
		ir_node *sub_low    = new_bd_ia32_l_Sub(dbgi, block, left_low, right_low);
		ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node *res_low    = new_r_Proj(sub_low, ia32_mode_gp, pn_ia32_l_Sub_res);
		ir_node *flags      = new_r_Proj(sub_low, mode_flags, pn_ia32_l_Sub_flags);
		/* h_res = a_h - b_h - borrow */
		ir_node *sub_high   = new_bd_ia32_l_Sbb(dbgi, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, sub_high);
	}
}
/*
 * Self-test for float<->int tarval conversions in the given float mode:
 * special values saturate at the integer range and both zeros map to 0;
 * a set of exactly representable numbers must round-trip.
 */
static void check_mode(ir_mode *mode)
{
	ir_tarval *zero       = get_mode_null(mode);
	ir_tarval *minus_zero = tarval_neg(zero);
	ir_tarval *min        = get_mode_min(mode);
	ir_tarval *max        = get_mode_max(mode);
	ir_tarval *inf        = get_mode_infinite(mode);
	ir_tarval *minus_inf  = tarval_neg(inf);
	ir_tarval *one        = get_mode_one(mode);
	ir_tarval *minus_one  = tarval_neg(one);

	/* some random arithmetics */
	ir_tarval *int_zero      = get_mode_null(mode_Is);
	ir_tarval *int_one       = get_mode_one(mode_Is);
	ir_tarval *int_minus_one = get_mode_all_one(mode_Is);
	ir_tarval *int_min       = get_mode_min(mode_Is);
	ir_tarval *int_max       = get_mode_max(mode_Is);

	assert(tarval_convert_to(zero, mode_Is) == int_zero);
	assert(tarval_convert_to(minus_zero, mode_Is) == int_zero);
	assert(tarval_convert_to(one, mode_Is) == int_one);
	assert(tarval_convert_to(minus_one, mode_Is) == int_minus_one);
	assert(tarval_convert_to(min, mode_Is) == int_min);
	assert(tarval_convert_to(max, mode_Is) == int_max);
	/* out-of-range values saturate */
	assert(tarval_convert_to(inf, mode_Is) == int_max);
	assert(tarval_convert_to(minus_inf, mode_Is) == int_min);

	static const char *const numbers[] = {
		"0", "1", "-1", "12345", "2", "4", "8", "16", "32", "64",
		"128", "256", "512", "1024", "2048", "127", "2047"
	};
	for (size_t idx = 0; idx < ARRAY_SIZE(numbers); ++idx) {
		const char *repr     = numbers[idx];
		size_t      len      = strlen(repr);
		ir_tarval  *as_float = new_tarval_from_str(repr, len, mode);
		ir_tarval  *as_int   = new_tarval_from_str(repr, len, mode_Is);
		/* conversion must round-trip exactly in both directions */
		assert(tarval_convert_to(as_float, mode_Is) == as_int);
		assert(tarval_convert_to(as_int, mode) == as_float);
	}
}
/** * Transforms a Sub to a Neg + Add, which subsequently allows swapping * of the inputs. The swapping is also (implicitly) done here. */ static void transform_sub_to_neg_add(ir_node *node, const arch_register_t *out_reg) { ir_node *block = get_nodes_block(node); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *in1 = get_irn_n(node, 0); ir_node *in2 = get_irn_n(node, 1); const arch_register_t *in2_reg = arch_get_irn_register(in2); const amd64_binop_addr_attr_t *attr = get_amd64_binop_addr_attr(node); ir_node *add; unsigned pos; if (is_amd64_subs(node)) { unsigned bits = x86_bytes_from_size(attr->base.base.size) * 8; ir_tarval *tv = get_mode_one(amd64_mode_xmm); tv = tarval_shl_unsigned(tv, bits - 1); ir_entity *sign_bit_const = create_float_const_entity(tv); amd64_binop_addr_attr_t xor_attr = { .base = { .base = { .op_mode = AMD64_OP_REG_ADDR, .size = X86_SIZE_64, }, }, }; init_lconst_addr(&xor_attr.base.addr, sign_bit_const); ir_node *xor_in[] = { in2 }; ir_node *const xor = new_bd_amd64_xorp(dbgi, block, ARRAY_SIZE(xor_in), xor_in, amd64_xmm_reqs, &xor_attr); sched_add_before(node, xor); ir_node *const neg = be_new_Proj_reg(xor, pn_amd64_xorp_res, in2_reg); ir_node *in[] = { neg, in1 }; add = new_bd_amd64_adds(dbgi, block, ARRAY_SIZE(in), in, amd64_xmm_xmm_reqs, attr); pos = pn_amd64_adds_res; } else {
/**
 * Assign a tarval to every enumerator of the given enum.
 *
 * An enumerator with an explicit (valid) initializer takes its folded
 * value; every other enumerator gets its predecessor's value plus one,
 * starting from zero.
 */
void determine_enum_values(enum_t *const enume)
{
	if (enume->error)
		return;

	ir_mode   *const mode = atomic_modes[enume->akind];
	ir_tarval *const one  = get_mode_one(mode);
	ir_tarval       *next = get_mode_null(mode);
	for (entity_t *entry = enume->first_value;
	     entry != NULL && entry->kind == ENTITY_ENUM_VALUE;
	     entry = entry->base.next) {
		expression_t *const init = entry->enum_value.value;
		if (init != NULL) {
			type_t *const init_type = skip_typeref(init->base.type);
			/* skip enumerators with erroneous initializers entirely */
			if (!is_type_valid(init_type))
				continue;
			next = fold_expression(init);
		}
		assert(entry->enum_value.tv == NULL || entry->enum_value.tv == next);
		entry->enum_value.tv = next;
		next = tarval_add(next, one);
	}
}
/*
 * Check whether the value of a node can be confirmed to be >= 0 or <= 0
 * (if the mode of the value does not honor signed zeros), else
 * check for >= 0 or < 0.
 */
ir_value_classify_sign classify_value_sign(ir_node *n)
{
	ir_tarval *tv, *c;
	ir_mode *mode;
	ir_relation cmp, ncmp;
	int negate = 1;

	/* strip any number of Minus nodes, tracking the resulting sign flip */
	for (;;) {
		unsigned code = get_irn_opcode(n);
		switch (code) {
		case iro_Minus:
			negate *= -1;
			n = get_Minus_op(n);
			continue;
		case iro_Confirm:
			break;
		default:
			return value_classified_unknown;
		}
		break;
	}
	if (!is_Confirm(n))
		return value_classified_unknown;

	/* the Confirm bound must be a constant to be useful */
	tv = value_of(get_Confirm_bound(n));
	if (tv == tarval_bad)
		return value_classified_unknown;

	mode = get_irn_mode(n);

	/*
	 * We can handle only >=, >, <, <= cases.
	 * We could handle == too, but this will be optimized into
	 * a constant either.
	 *
	 * Note that for integer modes we have a slightly better
	 * optimization possibilities, so we handle this
	 * different.
	 */
	cmp = get_Confirm_relation(n);

	switch (cmp) {
	case ir_relation_less:
		/*
		 * must be x < c <= 1 to be useful if integer mode and -0 = 0
		 *         x < c <= 0 to be useful else
		 */
	case ir_relation_less_equal:
		/*
		 * must be x <= c < 1 to be useful if integer mode and -0 = 0
		 *         x <= c < 0 to be useful else
		 */
		/* BUGFIX: the bound 1 applies when the mode does NOT honor
		 * signed zeros (-0 == 0, see comments above); the condition
		 * lacked the negation and thus never fired for integer modes. */
		c = mode_is_int(mode) && !mode_honor_signed_zeros(mode) ?
			get_mode_one(mode) : get_mode_null(mode);

		ncmp = tarval_cmp(tv, c);
		if (ncmp == ir_relation_equal)
			ncmp = ir_relation_less_equal;

		if (cmp != (ncmp ^ ir_relation_equal))
			return value_classified_unknown;

		/* yep, negative */
		return value_classified_negative * negate;

	case ir_relation_greater_equal:
		/*
		 * must be x >= c > -1 to be useful if integer mode
		 *         x >= c >= 0 to be useful else
		 */
	case ir_relation_greater:
		/*
		 * must be x > c >= -1 to be useful if integer mode
		 *         x > c >= 0 to be useful else
		 */
		if (mode_is_int(mode)) {
			c = get_mode_minus_one(mode);

			ncmp = tarval_cmp(tv, c);
			if (ncmp == ir_relation_equal)
				ncmp = ir_relation_greater_equal;

			if (cmp != (ncmp ^ ir_relation_equal))
				return value_classified_unknown;
		} else {
			/* BUGFIX: for non-integer modes the bound must be >= 0
			 * (see comments above); comparing against -1 would wrongly
			 * classify e.g. "x > -0.5" as non-negative. */
			c = get_mode_null(mode);

			ncmp = tarval_cmp(tv, c);
			if (ncmp != ir_relation_equal && ncmp != ir_relation_greater)
				return value_classified_unknown;
		}

		/* yep, positive */
		return value_classified_positive * negate;

	default:
		return value_classified_unknown;
	}
}