/**
 * Creates a tarval from a condensed representation.
 *
 * R holds r shift distances: each entry shifts the running power-of-two
 * further left (a zero entry keeps the current power), and every step's
 * power is accumulated into the result.
 *
 * @param env  multiplication environment supplying the target mode
 * @param R    array of r shift distances
 * @param r    number of entries in R
 * @return the reconstructed tarval, or NULL when r == 0
 */
static ir_tarval *condensed_to_value(mul_env *env, unsigned char *R, int r)
{
	ir_tarval *sum   = NULL;
	ir_tarval *power = get_mode_one(env->mode);
	for (int k = 0; k < r; ++k) {
		int shift = R[k];
		if (shift != 0)
			power = tarval_shl_unsigned(power, shift);
		sum = (sum == NULL) ? power : tarval_add(sum, power);
	}
	return sum;
}
/**
 * Adjust the size of a node representing a stack alloc to a certain
 * stack_alignment.
 *
 * @param dbgi   debug info to attach to the newly created nodes
 * @param size   the node containing the non-aligned size
 * @param block  the block where new nodes are allocated on
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size, ir_node *block)
{
	/* Example: po2_alignment 4 (align to 16 bytes):
	 * size = (size+15) & 0xfff...f0 */
	ir_mode  *mode = get_irn_mode(size);
	ir_graph *irg  = get_irn_irg(block);

	/* Build a mask with the low po2_stack_alignment bits cleared. */
	ir_tarval *ones = get_mode_all_one(mode);
	ir_tarval *mask = tarval_shl_unsigned(tarval_shr_unsigned(ones, po2_stack_alignment),
	                                      po2_stack_alignment);

	/* Round up by adding (alignment-1), then clear the low bits. */
	ir_node *bias    = new_r_Const(irg, tarval_not(mask));
	ir_node *biased  = new_rd_Add(dbgi, block, size, bias);
	ir_node *mask_cn = new_r_Const(irg, mask);
	return new_rd_And(dbgi, block, biased, mask_cn);
}
/** * Transforms a Sub to a Neg + Add, which subsequently allows swapping * of the inputs. The swapping is also (implicitly) done here. */ static void transform_sub_to_neg_add(ir_node *node, const arch_register_t *out_reg) { ir_node *block = get_nodes_block(node); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *in1 = get_irn_n(node, 0); ir_node *in2 = get_irn_n(node, 1); const arch_register_t *in2_reg = arch_get_irn_register(in2); const amd64_binop_addr_attr_t *attr = get_amd64_binop_addr_attr(node); ir_node *add; unsigned pos; if (is_amd64_subs(node)) { unsigned bits = x86_bytes_from_size(attr->base.base.size) * 8; ir_tarval *tv = get_mode_one(amd64_mode_xmm); tv = tarval_shl_unsigned(tv, bits - 1); ir_entity *sign_bit_const = create_float_const_entity(tv); amd64_binop_addr_attr_t xor_attr = { .base = { .base = { .op_mode = AMD64_OP_REG_ADDR, .size = X86_SIZE_64, }, }, }; init_lconst_addr(&xor_attr.base.addr, sign_bit_const); ir_node *xor_in[] = { in2 }; ir_node *const xor = new_bd_amd64_xorp(dbgi, block, ARRAY_SIZE(xor_in), xor_in, amd64_xmm_reqs, &xor_attr); sched_add_before(node, xor); ir_node *const neg = be_new_Proj_reg(xor, pn_amd64_xorp_res, in2_reg); ir_node *in[] = { neg, in1 }; add = new_bd_amd64_adds(dbgi, block, ARRAY_SIZE(in), in, amd64_xmm_xmm_reqs, attr); pos = pn_amd64_adds_res; } else {