/* Compare the relative costs of "(X & (1 << BITNUM))" and
   "(X >> BITNUM) & 1", where X is an arbitrary register of mode MODE.
   Return true if the former is preferred.  */

static bool
prefer_and_bit_test (enum machine_mode mode, int bitnum)
{
  bool speed_p;
  wide_int mask = wi::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode));

  if (and_test == 0)
    {
      /* Set up rtxes for the two variations.  Use NULL as a placeholder
	 for the BITNUM-based constants.  */
      and_reg = gen_rtx_REG (mode, FIRST_PSEUDO_REGISTER);
      and_test = gen_rtx_AND (mode, and_reg, NULL);
      shift_test = gen_rtx_AND (mode, gen_rtx_ASHIFTRT (mode, and_reg, NULL),
				const1_rtx);
    }
  else
    {
      /* Change the mode of the previously-created rtxes.  */
      PUT_MODE (and_reg, mode);
      PUT_MODE (and_test, mode);
      PUT_MODE (shift_test, mode);
      PUT_MODE (XEXP (shift_test, 0), mode);
    }

  /* Fill in the integers.  */
  XEXP (and_test, 1) = immed_wide_int_const (mask, mode);
  XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);

  speed_p = optimize_insn_for_speed_p ();
  return (rtx_cost (and_test, IF_THEN_ELSE, 0, speed_p)
	  <= rtx_cost (shift_test, IF_THEN_ELSE, 0, speed_p));
}
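/* Illustrative sketch (plain C, not GCC source): the two rtx shapes being
   costed above correspond to the source-level forms "x & (1 << bitnum)"
   and "(x >> bitnum) & 1".  They are semantically interchangeable, which
   is why a pure cost comparison can pick between them; the BITNUM value
   below is a hypothetical example.  */

#include <assert.h>
#include <stdio.h>

#define BITNUM 5  /* hypothetical bit position */

static int
test_with_and (unsigned x)
{
  return (x & (1u << BITNUM)) != 0;	/* the and_test shape */
}

static int
test_with_shift (unsigned x)
{
  return ((x >> BITNUM) & 1u) != 0;	/* the shift_test shape */
}

int
main (void)
{
  /* Verify the two forms agree on a range of inputs.  */
  for (unsigned x = 0; x < 1024; x++)
    assert (test_with_and (x) == test_with_shift (x));
  puts ("both single-bit test forms agree");
  return 0;
}

/* The costs can differ because the AND form needs the mask constant
   1 << BITNUM, which some targets cannot encode as an immediate, while
   the shift form only needs the small constant BITNUM.  */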
/* Return a vector element with the value BASE + FACTOR * STEP.  */

rtx
rtx_vector_builder::apply_step (rtx base, unsigned int factor,
				const wide_int &step) const
{
  scalar_int_mode int_mode = as_a <scalar_int_mode> (GET_MODE_INNER (m_mode));
  return immed_wide_int_const (wi::add (rtx_mode_t (base, int_mode),
					factor * step),
			       int_mode);
}
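/* Illustrative sketch (plain C, not GCC source): apply_step computes one
   constant of a stepped vector series, BASE + FACTOR * STEP, where FACTOR
   stands for how many steps away from the base element we are.  The names
   below are hypothetical; GCC performs the same arithmetic in wide_int
   precision rather than with host integers.  */

#include <stdio.h>

static long
apply_step_sketch (long base, unsigned factor, long step)
{
  return base + (long) factor * step;
}

int
main (void)
{
  long base = 10, step = 3;
  /* Builds the series {10, 13, 16, 19}.  */
  for (unsigned i = 0; i < 4; i++)
    printf ("element %u = %ld\n", i, apply_step_sketch (base, i, step));
  return 0;
}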
/* Expand a switch statement by a short sequence of bit-wise comparisons.
   "switch (x)" is effectively converted into "if ((1 << (x - MINVAL)) & CST)"
   where CST and MINVAL are integer constants.  INDEX_EXPR is the value being
   switched on.  MINVAL is the lowest case value of the case nodes, RANGE is
   the highest value minus MINVAL, and MAXVAL is MINVAL + RANGE.  */

static void
emit_case_bit_tests (gswitch *swtch, tree index_expr,
		     tree minval, tree range, tree maxval)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (prec);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].mask = wi::zero (prec);
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
	test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (wone, j);
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0
      && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_bb_for_speed_p (gimple_bb (swtch));
      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)), speed_p);
      for (i = 0; i < count; i++)
	{
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	  range = maxval;
	}
    }

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge,
					  update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
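/* Illustrative sketch (plain C, not GCC source) of the transformation
   emit_case_bit_tests performs.  A switch whose cases funnel into a few
   shared targets, say

     switch (x) { case 0: case 2: case 4: ...A...; case 7: ...B...;
		  default: ... }

   becomes a range check followed by one mask test per target.  The case
   values, masks, and labels below are hypothetical.  */

#include <stdio.h>

static const char *
dispatch (unsigned x)
{
  unsigned idx = x - 0;			/* idx = (unsigned)x - minval */
  if (idx > 7)				/* if (idx > range) goto default */
    return "default";
  unsigned long csui = 1ul << idx;	/* csui = 1 << idx */
  if (csui & 0x15ul)			/* cases 0, 2, 4: mask 0b10101 */
    return "A";
  if (csui & 0x80ul)			/* case 7: mask 0b10000000 */
    return "B";
  return "default";
}

int
main (void)
{
  for (unsigned x = 0; x < 10; x++)
    printf ("%u -> %s\n", x, dispatch (x));
  return 0;
}

/* The cost comparison earlier in the function decides whether to keep the
   "x - minval" subtraction or to fold minval into pre-shifted masks when
   all case values already fit in a word.  */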
/* Returns address for TARGET_MEM_REF with parameters given by ADDR in
   address space AS.  If REALLY_EXPAND is false, just make fake registers
   instead of really expanding the operands, and perhaps copy the address
   elsewhere.  */

rtx
addr_for_mem_ref (struct mem_address *addr, addr_space_t as,
		  bool really_expand)
{
  machine_mode address_mode = targetm.addr_space.address_mode (as);
  machine_mode pointer_mode = targetm.addr_space.pointer_mode (as);
  rtx address, sym, bse, idx, st, off;
  struct mem_addr_template *templ;

  if (addr->step && !integer_onep (addr->step))
    st = immed_wide_int_const (addr->step, pointer_mode);
  else
    st = NULL_RTX;

  if (addr->offset && !integer_zerop (addr->offset))
    {
      offset_int dc = offset_int::from (addr->offset, SIGNED);
      off = immed_wide_int_const (dc, pointer_mode);
    }
  else
    off = NULL_RTX;

  if (!really_expand)
    {
      unsigned int templ_index
	= TEMPL_IDX (as, addr->symbol, addr->base, addr->index, st, off);

      if (templ_index >= vec_safe_length (mem_addr_template_list))
	vec_safe_grow_cleared (mem_addr_template_list, templ_index + 1);

      /* Reuse the templates for addresses, so that we do not waste
	 memory.  */
      templ = &(*mem_addr_template_list)[templ_index];
      if (!templ->ref)
	{
	  sym = (addr->symbol
		 ? gen_rtx_SYMBOL_REF (pointer_mode, ggc_strdup ("test_symbol"))
		 : NULL_RTX);
	  bse = (addr->base
		 ? gen_raw_REG (pointer_mode, LAST_VIRTUAL_REGISTER + 1)
		 : NULL_RTX);
	  idx = (addr->index
		 ? gen_raw_REG (pointer_mode, LAST_VIRTUAL_REGISTER + 2)
		 : NULL_RTX);

	  gen_addr_rtx (pointer_mode, sym, bse, idx,
			st ? const0_rtx : NULL_RTX,
			off ? const0_rtx : NULL_RTX,
			&templ->ref,
			&templ->step_p,
			&templ->off_p);
	}

      if (st)
	*templ->step_p = st;
      if (off)
	*templ->off_p = off;

      return templ->ref;
    }

  /* Otherwise really expand the expressions.  */
  sym = (addr->symbol
	 ? expand_expr (addr->symbol, NULL_RTX, pointer_mode, EXPAND_NORMAL)
	 : NULL_RTX);
  bse = (addr->base
	 ? expand_expr (addr->base, NULL_RTX, pointer_mode, EXPAND_NORMAL)
	 : NULL_RTX);
  idx = (addr->index
	 ? expand_expr (addr->index, NULL_RTX, pointer_mode, EXPAND_NORMAL)
	 : NULL_RTX);

  gen_addr_rtx (pointer_mode, sym, bse, idx, st, off, &address, NULL, NULL);
  if (pointer_mode != address_mode)
    address = convert_memory_address (address_mode, address);
  return address;
}
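/* Illustrative sketch (plain C, not GCC source): the address shape that
   addr_for_mem_ref materializes for a TARGET_MEM_REF is

     address = symbol + base + index * step + offset

   with any part optionally absent.  The function and values below are
   hypothetical; in the real code a step of 1 and an offset of 0 are
   simply omitted from the RTL (the integer_onep/integer_zerop checks),
   which is safe because they are arithmetic identities.  */

#include <stdio.h>
#include <stdint.h>

static uintptr_t
make_address (uintptr_t symbol, uintptr_t base,
	      uintptr_t index, uintptr_t step, intptr_t offset)
{
  return symbol + base + index * step + offset;
}

int
main (void)
{
  /* E.g. &a[3].f for an array at 0x1000 with 16-byte elements and a
     field at offset 8: 0x1000 + 3 * 16 + 8 = 0x1038.  */
  printf ("0x%lx\n",
	  (unsigned long) make_address (0x1000, 0, 3, 16, 8));
  return 0;
}

/* When REALLY_EXPAND is false, the function plugs fake registers and
   placeholder constants into a cached template of this shape, so that
   repeated cost queries do not allocate fresh RTL each time.  */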